` element
+ is obsolete. You can no longer use the element with any supported voice.
"""
self.custom_pronunciation = custom_pronunciation
self.voice_transformation = voice_transformation
@classmethod
- def _from_dict(cls, _dict):
+ def from_dict(cls, _dict: Dict) -> 'SupportedFeatures':
"""Initialize a SupportedFeatures object from a json dictionary."""
args = {}
- if 'custom_pronunciation' in _dict:
- args['custom_pronunciation'] = _dict.get('custom_pronunciation')
+ if (custom_pronunciation :=
+ _dict.get('custom_pronunciation')) is not None:
+ args['custom_pronunciation'] = custom_pronunciation
else:
raise ValueError(
'Required property \'custom_pronunciation\' not present in SupportedFeatures JSON'
)
- if 'voice_transformation' in _dict:
- args['voice_transformation'] = _dict.get('voice_transformation')
+ if (voice_transformation :=
+ _dict.get('voice_transformation')) is not None:
+ args['voice_transformation'] = voice_transformation
else:
raise ValueError(
'Required property \'voice_transformation\' not present in SupportedFeatures JSON'
)
return cls(**args)
- def _to_dict(self):
+ @classmethod
+ def _from_dict(cls, _dict):
+ """Initialize a SupportedFeatures object from a json dictionary."""
+ return cls.from_dict(_dict)
+
+ def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'custom_pronunciation'
@@ -1004,70 +3196,86 @@ def _to_dict(self):
_dict['voice_transformation'] = self.voice_transformation
return _dict
- def __str__(self):
+ def _to_dict(self):
+ """Return a json dictionary representing this model."""
+ return self.to_dict()
+
+ def __str__(self) -> str:
"""Return a `str` version of this SupportedFeatures object."""
- return json.dumps(self._to_dict(), indent=2)
+ return json.dumps(self.to_dict(), indent=2)
- def __eq__(self, other):
+ def __eq__(self, other: 'SupportedFeatures') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
- def __ne__(self, other):
+ def __ne__(self, other: 'SupportedFeatures') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
-class Translation(object):
+class Translation:
"""
- Translation.
-
- :attr str translation: The phonetic or sounds-like translation for the word. A
- phonetic translation is based on the SSML format for representing the phonetic string
- of a word either as an IPA translation or as an IBM SPR translation. A sounds-like is
- one or more words that, when combined, sound like the word.
- :attr str part_of_speech: (optional) **Japanese only.** The part of speech for the
- word. The service uses the value to produce the correct intonation for the word. You
- can create only a single entry, with or without a single part of speech, for any word;
- you cannot create multiple entries with different parts of speech for the same word.
- For more information, see [Working with Japanese
- entries](https://cloud.ibm.com/docs/services/text-to-speech/custom-rules.html#jaNotes).
+ Information about the translation for the specified text.
+
+ :param str translation: The phonetic or sounds-like translation for the word. A
+ phonetic translation is based on the SSML format for representing the phonetic
+ string of a word either as an IPA translation or as an IBM SPR translation. A
+ sounds-like is one or more words that, when combined, sound like the word.
+ :param str part_of_speech: (optional) **Japanese only.** The part of speech for
+ the word. The service uses the value to produce the correct intonation for the
+ word. You can create only a single entry, with or without a single part of
+ speech, for any word; you cannot create multiple entries with different parts of
+ speech for the same word. For more information, see [Working with Japanese
+ entries](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-rules#jaNotes).
"""
- def __init__(self, translation, part_of_speech=None):
+ def __init__(
+ self,
+ translation: str,
+ *,
+ part_of_speech: Optional[str] = None,
+ ) -> None:
"""
Initialize a Translation object.
- :param str translation: The phonetic or sounds-like translation for the word. A
- phonetic translation is based on the SSML format for representing the phonetic
- string of a word either as an IPA translation or as an IBM SPR translation. A
- sounds-like is one or more words that, when combined, sound like the word.
- :param str part_of_speech: (optional) **Japanese only.** The part of speech for
- the word. The service uses the value to produce the correct intonation for the
- word. You can create only a single entry, with or without a single part of speech,
- for any word; you cannot create multiple entries with different parts of speech
- for the same word. For more information, see [Working with Japanese
- entries](https://cloud.ibm.com/docs/services/text-to-speech/custom-rules.html#jaNotes).
+ :param str translation: The phonetic or sounds-like translation for the
+ word. A phonetic translation is based on the SSML format for representing
+ the phonetic string of a word either as an IPA translation or as an IBM SPR
+ translation. A sounds-like is one or more words that, when combined, sound
+ like the word.
+ :param str part_of_speech: (optional) **Japanese only.** The part of speech
+ for the word. The service uses the value to produce the correct intonation
+ for the word. You can create only a single entry, with or without a single
+ part of speech, for any word; you cannot create multiple entries with
+ different parts of speech for the same word. For more information, see
+ [Working with Japanese
+ entries](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-rules#jaNotes).
"""
self.translation = translation
self.part_of_speech = part_of_speech
@classmethod
- def _from_dict(cls, _dict):
+ def from_dict(cls, _dict: Dict) -> 'Translation':
"""Initialize a Translation object from a json dictionary."""
args = {}
- if 'translation' in _dict:
- args['translation'] = _dict.get('translation')
+ if (translation := _dict.get('translation')) is not None:
+ args['translation'] = translation
else:
raise ValueError(
'Required property \'translation\' not present in Translation JSON'
)
- if 'part_of_speech' in _dict:
- args['part_of_speech'] = _dict.get('part_of_speech')
+ if (part_of_speech := _dict.get('part_of_speech')) is not None:
+ args['part_of_speech'] = part_of_speech
return cls(**args)
- def _to_dict(self):
+ @classmethod
+ def _from_dict(cls, _dict):
+ """Initialize a Translation object from a json dictionary."""
+ return cls.from_dict(_dict)
+
+ def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'translation') and self.translation is not None:
@@ -1076,67 +3284,106 @@ def _to_dict(self):
_dict['part_of_speech'] = self.part_of_speech
return _dict
- def __str__(self):
+ def _to_dict(self):
+ """Return a json dictionary representing this model."""
+ return self.to_dict()
+
+ def __str__(self) -> str:
"""Return a `str` version of this Translation object."""
- return json.dumps(self._to_dict(), indent=2)
+ return json.dumps(self.to_dict(), indent=2)
- def __eq__(self, other):
+ def __eq__(self, other: 'Translation') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
- def __ne__(self, other):
+ def __ne__(self, other: 'Translation') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
+ class PartOfSpeechEnum(str, Enum):
+ """
+ **Japanese only.** The part of speech for the word. The service uses the value to
+ produce the correct intonation for the word. You can create only a single entry,
+ with or without a single part of speech, for any word; you cannot create multiple
+ entries with different parts of speech for the same word. For more information,
+ see [Working with Japanese
+ entries](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-rules#jaNotes).
+ """
-class Voice(object):
+ DOSI = 'Dosi'
+ FUKU = 'Fuku'
+ GOBI = 'Gobi'
+ HOKA = 'Hoka'
+ JODO = 'Jodo'
+ JOSI = 'Josi'
+ KATO = 'Kato'
+ KEDO = 'Kedo'
+ KEYO = 'Keyo'
+ KIGO = 'Kigo'
+ KOYU = 'Koyu'
+ MESI = 'Mesi'
+ RETA = 'Reta'
+ STBI = 'Stbi'
+ STTO = 'Stto'
+ STZO = 'Stzo'
+ SUJI = 'Suji'
+
+
+class Voice:
"""
- Voice.
-
- :attr str url: The URI of the voice.
- :attr str gender: The gender of the voice: `male` or `female`.
- :attr str name: The name of the voice. Use this as the voice identifier in all
- requests.
- :attr str language: The language and region of the voice (for example, `en-US`).
- :attr str description: A textual description of the voice.
- :attr bool customizable: If `true`, the voice can be customized; if `false`, the voice
- cannot be customized. (Same as `custom_pronunciation`; maintained for backward
- compatibility.).
- :attr SupportedFeatures supported_features: Describes the additional service features
- that are supported with the voice.
- :attr VoiceModel customization: (optional) Returns information about a specified
- custom voice model. This field is returned only by the **Get a voice** method and only
- when you specify the customization ID of a custom voice model.
+ Information about an available voice.
+
+ :param str url: The URI of the voice.
+ :param str gender: The gender of the voice: `male` or `female`.
+ :param str name: The name of the voice. Use this as the voice identifier in all
+ requests.
+ :param str language: The language and region of the voice (for example,
+ `en-US`).
+ :param str description: A textual description of the voice.
+    :param bool customizable: If `true`, the voice can be customized; if `false`,
+    the voice cannot be customized. (Same as `custom_pronunciation`; maintained for
+    backward compatibility.)
+ :param SupportedFeatures supported_features: Additional service features that
+ are supported with the voice.
+ :param CustomModel customization: (optional) Returns information about a
+ specified custom model. This field is returned only by the [Get a
+ voice](#getvoice) method and only when you specify the customization ID of a
+ custom model.
"""
- def __init__(self,
- url,
- gender,
- name,
- language,
- description,
- customizable,
- supported_features,
- customization=None):
+ def __init__(
+ self,
+ url: str,
+ gender: str,
+ name: str,
+ language: str,
+ description: str,
+ customizable: bool,
+ supported_features: 'SupportedFeatures',
+ *,
+ customization: Optional['CustomModel'] = None,
+ ) -> None:
"""
Initialize a Voice object.
:param str url: The URI of the voice.
:param str gender: The gender of the voice: `male` or `female`.
- :param str name: The name of the voice. Use this as the voice identifier in all
- requests.
- :param str language: The language and region of the voice (for example, `en-US`).
+ :param str name: The name of the voice. Use this as the voice identifier in
+ all requests.
+ :param str language: The language and region of the voice (for example,
+ `en-US`).
:param str description: A textual description of the voice.
- :param bool customizable: If `true`, the voice can be customized; if `false`, the
- voice cannot be customized. (Same as `custom_pronunciation`; maintained for
- backward compatibility.).
- :param SupportedFeatures supported_features: Describes the additional service
- features that are supported with the voice.
- :param VoiceModel customization: (optional) Returns information about a specified
- custom voice model. This field is returned only by the **Get a voice** method and
- only when you specify the customization ID of a custom voice model.
+        :param bool customizable: If `true`, the voice can be customized; if
+        `false`, the voice cannot be customized. (Same as `custom_pronunciation`;
+        maintained for backward compatibility.)
+ :param SupportedFeatures supported_features: Additional service features
+ that are supported with the voice.
+ :param CustomModel customization: (optional) Returns information about a
+ specified custom model. This field is returned only by the [Get a
+ voice](#getvoice) method and only when you specify the customization ID of
+ a custom model.
"""
self.url = url
self.gender = gender
@@ -1148,52 +3395,56 @@ def __init__(self,
self.customization = customization
@classmethod
- def _from_dict(cls, _dict):
+ def from_dict(cls, _dict: Dict) -> 'Voice':
"""Initialize a Voice object from a json dictionary."""
args = {}
- if 'url' in _dict:
- args['url'] = _dict.get('url')
+ if (url := _dict.get('url')) is not None:
+ args['url'] = url
else:
raise ValueError(
'Required property \'url\' not present in Voice JSON')
- if 'gender' in _dict:
- args['gender'] = _dict.get('gender')
+ if (gender := _dict.get('gender')) is not None:
+ args['gender'] = gender
else:
raise ValueError(
'Required property \'gender\' not present in Voice JSON')
- if 'name' in _dict:
- args['name'] = _dict.get('name')
+ if (name := _dict.get('name')) is not None:
+ args['name'] = name
else:
raise ValueError(
'Required property \'name\' not present in Voice JSON')
- if 'language' in _dict:
- args['language'] = _dict.get('language')
+ if (language := _dict.get('language')) is not None:
+ args['language'] = language
else:
raise ValueError(
'Required property \'language\' not present in Voice JSON')
- if 'description' in _dict:
- args['description'] = _dict.get('description')
+ if (description := _dict.get('description')) is not None:
+ args['description'] = description
else:
raise ValueError(
'Required property \'description\' not present in Voice JSON')
- if 'customizable' in _dict:
- args['customizable'] = _dict.get('customizable')
+ if (customizable := _dict.get('customizable')) is not None:
+ args['customizable'] = customizable
else:
raise ValueError(
'Required property \'customizable\' not present in Voice JSON')
- if 'supported_features' in _dict:
- args['supported_features'] = SupportedFeatures._from_dict(
- _dict.get('supported_features'))
+ if (supported_features := _dict.get('supported_features')) is not None:
+ args['supported_features'] = SupportedFeatures.from_dict(
+ supported_features)
else:
raise ValueError(
'Required property \'supported_features\' not present in Voice JSON'
)
- if 'customization' in _dict:
- args['customization'] = VoiceModel._from_dict(
- _dict.get('customization'))
+ if (customization := _dict.get('customization')) is not None:
+ args['customization'] = CustomModel.from_dict(customization)
return cls(**args)
- def _to_dict(self):
+ @classmethod
+ def _from_dict(cls, _dict):
+ """Initialize a Voice object from a json dictionary."""
+ return cls.from_dict(_dict)
+
+ def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'url') and self.url is not None:
@@ -1211,326 +3462,174 @@ def _to_dict(self):
if hasattr(
self,
'supported_features') and self.supported_features is not None:
- _dict['supported_features'] = self.supported_features._to_dict()
+ if isinstance(self.supported_features, dict):
+ _dict['supported_features'] = self.supported_features
+ else:
+ _dict['supported_features'] = self.supported_features.to_dict()
if hasattr(self, 'customization') and self.customization is not None:
- _dict['customization'] = self.customization._to_dict()
- return _dict
-
- def __str__(self):
- """Return a `str` version of this Voice object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class VoiceModel(object):
- """
- VoiceModel.
-
- :attr str customization_id: The customization ID (GUID) of the custom voice model. The
- **Create a custom model** method returns only this field. It does not not return the
- other fields of this object.
- :attr str name: (optional) The name of the custom voice model.
- :attr str language: (optional) The language identifier of the custom voice model (for
- example, `en-US`).
- :attr str owner: (optional) The GUID of the service credentials for the instance of
- the service that owns the custom voice model.
- :attr str created: (optional) The date and time in Coordinated Universal Time (UTC) at
- which the custom voice model was created. The value is provided in full ISO 8601
- format (`YYYY-MM-DDThh:mm:ss.sTZD`).
- :attr str last_modified: (optional) The date and time in Coordinated Universal Time
- (UTC) at which the custom voice model was last modified. Equals `created` when a new
- voice model is first added but has yet to be updated. The value is provided in full
- ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`).
- :attr str description: (optional) The description of the custom voice model.
- :attr list[Word] words: (optional) An array of `Word` objects that lists the words and
- their translations from the custom voice model. The words are listed in alphabetical
- order, with uppercase letters listed before lowercase letters. The array is empty if
- the custom model contains no words. This field is returned only by the **Get a voice**
- method and only when you specify the customization ID of a custom voice model.
- """
-
- def __init__(self,
- customization_id,
- name=None,
- language=None,
- owner=None,
- created=None,
- last_modified=None,
- description=None,
- words=None):
- """
- Initialize a VoiceModel object.
-
- :param str customization_id: The customization ID (GUID) of the custom voice
- model. The **Create a custom model** method returns only this field. It does not
- not return the other fields of this object.
- :param str name: (optional) The name of the custom voice model.
- :param str language: (optional) The language identifier of the custom voice model
- (for example, `en-US`).
- :param str owner: (optional) The GUID of the service credentials for the instance
- of the service that owns the custom voice model.
- :param str created: (optional) The date and time in Coordinated Universal Time
- (UTC) at which the custom voice model was created. The value is provided in full
- ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`).
- :param str last_modified: (optional) The date and time in Coordinated Universal
- Time (UTC) at which the custom voice model was last modified. Equals `created`
- when a new voice model is first added but has yet to be updated. The value is
- provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`).
- :param str description: (optional) The description of the custom voice model.
- :param list[Word] words: (optional) An array of `Word` objects that lists the
- words and their translations from the custom voice model. The words are listed in
- alphabetical order, with uppercase letters listed before lowercase letters. The
- array is empty if the custom model contains no words. This field is returned only
- by the **Get a voice** method and only when you specify the customization ID of a
- custom voice model.
- """
- self.customization_id = customization_id
- self.name = name
- self.language = language
- self.owner = owner
- self.created = created
- self.last_modified = last_modified
- self.description = description
- self.words = words
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a VoiceModel object from a json dictionary."""
- args = {}
- if 'customization_id' in _dict:
- args['customization_id'] = _dict.get('customization_id')
- else:
- raise ValueError(
- 'Required property \'customization_id\' not present in VoiceModel JSON'
- )
- if 'name' in _dict:
- args['name'] = _dict.get('name')
- if 'language' in _dict:
- args['language'] = _dict.get('language')
- if 'owner' in _dict:
- args['owner'] = _dict.get('owner')
- if 'created' in _dict:
- args['created'] = _dict.get('created')
- if 'last_modified' in _dict:
- args['last_modified'] = _dict.get('last_modified')
- if 'description' in _dict:
- args['description'] = _dict.get('description')
- if 'words' in _dict:
- args['words'] = [Word._from_dict(x) for x in (_dict.get('words'))]
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self,
- 'customization_id') and self.customization_id is not None:
- _dict['customization_id'] = self.customization_id
- if hasattr(self, 'name') and self.name is not None:
- _dict['name'] = self.name
- if hasattr(self, 'language') and self.language is not None:
- _dict['language'] = self.language
- if hasattr(self, 'owner') and self.owner is not None:
- _dict['owner'] = self.owner
- if hasattr(self, 'created') and self.created is not None:
- _dict['created'] = self.created
- if hasattr(self, 'last_modified') and self.last_modified is not None:
- _dict['last_modified'] = self.last_modified
- if hasattr(self, 'description') and self.description is not None:
- _dict['description'] = self.description
- if hasattr(self, 'words') and self.words is not None:
- _dict['words'] = [x._to_dict() for x in self.words]
+ if isinstance(self.customization, dict):
+ _dict['customization'] = self.customization
+ else:
+ _dict['customization'] = self.customization.to_dict()
return _dict
- def __str__(self):
- """Return a `str` version of this VoiceModel object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class VoiceModels(object):
- """
- VoiceModels.
-
- :attr list[VoiceModel] customizations: An array of `VoiceModel` objects that provides
- information about each available custom voice model. The array is empty if the
- requesting service credentials own no custom voice models (if no language is
- specified) or own no custom voice models for the specified language.
- """
-
- def __init__(self, customizations):
- """
- Initialize a VoiceModels object.
-
- :param list[VoiceModel] customizations: An array of `VoiceModel` objects that
- provides information about each available custom voice model. The array is empty
- if the requesting service credentials own no custom voice models (if no language
- is specified) or own no custom voice models for the specified language.
- """
- self.customizations = customizations
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a VoiceModels object from a json dictionary."""
- args = {}
- if 'customizations' in _dict:
- args['customizations'] = [
- VoiceModel._from_dict(x) for x in (_dict.get('customizations'))
- ]
- else:
- raise ValueError(
- 'Required property \'customizations\' not present in VoiceModels JSON'
- )
- return cls(**args)
-
def _to_dict(self):
"""Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'customizations') and self.customizations is not None:
- _dict['customizations'] = [
- x._to_dict() for x in self.customizations
- ]
- return _dict
+ return self.to_dict()
- def __str__(self):
- """Return a `str` version of this VoiceModels object."""
- return json.dumps(self._to_dict(), indent=2)
+ def __str__(self) -> str:
+ """Return a `str` version of this Voice object."""
+ return json.dumps(self.to_dict(), indent=2)
- def __eq__(self, other):
+ def __eq__(self, other: 'Voice') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
- def __ne__(self, other):
+ def __ne__(self, other: 'Voice') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
-class Voices(object):
+class Voices:
"""
- Voices.
+ Information about all available voices.
- :attr list[Voice] voices: A list of available voices.
+ :param List[Voice] voices: A list of available voices.
"""
- def __init__(self, voices):
+ def __init__(
+ self,
+ voices: List['Voice'],
+ ) -> None:
"""
Initialize a Voices object.
- :param list[Voice] voices: A list of available voices.
+ :param List[Voice] voices: A list of available voices.
"""
self.voices = voices
@classmethod
- def _from_dict(cls, _dict):
+ def from_dict(cls, _dict: Dict) -> 'Voices':
"""Initialize a Voices object from a json dictionary."""
args = {}
- if 'voices' in _dict:
- args['voices'] = [
- Voice._from_dict(x) for x in (_dict.get('voices'))
- ]
+ if (voices := _dict.get('voices')) is not None:
+ args['voices'] = [Voice.from_dict(v) for v in voices]
else:
raise ValueError(
'Required property \'voices\' not present in Voices JSON')
return cls(**args)
- def _to_dict(self):
+ @classmethod
+ def _from_dict(cls, _dict):
+ """Initialize a Voices object from a json dictionary."""
+ return cls.from_dict(_dict)
+
+ def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'voices') and self.voices is not None:
- _dict['voices'] = [x._to_dict() for x in self.voices]
+ voices_list = []
+ for v in self.voices:
+ if isinstance(v, dict):
+ voices_list.append(v)
+ else:
+ voices_list.append(v.to_dict())
+ _dict['voices'] = voices_list
return _dict
- def __str__(self):
+ def _to_dict(self):
+ """Return a json dictionary representing this model."""
+ return self.to_dict()
+
+ def __str__(self) -> str:
"""Return a `str` version of this Voices object."""
- return json.dumps(self._to_dict(), indent=2)
+ return json.dumps(self.to_dict(), indent=2)
- def __eq__(self, other):
+ def __eq__(self, other: 'Voices') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
- def __ne__(self, other):
+ def __ne__(self, other: 'Voices') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
-class Word(object):
+class Word:
"""
- Word.
-
- :attr str word: A word from the custom voice model.
- :attr str translation: The phonetic or sounds-like translation for the word. A
- phonetic translation is based on the SSML format for representing the phonetic string
- of a word either as an IPA or IBM SPR translation. A sounds-like translation consists
- of one or more words that, when combined, sound like the word.
- :attr str part_of_speech: (optional) **Japanese only.** The part of speech for the
- word. The service uses the value to produce the correct intonation for the word. You
- can create only a single entry, with or without a single part of speech, for any word;
- you cannot create multiple entries with different parts of speech for the same word.
- For more information, see [Working with Japanese
- entries](https://cloud.ibm.com/docs/services/text-to-speech/custom-rules.html#jaNotes).
+ Information about a word for the custom model.
+
+ :param str word: The word for the custom model. The maximum length of a word is
+ 49 characters.
+ :param str translation: The phonetic or sounds-like translation for the word. A
+ phonetic translation is based on the SSML format for representing the phonetic
+ string of a word either as an IPA or IBM SPR translation. A sounds-like
+ translation consists of one or more words that, when combined, sound like the
+ word. The maximum length of a translation is 499 characters.
+ :param str part_of_speech: (optional) **Japanese only.** The part of speech for
+ the word. The service uses the value to produce the correct intonation for the
+ word. You can create only a single entry, with or without a single part of
+ speech, for any word; you cannot create multiple entries with different parts of
+ speech for the same word. For more information, see [Working with Japanese
+ entries](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-rules#jaNotes).
"""
- def __init__(self, word, translation, part_of_speech=None):
+ def __init__(
+ self,
+ word: str,
+ translation: str,
+ *,
+ part_of_speech: Optional[str] = None,
+ ) -> None:
"""
Initialize a Word object.
- :param str word: A word from the custom voice model.
- :param str translation: The phonetic or sounds-like translation for the word. A
- phonetic translation is based on the SSML format for representing the phonetic
- string of a word either as an IPA or IBM SPR translation. A sounds-like
- translation consists of one or more words that, when combined, sound like the
- word.
- :param str part_of_speech: (optional) **Japanese only.** The part of speech for
- the word. The service uses the value to produce the correct intonation for the
- word. You can create only a single entry, with or without a single part of speech,
- for any word; you cannot create multiple entries with different parts of speech
- for the same word. For more information, see [Working with Japanese
- entries](https://cloud.ibm.com/docs/services/text-to-speech/custom-rules.html#jaNotes).
+ :param str word: The word for the custom model. The maximum length of a
+ word is 49 characters.
+ :param str translation: The phonetic or sounds-like translation for the
+ word. A phonetic translation is based on the SSML format for representing
+ the phonetic string of a word either as an IPA or IBM SPR translation. A
+ sounds-like translation consists of one or more words that, when combined,
+ sound like the word. The maximum length of a translation is 499 characters.
+ :param str part_of_speech: (optional) **Japanese only.** The part of speech
+ for the word. The service uses the value to produce the correct intonation
+ for the word. You can create only a single entry, with or without a single
+ part of speech, for any word; you cannot create multiple entries with
+ different parts of speech for the same word. For more information, see
+ [Working with Japanese
+ entries](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-rules#jaNotes).
"""
self.word = word
self.translation = translation
self.part_of_speech = part_of_speech
@classmethod
- def _from_dict(cls, _dict):
+ def from_dict(cls, _dict: Dict) -> 'Word':
"""Initialize a Word object from a json dictionary."""
args = {}
- if 'word' in _dict:
- args['word'] = _dict.get('word')
+ if (word := _dict.get('word')) is not None:
+ args['word'] = word
else:
raise ValueError(
'Required property \'word\' not present in Word JSON')
- if 'translation' in _dict:
- args['translation'] = _dict.get('translation')
+ if (translation := _dict.get('translation')) is not None:
+ args['translation'] = translation
else:
raise ValueError(
'Required property \'translation\' not present in Word JSON')
- if 'part_of_speech' in _dict:
- args['part_of_speech'] = _dict.get('part_of_speech')
+ if (part_of_speech := _dict.get('part_of_speech')) is not None:
+ args['part_of_speech'] = part_of_speech
return cls(**args)
- def _to_dict(self):
+ @classmethod
+ def _from_dict(cls, _dict):
+ """Initialize a Word object from a json dictionary."""
+ return cls.from_dict(_dict)
+
+ def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'word') and self.word is not None:
@@ -1541,76 +3640,130 @@ def _to_dict(self):
_dict['part_of_speech'] = self.part_of_speech
return _dict
- def __str__(self):
+ def _to_dict(self):
+ """Return a json dictionary representing this model."""
+ return self.to_dict()
+
+ def __str__(self) -> str:
"""Return a `str` version of this Word object."""
- return json.dumps(self._to_dict(), indent=2)
+ return json.dumps(self.to_dict(), indent=2)
- def __eq__(self, other):
+ def __eq__(self, other: 'Word') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
- def __ne__(self, other):
+ def __ne__(self, other: 'Word') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
+ class PartOfSpeechEnum(str, Enum):
+ """
+ **Japanese only.** The part of speech for the word. The service uses the value to
+ produce the correct intonation for the word. You can create only a single entry,
+ with or without a single part of speech, for any word; you cannot create multiple
+ entries with different parts of speech for the same word. For more information,
+ see [Working with Japanese
+ entries](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-rules#jaNotes).
+ """
-class Words(object):
+ DOSI = 'Dosi'
+ FUKU = 'Fuku'
+ GOBI = 'Gobi'
+ HOKA = 'Hoka'
+ JODO = 'Jodo'
+ JOSI = 'Josi'
+ KATO = 'Kato'
+ KEDO = 'Kedo'
+ KEYO = 'Keyo'
+ KIGO = 'Kigo'
+ KOYU = 'Koyu'
+ MESI = 'Mesi'
+ RETA = 'Reta'
+ STBI = 'Stbi'
+ STTO = 'Stto'
+ STZO = 'Stzo'
+ SUJI = 'Suji'
+
+
+class Words:
"""
- Words.
-
- :attr list[Word] words: The **Add custom words** method accepts an array of `Word`
- objects. Each object provides a word that is to be added or updated for the custom
- voice model and the word's translation.
- The **List custom words** method returns an array of `Word` objects. Each object shows
- a word and its translation from the custom voice model. The words are listed in
- alphabetical order, with uppercase letters listed before lowercase letters. The array
- is empty if the custom model contains no words.
+ For the [Add custom words](#addwords) method, one or more words that are to be added
+ or updated for the custom model and the translation for each specified word.
+ For the [List custom words](#listwords) method, the words and their translations from
+ the custom model.
+
+ :param List[Word] words: The [Add custom words](#addwords) method accepts an
+ array of `Word` objects. Each object provides a word that is to be added or
+ updated for the custom model and the word's translation.
+ The [List custom words](#listwords) method returns an array of `Word` objects.
+ Each object shows a word and its translation from the custom model. The words
+ are listed in alphabetical order, with uppercase letters listed before lowercase
+ letters. The array is empty if the custom model contains no words.
"""
- def __init__(self, words):
+ def __init__(
+ self,
+ words: List['Word'],
+ ) -> None:
"""
Initialize a Words object.
- :param list[Word] words: The **Add custom words** method accepts an array of
- `Word` objects. Each object provides a word that is to be added or updated for the
- custom voice model and the word's translation.
- The **List custom words** method returns an array of `Word` objects. Each object
- shows a word and its translation from the custom voice model. The words are listed
- in alphabetical order, with uppercase letters listed before lowercase letters. The
- array is empty if the custom model contains no words.
+ :param List[Word] words: The [Add custom words](#addwords) method accepts
+ an array of `Word` objects. Each object provides a word that is to be added
+ or updated for the custom model and the word's translation.
+ The [List custom words](#listwords) method returns an array of `Word`
+ objects. Each object shows a word and its translation from the custom
+ model. The words are listed in alphabetical order, with uppercase letters
+ listed before lowercase letters. The array is empty if the custom model
+ contains no words.
"""
self.words = words
@classmethod
- def _from_dict(cls, _dict):
+ def from_dict(cls, _dict: Dict) -> 'Words':
"""Initialize a Words object from a json dictionary."""
args = {}
- if 'words' in _dict:
- args['words'] = [Word._from_dict(x) for x in (_dict.get('words'))]
+ if (words := _dict.get('words')) is not None:
+ args['words'] = [Word.from_dict(v) for v in words]
else:
raise ValueError(
'Required property \'words\' not present in Words JSON')
return cls(**args)
- def _to_dict(self):
+ @classmethod
+ def _from_dict(cls, _dict):
+ """Initialize a Words object from a json dictionary."""
+ return cls.from_dict(_dict)
+
+ def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'words') and self.words is not None:
- _dict['words'] = [x._to_dict() for x in self.words]
+ words_list = []
+ for v in self.words:
+ if isinstance(v, dict):
+ words_list.append(v)
+ else:
+ words_list.append(v.to_dict())
+ _dict['words'] = words_list
return _dict
- def __str__(self):
+ def _to_dict(self):
+ """Return a json dictionary representing this model."""
+ return self.to_dict()
+
+ def __str__(self) -> str:
"""Return a `str` version of this Words object."""
- return json.dumps(self._to_dict(), indent=2)
+ return json.dumps(self.to_dict(), indent=2)
- def __eq__(self, other):
+ def __eq__(self, other: 'Words') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
- def __ne__(self, other):
+ def __ne__(self, other: 'Words') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
diff --git a/ibm_watson/tone_analyzer_v3.py b/ibm_watson/tone_analyzer_v3.py
deleted file mode 100644
index 4711c7680..000000000
--- a/ibm_watson/tone_analyzer_v3.py
+++ /dev/null
@@ -1,1087 +0,0 @@
-# coding: utf-8
-
-# Copyright 2018 IBM All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-The IBM Watson™ Tone Analyzer service uses linguistic analysis to detect emotional
-and language tones in written text. The service can analyze tone at both the document and
-sentence levels. You can use the service to understand how your written communications are
-perceived and then to improve the tone of your communications. Businesses can use the
-service to learn the tone of their customers' communications and to respond to each
-customer appropriately, or to understand and improve their customer conversations.
-**Note:** Request logging is disabled for the Tone Analyzer service. Regardless of whether
-you set the `X-Watson-Learning-Opt-Out` request header, the service does not log or retain
-data from requests and responses.
-"""
-
-from __future__ import absolute_import
-
-import json
-from .common import get_sdk_headers
-from ibm_cloud_sdk_core import BaseService
-
-##############################################################################
-# Service
-##############################################################################
-
-
-class ToneAnalyzerV3(BaseService):
- """The Tone Analyzer V3 service."""
-
- default_url = 'https://gateway.watsonplatform.net/tone-analyzer/api'
-
- def __init__(
- self,
- version,
- url=default_url,
- username=None,
- password=None,
- iam_apikey=None,
- iam_access_token=None,
- iam_url=None,
- ):
- """
- Construct a new client for the Tone Analyzer service.
-
- :param str version: The API version date to use with the service, in
- "YYYY-MM-DD" format. Whenever the API is changed in a backwards
- incompatible way, a new minor version of the API is released.
- The service uses the API version for the date you specify, or
- the most recent version before that date. Note that you should
- not programmatically specify the current date at runtime, in
- case the API has been updated since your application's release.
- Instead, specify a version date that is compatible with your
- application, and don't change it until your application is
- ready for a later version.
-
- :param str url: The base url to use when contacting the service (e.g.
- "https://gateway.watsonplatform.net/tone-analyzer/api/tone-analyzer/api").
- The base url may differ between IBM Cloud regions.
-
- :param str username: The username used to authenticate with the service.
- Username and password credentials are only required to run your
- application locally or outside of IBM Cloud. When running on
- IBM Cloud, the credentials will be automatically loaded from the
- `VCAP_SERVICES` environment variable.
-
- :param str password: The password used to authenticate with the service.
- Username and password credentials are only required to run your
- application locally or outside of IBM Cloud. When running on
- IBM Cloud, the credentials will be automatically loaded from the
- `VCAP_SERVICES` environment variable.
-
- :param str iam_apikey: An API key that can be used to request IAM tokens. If
- this API key is provided, the SDK will manage the token and handle the
- refreshing.
-
- :param str iam_access_token: An IAM access token is fully managed by the application.
- Responsibility falls on the application to refresh the token, either before
- it expires or reactively upon receiving a 401 from the service as any requests
- made with an expired token will fail.
-
- :param str iam_url: An optional URL for the IAM service API. Defaults to
- 'https://iam.cloud.ibm.com/identity/token'.
- """
-
- BaseService.__init__(
- self,
- vcap_services_name='tone_analyzer',
- url=url,
- username=username,
- password=password,
- iam_apikey=iam_apikey,
- iam_access_token=iam_access_token,
- iam_url=iam_url,
- use_vcap_services=True,
- display_name='Tone Analyzer')
- self.version = version
-
- #########################
- # Methods
- #########################
-
- def tone(self,
- tone_input,
- sentences=None,
- tones=None,
- content_language=None,
- accept_language=None,
- content_type=None,
- **kwargs):
- """
- Analyze general tone.
-
- Use the general purpose endpoint to analyze the tone of your input content. The
- service analyzes the content for emotional and language tones. The method always
- analyzes the tone of the full document; by default, it also analyzes the tone of
- each individual sentence of the content.
- You can submit no more than 128 KB of total input content and no more than 1000
- individual sentences in JSON, plain text, or HTML format. The service analyzes the
- first 1000 sentences for document-level analysis and only the first 100 sentences
- for sentence-level analysis.
- Per the JSON specification, the default character encoding for JSON content is
- effectively always UTF-8; per the HTTP specification, the default encoding for
- plain text and HTML is ISO-8859-1 (effectively, the ASCII character set). When
- specifying a content type of plain text or HTML, include the `charset` parameter
- to indicate the character encoding of the input text; for example: `Content-Type:
- text/plain;charset=utf-8`. For `text/html`, the service removes HTML tags and
- analyzes only the textual content.
- **See also:** [Using the general-purpose
- endpoint](https://cloud.ibm.com/docs/services/tone-analyzer/using-tone.html#using-the-general-purpose-endpoint).
-
- :param ToneInput tone_input: JSON, plain text, or HTML input that contains the
- content to be analyzed. For JSON input, provide an object of type `ToneInput`.
- :param bool sentences: Indicates whether the service is to return an analysis of
- each individual sentence in addition to its analysis of the full document. If
- `true` (the default), the service returns results for each sentence.
- :param list[str] tones: **`2017-09-21`:** Deprecated. The service continues to
- accept the parameter for backward-compatibility, but the parameter no longer
- affects the response.
- **`2016-05-19`:** A comma-separated list of tones for which the service is to
- return its analysis of the input; the indicated tones apply both to the full
- document and to individual sentences of the document. You can specify one or more
- of the valid values. Omit the parameter to request results for all three tones.
- :param str content_language: The language of the input text for the request:
- English or French. Regional variants are treated as their parent language; for
- example, `en-US` is interpreted as `en`. The input content must match the
- specified language. Do not submit content that contains both languages. You can
- use different languages for **Content-Language** and **Accept-Language**.
- * **`2017-09-21`:** Accepts `en` or `fr`.
- * **`2016-05-19`:** Accepts only `en`.
- :param str accept_language: The desired language of the response. For
- two-character arguments, regional variants are treated as their parent language;
- for example, `en-US` is interpreted as `en`. You can use different languages for
- **Content-Language** and **Accept-Language**.
- :param str content_type: The type of the input. A character encoding can be
- specified by including a `charset` parameter. For example,
- 'text/plain;charset=utf-8'.
- :param dict headers: A `dict` containing the request headers
- :return: A `DetailedResponse` containing the result, headers and HTTP status code.
- :rtype: DetailedResponse
- """
-
- if tone_input is None:
- raise ValueError('tone_input must be provided')
- if isinstance(tone_input, ToneInput):
- tone_input = self._convert_model(tone_input, ToneInput)
-
- headers = {
- 'Content-Language': content_language,
- 'Accept-Language': accept_language,
- 'Content-Type': content_type
- }
- if 'headers' in kwargs:
- headers.update(kwargs.get('headers'))
- sdk_headers = get_sdk_headers('tone_analyzer', 'V3', 'tone')
- headers.update(sdk_headers)
-
- params = {
- 'version': self.version,
- 'sentences': sentences,
- 'tones': self._convert_list(tones)
- }
-
- if content_type == 'application/json' and isinstance(tone_input, dict):
- data = json.dumps(tone_input)
- else:
- data = tone_input
-
- url = '/v3/tone'
- response = self.request(
- method='POST',
- url=url,
- headers=headers,
- params=params,
- data=data,
- accept_json=True)
- return response
-
- def tone_chat(self,
- utterances,
- content_language=None,
- accept_language=None,
- **kwargs):
- """
- Analyze customer engagement tone.
-
- Use the customer engagement endpoint to analyze the tone of customer service and
- customer support conversations. For each utterance of a conversation, the method
- reports the most prevalent subset of the following seven tones: sad, frustrated,
- satisfied, excited, polite, impolite, and sympathetic.
- If you submit more than 50 utterances, the service returns a warning for the
- overall content and analyzes only the first 50 utterances. If you submit a single
- utterance that contains more than 500 characters, the service returns an error for
- that utterance and does not analyze the utterance. The request fails if all
- utterances have more than 500 characters. Per the JSON specification, the default
- character encoding for JSON content is effectively always UTF-8.
- **See also:** [Using the customer-engagement
- endpoint](https://cloud.ibm.com/docs/services/tone-analyzer/using-tone-chat.html#using-the-customer-engagement-endpoint).
-
- :param list[Utterance] utterances: An array of `Utterance` objects that provides
- the input content that the service is to analyze.
- :param str content_language: The language of the input text for the request:
- English or French. Regional variants are treated as their parent language; for
- example, `en-US` is interpreted as `en`. The input content must match the
- specified language. Do not submit content that contains both languages. You can
- use different languages for **Content-Language** and **Accept-Language**.
- * **`2017-09-21`:** Accepts `en` or `fr`.
- * **`2016-05-19`:** Accepts only `en`.
- :param str accept_language: The desired language of the response. For
- two-character arguments, regional variants are treated as their parent language;
- for example, `en-US` is interpreted as `en`. You can use different languages for
- **Content-Language** and **Accept-Language**.
- :param dict headers: A `dict` containing the request headers
- :return: A `DetailedResponse` containing the result, headers and HTTP status code.
- :rtype: DetailedResponse
- """
-
- if utterances is None:
- raise ValueError('utterances must be provided')
- utterances = [self._convert_model(x, Utterance) for x in utterances]
-
- headers = {
- 'Content-Language': content_language,
- 'Accept-Language': accept_language
- }
- if 'headers' in kwargs:
- headers.update(kwargs.get('headers'))
- sdk_headers = get_sdk_headers('tone_analyzer', 'V3', 'tone_chat')
- headers.update(sdk_headers)
-
- params = {'version': self.version}
-
- data = {'utterances': utterances}
-
- url = '/v3/tone_chat'
- response = self.request(
- method='POST',
- url=url,
- headers=headers,
- params=params,
- json=data,
- accept_json=True)
- return response
-
-
-##############################################################################
-# Models
-##############################################################################
-
-
-class DocumentAnalysis(object):
- """
- An object of type `DocumentAnalysis` that provides the results of the analysis for the
- full input document.
-
- :attr list[ToneScore] tones: (optional) **`2017-09-21`:** An array of `ToneScore`
- objects that provides the results of the analysis for each qualifying tone of the
- document. The array includes results for any tone whose score is at least 0.5. The
- array is empty if no tone has a score that meets this threshold. **`2016-05-19`:** Not
- returned.
- :attr list[ToneCategory] tone_categories: (optional) **`2017-09-21`:** Not returned.
- **`2016-05-19`:** An array of `ToneCategory` objects that provides the results of the
- tone analysis for the full document of the input content. The service returns results
- only for the tones specified with the `tones` parameter of the request.
- :attr str warning: (optional) **`2017-09-21`:** A warning message if the overall
- content exceeds 128 KB or contains more than 1000 sentences. The service analyzes only
- the first 1000 sentences for document-level analysis and the first 100 sentences for
- sentence-level analysis. **`2016-05-19`:** Not returned.
- """
-
- def __init__(self, tones=None, tone_categories=None, warning=None):
- """
- Initialize a DocumentAnalysis object.
-
- :param list[ToneScore] tones: (optional) **`2017-09-21`:** An array of `ToneScore`
- objects that provides the results of the analysis for each qualifying tone of the
- document. The array includes results for any tone whose score is at least 0.5. The
- array is empty if no tone has a score that meets this threshold. **`2016-05-19`:**
- Not returned.
- :param list[ToneCategory] tone_categories: (optional) **`2017-09-21`:** Not
- returned. **`2016-05-19`:** An array of `ToneCategory` objects that provides the
- results of the tone analysis for the full document of the input content. The
- service returns results only for the tones specified with the `tones` parameter of
- the request.
- :param str warning: (optional) **`2017-09-21`:** A warning message if the overall
- content exceeds 128 KB or contains more than 1000 sentences. The service analyzes
- only the first 1000 sentences for document-level analysis and the first 100
- sentences for sentence-level analysis. **`2016-05-19`:** Not returned.
- """
- self.tones = tones
- self.tone_categories = tone_categories
- self.warning = warning
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a DocumentAnalysis object from a json dictionary."""
- args = {}
- if 'tones' in _dict:
- args['tones'] = [
- ToneScore._from_dict(x) for x in (_dict.get('tones'))
- ]
- if 'tone_categories' in _dict:
- args['tone_categories'] = [
- ToneCategory._from_dict(x)
- for x in (_dict.get('tone_categories'))
- ]
- if 'warning' in _dict:
- args['warning'] = _dict.get('warning')
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'tones') and self.tones is not None:
- _dict['tones'] = [x._to_dict() for x in self.tones]
- if hasattr(self,
- 'tone_categories') and self.tone_categories is not None:
- _dict['tone_categories'] = [
- x._to_dict() for x in self.tone_categories
- ]
- if hasattr(self, 'warning') and self.warning is not None:
- _dict['warning'] = self.warning
- return _dict
-
- def __str__(self):
- """Return a `str` version of this DocumentAnalysis object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class SentenceAnalysis(object):
- """
- SentenceAnalysis.
-
- :attr int sentence_id: The unique identifier of a sentence of the input content. The
- first sentence has ID 0, and the ID of each subsequent sentence is incremented by one.
- :attr str text: The text of the input sentence.
- :attr list[ToneScore] tones: (optional) **`2017-09-21`:** An array of `ToneScore`
- objects that provides the results of the analysis for each qualifying tone of the
- sentence. The array includes results for any tone whose score is at least 0.5. The
- array is empty if no tone has a score that meets this threshold. **`2016-05-19`:** Not
- returned.
- :attr list[ToneCategory] tone_categories: (optional) **`2017-09-21`:** Not returned.
- **`2016-05-19`:** An array of `ToneCategory` objects that provides the results of the
- tone analysis for the sentence. The service returns results only for the tones
- specified with the `tones` parameter of the request.
- :attr int input_from: (optional) **`2017-09-21`:** Not returned. **`2016-05-19`:** The
- offset of the first character of the sentence in the overall input content.
- :attr int input_to: (optional) **`2017-09-21`:** Not returned. **`2016-05-19`:** The
- offset of the last character of the sentence in the overall input content.
- """
-
- def __init__(self,
- sentence_id,
- text,
- tones=None,
- tone_categories=None,
- input_from=None,
- input_to=None):
- """
- Initialize a SentenceAnalysis object.
-
- :param int sentence_id: The unique identifier of a sentence of the input content.
- The first sentence has ID 0, and the ID of each subsequent sentence is incremented
- by one.
- :param str text: The text of the input sentence.
- :param list[ToneScore] tones: (optional) **`2017-09-21`:** An array of `ToneScore`
- objects that provides the results of the analysis for each qualifying tone of the
- sentence. The array includes results for any tone whose score is at least 0.5. The
- array is empty if no tone has a score that meets this threshold. **`2016-05-19`:**
- Not returned.
- :param list[ToneCategory] tone_categories: (optional) **`2017-09-21`:** Not
- returned. **`2016-05-19`:** An array of `ToneCategory` objects that provides the
- results of the tone analysis for the sentence. The service returns results only
- for the tones specified with the `tones` parameter of the request.
- :param int input_from: (optional) **`2017-09-21`:** Not returned.
- **`2016-05-19`:** The offset of the first character of the sentence in the overall
- input content.
- :param int input_to: (optional) **`2017-09-21`:** Not returned. **`2016-05-19`:**
- The offset of the last character of the sentence in the overall input content.
- """
- self.sentence_id = sentence_id
- self.text = text
- self.tones = tones
- self.tone_categories = tone_categories
- self.input_from = input_from
- self.input_to = input_to
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a SentenceAnalysis object from a json dictionary."""
- args = {}
- if 'sentence_id' in _dict:
- args['sentence_id'] = _dict.get('sentence_id')
- else:
- raise ValueError(
- 'Required property \'sentence_id\' not present in SentenceAnalysis JSON'
- )
- if 'text' in _dict:
- args['text'] = _dict.get('text')
- else:
- raise ValueError(
- 'Required property \'text\' not present in SentenceAnalysis JSON'
- )
- if 'tones' in _dict:
- args['tones'] = [
- ToneScore._from_dict(x) for x in (_dict.get('tones'))
- ]
- if 'tone_categories' in _dict:
- args['tone_categories'] = [
- ToneCategory._from_dict(x)
- for x in (_dict.get('tone_categories'))
- ]
- if 'input_from' in _dict:
- args['input_from'] = _dict.get('input_from')
- if 'input_to' in _dict:
- args['input_to'] = _dict.get('input_to')
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'sentence_id') and self.sentence_id is not None:
- _dict['sentence_id'] = self.sentence_id
- if hasattr(self, 'text') and self.text is not None:
- _dict['text'] = self.text
- if hasattr(self, 'tones') and self.tones is not None:
- _dict['tones'] = [x._to_dict() for x in self.tones]
- if hasattr(self,
- 'tone_categories') and self.tone_categories is not None:
- _dict['tone_categories'] = [
- x._to_dict() for x in self.tone_categories
- ]
- if hasattr(self, 'input_from') and self.input_from is not None:
- _dict['input_from'] = self.input_from
- if hasattr(self, 'input_to') and self.input_to is not None:
- _dict['input_to'] = self.input_to
- return _dict
-
- def __str__(self):
- """Return a `str` version of this SentenceAnalysis object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class ToneAnalysis(object):
- """
- ToneAnalysis.
-
- :attr DocumentAnalysis document_tone: An object of type `DocumentAnalysis` that
- provides the results of the analysis for the full input document.
- :attr list[SentenceAnalysis] sentences_tone: (optional) An array of `SentenceAnalysis`
- objects that provides the results of the analysis for the individual sentences of the
- input content. The service returns results only for the first 100 sentences of the
- input. The field is omitted if the `sentences` parameter of the request is set to
- `false`.
- """
-
- def __init__(self, document_tone, sentences_tone=None):
- """
- Initialize a ToneAnalysis object.
-
- :param DocumentAnalysis document_tone: An object of type `DocumentAnalysis` that
- provides the results of the analysis for the full input document.
- :param list[SentenceAnalysis] sentences_tone: (optional) An array of
- `SentenceAnalysis` objects that provides the results of the analysis for the
- individual sentences of the input content. The service returns results only for
- the first 100 sentences of the input. The field is omitted if the `sentences`
- parameter of the request is set to `false`.
- """
- self.document_tone = document_tone
- self.sentences_tone = sentences_tone
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a ToneAnalysis object from a json dictionary."""
- args = {}
- if 'document_tone' in _dict:
- args['document_tone'] = DocumentAnalysis._from_dict(
- _dict.get('document_tone'))
- else:
- raise ValueError(
- 'Required property \'document_tone\' not present in ToneAnalysis JSON'
- )
- if 'sentences_tone' in _dict:
- args['sentences_tone'] = [
- SentenceAnalysis._from_dict(x)
- for x in (_dict.get('sentences_tone'))
- ]
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'document_tone') and self.document_tone is not None:
- _dict['document_tone'] = self.document_tone._to_dict()
- if hasattr(self, 'sentences_tone') and self.sentences_tone is not None:
- _dict['sentences_tone'] = [
- x._to_dict() for x in self.sentences_tone
- ]
- return _dict
-
- def __str__(self):
- """Return a `str` version of this ToneAnalysis object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class ToneCategory(object):
- """
- ToneCategory.
-
- :attr list[ToneScore] tones: An array of `ToneScore` objects that provides the results
- for the tones of the category.
- :attr str category_id: The unique, non-localized identifier of the category for the
- results. The service can return results for the following category IDs:
- `emotion_tone`, `language_tone`, and `social_tone`.
- :attr str category_name: The user-visible, localized name of the category.
- """
-
- def __init__(self, tones, category_id, category_name):
- """
- Initialize a ToneCategory object.
-
- :param list[ToneScore] tones: An array of `ToneScore` objects that provides the
- results for the tones of the category.
- :param str category_id: The unique, non-localized identifier of the category for
- the results. The service can return results for the following category IDs:
- `emotion_tone`, `language_tone`, and `social_tone`.
- :param str category_name: The user-visible, localized name of the category.
- """
- self.tones = tones
- self.category_id = category_id
- self.category_name = category_name
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a ToneCategory object from a json dictionary."""
- args = {}
- if 'tones' in _dict:
- args['tones'] = [
- ToneScore._from_dict(x) for x in (_dict.get('tones'))
- ]
- else:
- raise ValueError(
- 'Required property \'tones\' not present in ToneCategory JSON')
- if 'category_id' in _dict:
- args['category_id'] = _dict.get('category_id')
- else:
- raise ValueError(
- 'Required property \'category_id\' not present in ToneCategory JSON'
- )
- if 'category_name' in _dict:
- args['category_name'] = _dict.get('category_name')
- else:
- raise ValueError(
- 'Required property \'category_name\' not present in ToneCategory JSON'
- )
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'tones') and self.tones is not None:
- _dict['tones'] = [x._to_dict() for x in self.tones]
- if hasattr(self, 'category_id') and self.category_id is not None:
- _dict['category_id'] = self.category_id
- if hasattr(self, 'category_name') and self.category_name is not None:
- _dict['category_name'] = self.category_name
- return _dict
-
- def __str__(self):
- """Return a `str` version of this ToneCategory object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class ToneChatScore(object):
- """
- ToneChatScore.
-
- :attr float score: The score for the tone in the range of 0.5 to 1. A score greater
- than 0.75 indicates a high likelihood that the tone is perceived in the utterance.
- :attr str tone_id: The unique, non-localized identifier of the tone for the results.
- The service returns results only for tones whose scores meet a minimum threshold of
- 0.5.
- :attr str tone_name: The user-visible, localized name of the tone.
- """
-
- def __init__(self, score, tone_id, tone_name):
- """
- Initialize a ToneChatScore object.
-
- :param float score: The score for the tone in the range of 0.5 to 1. A score
- greater than 0.75 indicates a high likelihood that the tone is perceived in the
- utterance.
- :param str tone_id: The unique, non-localized identifier of the tone for the
- results. The service returns results only for tones whose scores meet a minimum
- threshold of 0.5.
- :param str tone_name: The user-visible, localized name of the tone.
- """
- self.score = score
- self.tone_id = tone_id
- self.tone_name = tone_name
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a ToneChatScore object from a json dictionary."""
- args = {}
- if 'score' in _dict:
- args['score'] = _dict.get('score')
- else:
- raise ValueError(
- 'Required property \'score\' not present in ToneChatScore JSON')
- if 'tone_id' in _dict:
- args['tone_id'] = _dict.get('tone_id')
- else:
- raise ValueError(
- 'Required property \'tone_id\' not present in ToneChatScore JSON'
- )
- if 'tone_name' in _dict:
- args['tone_name'] = _dict.get('tone_name')
- else:
- raise ValueError(
- 'Required property \'tone_name\' not present in ToneChatScore JSON'
- )
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'score') and self.score is not None:
- _dict['score'] = self.score
- if hasattr(self, 'tone_id') and self.tone_id is not None:
- _dict['tone_id'] = self.tone_id
- if hasattr(self, 'tone_name') and self.tone_name is not None:
- _dict['tone_name'] = self.tone_name
- return _dict
-
- def __str__(self):
- """Return a `str` version of this ToneChatScore object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class ToneInput(object):
- """
- ToneInput.
-
- :attr str text: The input content that the service is to analyze.
- """
-
- def __init__(self, text):
- """
- Initialize a ToneInput object.
-
- :param str text: The input content that the service is to analyze.
- """
- self.text = text
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a ToneInput object from a json dictionary."""
- args = {}
- if 'text' in _dict:
- args['text'] = _dict.get('text')
- else:
- raise ValueError(
- 'Required property \'text\' not present in ToneInput JSON')
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'text') and self.text is not None:
- _dict['text'] = self.text
- return _dict
-
- def __str__(self):
- """Return a `str` version of this ToneInput object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class ToneScore(object):
- """
- ToneScore.
-
- :attr float score: The score for the tone.
- * **`2017-09-21`:** The score that is returned lies in the range of 0.5 to 1. A score
- greater than 0.75 indicates a high likelihood that the tone is perceived in the
- content.
- * **`2016-05-19`:** The score that is returned lies in the range of 0 to 1. A score
- less than 0.5 indicates that the tone is unlikely to be perceived in the content; a
- score greater than 0.75 indicates a high likelihood that the tone is perceived.
- :attr str tone_id: The unique, non-localized identifier of the tone.
- * **`2017-09-21`:** The service can return results for the following tone IDs:
- `anger`, `fear`, `joy`, and `sadness` (emotional tones); `analytical`, `confident`,
- and `tentative` (language tones). The service returns results only for tones whose
- scores meet a minimum threshold of 0.5.
- * **`2016-05-19`:** The service can return results for the following tone IDs of the
- different categories: for the `emotion` category: `anger`, `disgust`, `fear`, `joy`,
- and `sadness`; for the `language` category: `analytical`, `confident`, and
- `tentative`; for the `social` category: `openness_big5`, `conscientiousness_big5`,
- `extraversion_big5`, `agreeableness_big5`, and `emotional_range_big5`. The service
- returns scores for all tones of a category, regardless of their values.
- :attr str tone_name: The user-visible, localized name of the tone.
- """
-
- def __init__(self, score, tone_id, tone_name):
- """
- Initialize a ToneScore object.
-
- :param float score: The score for the tone.
- * **`2017-09-21`:** The score that is returned lies in the range of 0.5 to 1. A
- score greater than 0.75 indicates a high likelihood that the tone is perceived in
- the content.
- * **`2016-05-19`:** The score that is returned lies in the range of 0 to 1. A
- score less than 0.5 indicates that the tone is unlikely to be perceived in the
- content; a score greater than 0.75 indicates a high likelihood that the tone is
- perceived.
- :param str tone_id: The unique, non-localized identifier of the tone.
- * **`2017-09-21`:** The service can return results for the following tone IDs:
- `anger`, `fear`, `joy`, and `sadness` (emotional tones); `analytical`,
- `confident`, and `tentative` (language tones). The service returns results only
- for tones whose scores meet a minimum threshold of 0.5.
- * **`2016-05-19`:** The service can return results for the following tone IDs of
- the different categories: for the `emotion` category: `anger`, `disgust`, `fear`,
- `joy`, and `sadness`; for the `language` category: `analytical`, `confident`, and
- `tentative`; for the `social` category: `openness_big5`, `conscientiousness_big5`,
- `extraversion_big5`, `agreeableness_big5`, and `emotional_range_big5`. The service
- returns scores for all tones of a category, regardless of their values.
- :param str tone_name: The user-visible, localized name of the tone.
- """
- self.score = score
- self.tone_id = tone_id
- self.tone_name = tone_name
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a ToneScore object from a json dictionary."""
- args = {}
- if 'score' in _dict:
- args['score'] = _dict.get('score')
- else:
- raise ValueError(
- 'Required property \'score\' not present in ToneScore JSON')
- if 'tone_id' in _dict:
- args['tone_id'] = _dict.get('tone_id')
- else:
- raise ValueError(
- 'Required property \'tone_id\' not present in ToneScore JSON')
- if 'tone_name' in _dict:
- args['tone_name'] = _dict.get('tone_name')
- else:
- raise ValueError(
- 'Required property \'tone_name\' not present in ToneScore JSON')
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'score') and self.score is not None:
- _dict['score'] = self.score
- if hasattr(self, 'tone_id') and self.tone_id is not None:
- _dict['tone_id'] = self.tone_id
- if hasattr(self, 'tone_name') and self.tone_name is not None:
- _dict['tone_name'] = self.tone_name
- return _dict
-
- def __str__(self):
- """Return a `str` version of this ToneScore object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class Utterance(object):
- """
- Utterance.
-
- :attr str text: An utterance contributed by a user in the conversation that is to be
- analyzed. The utterance can contain multiple sentences.
- :attr str user: (optional) A string that identifies the user who contributed the
- utterance specified by the `text` parameter.
- """
-
- def __init__(self, text, user=None):
- """
- Initialize a Utterance object.
-
- :param str text: An utterance contributed by a user in the conversation that is to
- be analyzed. The utterance can contain multiple sentences.
- :param str user: (optional) A string that identifies the user who contributed the
- utterance specified by the `text` parameter.
- """
- self.text = text
- self.user = user
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a Utterance object from a json dictionary."""
- args = {}
- if 'text' in _dict:
- args['text'] = _dict.get('text')
- else:
- raise ValueError(
- 'Required property \'text\' not present in Utterance JSON')
- if 'user' in _dict:
- args['user'] = _dict.get('user')
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'text') and self.text is not None:
- _dict['text'] = self.text
- if hasattr(self, 'user') and self.user is not None:
- _dict['user'] = self.user
- return _dict
-
- def __str__(self):
- """Return a `str` version of this Utterance object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class UtteranceAnalyses(object):
- """
- UtteranceAnalyses.
-
- :attr list[UtteranceAnalysis] utterances_tone: An array of `UtteranceAnalysis` objects
- that provides the results for each utterance of the input.
- :attr str warning: (optional) **`2017-09-21`:** A warning message if the content
- contains more than 50 utterances. The service analyzes only the first 50 utterances.
- **`2016-05-19`:** Not returned.
- """
-
- def __init__(self, utterances_tone, warning=None):
- """
- Initialize a UtteranceAnalyses object.
-
- :param list[UtteranceAnalysis] utterances_tone: An array of `UtteranceAnalysis`
- objects that provides the results for each utterance of the input.
- :param str warning: (optional) **`2017-09-21`:** A warning message if the content
- contains more than 50 utterances. The service analyzes only the first 50
- utterances. **`2016-05-19`:** Not returned.
- """
- self.utterances_tone = utterances_tone
- self.warning = warning
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a UtteranceAnalyses object from a json dictionary."""
- args = {}
- if 'utterances_tone' in _dict:
- args['utterances_tone'] = [
- UtteranceAnalysis._from_dict(x)
- for x in (_dict.get('utterances_tone'))
- ]
- else:
- raise ValueError(
- 'Required property \'utterances_tone\' not present in UtteranceAnalyses JSON'
- )
- if 'warning' in _dict:
- args['warning'] = _dict.get('warning')
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self,
- 'utterances_tone') and self.utterances_tone is not None:
- _dict['utterances_tone'] = [
- x._to_dict() for x in self.utterances_tone
- ]
- if hasattr(self, 'warning') and self.warning is not None:
- _dict['warning'] = self.warning
- return _dict
-
- def __str__(self):
- """Return a `str` version of this UtteranceAnalyses object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class UtteranceAnalysis(object):
- """
- UtteranceAnalysis.
-
- :attr int utterance_id: The unique identifier of the utterance. The first utterance
- has ID 0, and the ID of each subsequent utterance is incremented by one.
- :attr str utterance_text: The text of the utterance.
- :attr list[ToneChatScore] tones: An array of `ToneChatScore` objects that provides
- results for the most prevalent tones of the utterance. The array includes results for
- any tone whose score is at least 0.5. The array is empty if no tone has a score that
- meets this threshold.
- :attr str error: (optional) **`2017-09-21`:** An error message if the utterance
- contains more than 500 characters. The service does not analyze the utterance.
- **`2016-05-19`:** Not returned.
- """
-
- def __init__(self, utterance_id, utterance_text, tones, error=None):
- """
- Initialize a UtteranceAnalysis object.
-
- :param int utterance_id: The unique identifier of the utterance. The first
- utterance has ID 0, and the ID of each subsequent utterance is incremented by one.
- :param str utterance_text: The text of the utterance.
- :param list[ToneChatScore] tones: An array of `ToneChatScore` objects that
- provides results for the most prevalent tones of the utterance. The array includes
- results for any tone whose score is at least 0.5. The array is empty if no tone
- has a score that meets this threshold.
- :param str error: (optional) **`2017-09-21`:** An error message if the utterance
- contains more than 500 characters. The service does not analyze the utterance.
- **`2016-05-19`:** Not returned.
- """
- self.utterance_id = utterance_id
- self.utterance_text = utterance_text
- self.tones = tones
- self.error = error
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a UtteranceAnalysis object from a json dictionary."""
- args = {}
- if 'utterance_id' in _dict:
- args['utterance_id'] = _dict.get('utterance_id')
- else:
- raise ValueError(
- 'Required property \'utterance_id\' not present in UtteranceAnalysis JSON'
- )
- if 'utterance_text' in _dict:
- args['utterance_text'] = _dict.get('utterance_text')
- else:
- raise ValueError(
- 'Required property \'utterance_text\' not present in UtteranceAnalysis JSON'
- )
- if 'tones' in _dict:
- args['tones'] = [
- ToneChatScore._from_dict(x) for x in (_dict.get('tones'))
- ]
- else:
- raise ValueError(
- 'Required property \'tones\' not present in UtteranceAnalysis JSON'
- )
- if 'error' in _dict:
- args['error'] = _dict.get('error')
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'utterance_id') and self.utterance_id is not None:
- _dict['utterance_id'] = self.utterance_id
- if hasattr(self, 'utterance_text') and self.utterance_text is not None:
- _dict['utterance_text'] = self.utterance_text
- if hasattr(self, 'tones') and self.tones is not None:
- _dict['tones'] = [x._to_dict() for x in self.tones]
- if hasattr(self, 'error') and self.error is not None:
- _dict['error'] = self.error
- return _dict
-
- def __str__(self):
- """Return a `str` version of this UtteranceAnalysis object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
diff --git a/ibm_watson/version.py b/ibm_watson/version.py
index 9a8a1b10a..00eae9620 100644
--- a/ibm_watson/version.py
+++ b/ibm_watson/version.py
@@ -1 +1 @@
-__version__ = '3.0.4'
+__version__ = '11.2.0'
diff --git a/ibm_watson/visual_recognition_v3.py b/ibm_watson/visual_recognition_v3.py
deleted file mode 100644
index 05dd2f16c..000000000
--- a/ibm_watson/visual_recognition_v3.py
+++ /dev/null
@@ -1,1771 +0,0 @@
-# coding: utf-8
-
-# Copyright 2018 IBM All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-The IBM Watson™ Visual Recognition service uses deep learning algorithms to identify
-scenes, objects, and faces in images you upload to the service. You can create and train
-a custom classifier to identify subjects that suit your needs.
-"""
-
-from __future__ import absolute_import
-
-import json
-from .common import get_sdk_headers
-from ibm_cloud_sdk_core import BaseService
-from ibm_cloud_sdk_core import datetime_to_string, string_to_datetime
-from os.path import basename
-
-##############################################################################
-# Service
-##############################################################################
-
-
-class VisualRecognitionV3(BaseService):
- """The Visual Recognition V3 service."""
-
- default_url = 'https://gateway.watsonplatform.net/visual-recognition/api'
-
- def __init__(
- self,
- version,
- url=default_url,
- iam_apikey=None,
- iam_access_token=None,
- iam_url=None,
- ):
- """
- Construct a new client for the Visual Recognition service.
-
- :param str version: The API version date to use with the service, in
- "YYYY-MM-DD" format. Whenever the API is changed in a backwards
- incompatible way, a new minor version of the API is released.
- The service uses the API version for the date you specify, or
- the most recent version before that date. Note that you should
- not programmatically specify the current date at runtime, in
- case the API has been updated since your application's release.
- Instead, specify a version date that is compatible with your
- application, and don't change it until your application is
- ready for a later version.
-
- :param str url: The base url to use when contacting the service (e.g.
- "https://gateway.watsonplatform.net/visual-recognition/api/visual-recognition/api").
- The base url may differ between IBM Cloud regions.
-
- :param str iam_apikey: An API key that can be used to request IAM tokens. If
- this API key is provided, the SDK will manage the token and handle the
- refreshing.
-
- :param str iam_access_token: An IAM access token is fully managed by the application.
- Responsibility falls on the application to refresh the token, either before
- it expires or reactively upon receiving a 401 from the service as any requests
- made with an expired token will fail.
-
- :param str iam_url: An optional URL for the IAM service API. Defaults to
- 'https://iam.cloud.ibm.com/identity/token'.
- """
-
- BaseService.__init__(
- self,
- vcap_services_name='watson_vision_combined',
- url=url,
- iam_apikey=iam_apikey,
- iam_access_token=iam_access_token,
- iam_url=iam_url,
- use_vcap_services=True,
- display_name='Visual Recognition')
- self.version = version
-
- #########################
- # General
- #########################
-
- def classify(self,
- images_file=None,
- images_filename=None,
- images_file_content_type=None,
- url=None,
- threshold=None,
- owners=None,
- classifier_ids=None,
- accept_language=None,
- **kwargs):
- """
- Classify images.
-
- Classify images with built-in or custom classifiers.
-
- :param file images_file: An image file (.gif, .jpg, .png, .tif) or .zip file with
- images. Maximum image size is 10 MB. Include no more than 20 images and limit the
- .zip file to 100 MB. Encode the image and .zip file names in UTF-8 if they contain
- non-ASCII characters. The service assumes UTF-8 encoding if it encounters
- non-ASCII characters.
- You can also include an image with the **url** parameter.
- :param str images_filename: The filename for images_file.
- :param str images_file_content_type: The content type of images_file.
- :param str url: The URL of an image (.gif, .jpg, .png, .tif) to analyze. The
- minimum recommended pixel density is 32X32 pixels, but the service tends to
- perform better with images that are at least 224 x 224 pixels. The maximum image
- size is 10 MB.
- You can also include images with the **images_file** parameter.
- :param float threshold: The minimum score a class must have to be displayed in the
- response. Set the threshold to `0.0` to return all identified classes.
- :param list[str] owners: The categories of classifiers to apply. The
- **classifier_ids** parameter overrides **owners**, so make sure that
- **classifier_ids** is empty.
- - Use `IBM` to classify against the `default` general classifier. You get the same
- result if both **classifier_ids** and **owners** parameters are empty.
- - Use `me` to classify against all your custom classifiers. However, for better
- performance use **classifier_ids** to specify the specific custom classifiers to
- apply.
- - Use both `IBM` and `me` to analyze the image against both classifier categories.
- :param list[str] classifier_ids: Which classifiers to apply. Overrides the
- **owners** parameter. You can specify both custom and built-in classifier IDs. The
- built-in `default` classifier is used if both **classifier_ids** and **owners**
- parameters are empty.
- The following built-in classifier IDs require no training:
- - `default`: Returns classes from thousands of general tags.
- - `food`: Enhances specificity and accuracy for images of food items.
- - `explicit`: Evaluates whether the image might be pornographic.
- :param str accept_language: The desired language of parts of the response. See the
- response for details.
- :param dict headers: A `dict` containing the request headers
- :return: A `DetailedResponse` containing the result, headers and HTTP status code.
- :rtype: DetailedResponse
- """
-
- headers = {'Accept-Language': accept_language}
- if 'headers' in kwargs:
- headers.update(kwargs.get('headers'))
- sdk_headers = get_sdk_headers('watson_vision_combined', 'V3',
- 'classify')
- headers.update(sdk_headers)
-
- params = {'version': self.version}
-
- form_data = {}
- if images_file:
- if not images_filename and hasattr(images_file, 'name'):
- images_filename = basename(images_file.name)
- if not images_filename:
- raise ValueError('images_filename must be provided')
- form_data['images_file'] = (images_filename, images_file,
- images_file_content_type or
- 'application/octet-stream')
- if url:
- form_data['url'] = (None, url, 'text/plain')
- if threshold:
- form_data['threshold'] = (None, threshold, 'application/json')
- if owners:
- owners = self._convert_list(owners)
- form_data['owners'] = (None, owners, 'application/json')
- if classifier_ids:
- classifier_ids = self._convert_list(classifier_ids)
- form_data['classifier_ids'] = (None, classifier_ids,
- 'application/json')
-
- url = '/v3/classify'
- response = self.request(
- method='POST',
- url=url,
- headers=headers,
- params=params,
- files=form_data,
- accept_json=True)
- return response
-
- #########################
- # Face
- #########################
-
- def detect_faces(self,
- images_file=None,
- images_filename=None,
- images_file_content_type=None,
- url=None,
- accept_language=None,
- **kwargs):
- """
- Detect faces in images.
-
- **Important:** On April 2, 2018, the identity information in the response to calls
- to the Face model was removed. The identity information refers to the `name` of
- the person, `score`, and `type_hierarchy` knowledge graph. For details about the
- enhanced Face model, see the [Release
- notes](https://cloud.ibm.com/docs/services/visual-recognition/release-notes.html#2april2018).
- Analyze and get data about faces in images. Responses can include estimated age
- and gender. This feature uses a built-in model, so no training is necessary. The
- Detect faces method does not support general biometric facial recognition.
- Supported image formats include .gif, .jpg, .png, and .tif. The maximum image size
- is 10 MB. The minimum recommended pixel density is 32X32 pixels, but the service
- tends to perform better with images that are at least 224 x 224 pixels.
-
- :param file images_file: An image file (gif, .jpg, .png, .tif.) or .zip file with
- images. Limit the .zip file to 100 MB. You can include a maximum of 15 images in a
- request.
- Encode the image and .zip file names in UTF-8 if they contain non-ASCII
- characters. The service assumes UTF-8 encoding if it encounters non-ASCII
- characters.
- You can also include an image with the **url** parameter.
- :param str images_filename: The filename for images_file.
- :param str images_file_content_type: The content type of images_file.
- :param str url: The URL of an image to analyze. Must be in .gif, .jpg, .png, or
- .tif format. The minimum recommended pixel density is 32X32 pixels, but the
- service tends to perform better with images that are at least 224 x 224 pixels.
- The maximum image size is 10 MB. Redirects are followed, so you can use a
- shortened URL.
- You can also include images with the **images_file** parameter.
- :param str accept_language: The desired language of parts of the response. See the
- response for details.
- :param dict headers: A `dict` containing the request headers
- :return: A `DetailedResponse` containing the result, headers and HTTP status code.
- :rtype: DetailedResponse
- """
-
- headers = {'Accept-Language': accept_language}
- if 'headers' in kwargs:
- headers.update(kwargs.get('headers'))
- sdk_headers = get_sdk_headers('watson_vision_combined', 'V3',
- 'detect_faces')
- headers.update(sdk_headers)
-
- params = {'version': self.version}
-
- form_data = {}
- if images_file:
- if not images_filename and hasattr(images_file, 'name'):
- images_filename = basename(images_file.name)
- if not images_filename:
- raise ValueError('images_filename must be provided')
- form_data['images_file'] = (images_filename, images_file,
- images_file_content_type or
- 'application/octet-stream')
- if url:
- form_data['url'] = (None, url, 'text/plain')
-
- url = '/v3/detect_faces'
- response = self.request(
- method='POST',
- url=url,
- headers=headers,
- params=params,
- files=form_data,
- accept_json=True)
- return response
-
- #########################
- # Custom
- #########################
-
- def create_classifier(self,
- name,
- positive_examples,
- negative_examples=None,
- negative_examples_filename=None,
- **kwargs):
- """
- Create a classifier.
-
- Train a new multi-faceted classifier on the uploaded image data. Create your
- custom classifier with positive or negative examples. Include at least two sets of
- examples, either two positive example files or one positive and one negative file.
- You can upload a maximum of 256 MB per call.
- Encode all names in UTF-8 if they contain non-ASCII characters (.zip and image
- file names, and classifier and class names). The service assumes UTF-8 encoding if
- it encounters non-ASCII characters.
-
- :param str name: The name of the new classifier. Encode special characters in
- UTF-8.
- :param dict positive_examples: A dictionary that contains the value for each
- classname. The value is a .zip file of images that depict the visual subject of a
- class in the new classifier. You can include more than one positive example file
- in a call.
- Specify the parameter name by appending `_positive_examples` to the class name.
- For example, `goldenretriever_positive_examples` creates the class
- **goldenretriever**.
- Include at least 10 images in .jpg or .png format. The minimum recommended image
- resolution is 32X32 pixels. The maximum number of images is 10,000 images or 100
- MB per .zip file.
- Encode special characters in the file name in UTF-8.
- :param file negative_examples: A .zip file of images that do not depict the visual
- subject of any of the classes of the new classifier. Must contain a minimum of 10
- images.
- Encode special characters in the file name in UTF-8.
- :param str negative_examples_filename: The filename for negative_examples.
- :param dict headers: A `dict` containing the request headers
- :return: A `DetailedResponse` containing the result, headers and HTTP status code.
- :rtype: DetailedResponse
- """
-
- if name is None:
- raise ValueError('name must be provided')
- if not positive_examples:
- raise ValueError('positive_examples must be provided')
-
- headers = {}
- if 'headers' in kwargs:
- headers.update(kwargs.get('headers'))
- sdk_headers = get_sdk_headers('watson_vision_combined', 'V3',
- 'create_classifier')
- headers.update(sdk_headers)
-
- params = {'version': self.version}
-
- form_data = {}
- form_data['name'] = (None, name, 'text/plain')
- for key in positive_examples.keys():
- part_name = '%s_positive_examples' % (key)
- value = positive_examples[key]
- if hasattr(value, 'name'):
- filename = basename(value.name)
- form_data[part_name] = (filename, value, 'application/octet-stream')
- if negative_examples:
- if not negative_examples_filename and hasattr(
- negative_examples, 'name'):
- negative_examples_filename = basename(negative_examples.name)
- if not negative_examples_filename:
- raise ValueError('negative_examples_filename must be provided')
- form_data['negative_examples'] = (negative_examples_filename,
- negative_examples,
- 'application/octet-stream')
-
- url = '/v3/classifiers'
- response = self.request(
- method='POST',
- url=url,
- headers=headers,
- params=params,
- files=form_data,
- accept_json=True)
- return response
-
- def delete_classifier(self, classifier_id, **kwargs):
- """
- Delete a classifier.
-
- :param str classifier_id: The ID of the classifier.
- :param dict headers: A `dict` containing the request headers
- :return: A `DetailedResponse` containing the result, headers and HTTP status code.
- :rtype: DetailedResponse
- """
-
- if classifier_id is None:
- raise ValueError('classifier_id must be provided')
-
- headers = {}
- if 'headers' in kwargs:
- headers.update(kwargs.get('headers'))
- sdk_headers = get_sdk_headers('watson_vision_combined', 'V3',
- 'delete_classifier')
- headers.update(sdk_headers)
-
- params = {'version': self.version}
-
- url = '/v3/classifiers/{0}'.format(
- *self._encode_path_vars(classifier_id))
- response = self.request(
- method='DELETE',
- url=url,
- headers=headers,
- params=params,
- accept_json=True)
- return response
-
- def get_classifier(self, classifier_id, **kwargs):
- """
- Retrieve classifier details.
-
- Retrieve information about a custom classifier.
-
- :param str classifier_id: The ID of the classifier.
- :param dict headers: A `dict` containing the request headers
- :return: A `DetailedResponse` containing the result, headers and HTTP status code.
- :rtype: DetailedResponse
- """
-
- if classifier_id is None:
- raise ValueError('classifier_id must be provided')
-
- headers = {}
- if 'headers' in kwargs:
- headers.update(kwargs.get('headers'))
- sdk_headers = get_sdk_headers('watson_vision_combined', 'V3',
- 'get_classifier')
- headers.update(sdk_headers)
-
- params = {'version': self.version}
-
- url = '/v3/classifiers/{0}'.format(
- *self._encode_path_vars(classifier_id))
- response = self.request(
- method='GET',
- url=url,
- headers=headers,
- params=params,
- accept_json=True)
- return response
-
- def list_classifiers(self, verbose=None, **kwargs):
- """
- Retrieve a list of classifiers.
-
- :param bool verbose: Specify `true` to return details about the classifiers. Omit
- this parameter to return a brief list of classifiers.
- :param dict headers: A `dict` containing the request headers
- :return: A `DetailedResponse` containing the result, headers and HTTP status code.
- :rtype: DetailedResponse
- """
-
- headers = {}
- if 'headers' in kwargs:
- headers.update(kwargs.get('headers'))
- sdk_headers = get_sdk_headers('watson_vision_combined', 'V3',
- 'list_classifiers')
- headers.update(sdk_headers)
-
- params = {'version': self.version, 'verbose': verbose}
-
- url = '/v3/classifiers'
- response = self.request(
- method='GET',
- url=url,
- headers=headers,
- params=params,
- accept_json=True)
- return response
-
- def update_classifier(self,
- classifier_id,
- positive_examples={},
- negative_examples=None,
- negative_examples_filename=None,
- **kwargs):
- """
- Update a classifier.
-
- Update a custom classifier by adding new positive or negative classes or by adding
- new images to existing classes. You must supply at least one set of positive or
- negative examples. For details, see [Updating custom
- classifiers](https://cloud.ibm.com/docs/services/visual-recognition/customizing.html#updating-custom-classifiers).
- Encode all names in UTF-8 if they contain non-ASCII characters (.zip and image
- file names, and classifier and class names). The service assumes UTF-8 encoding if
- it encounters non-ASCII characters.
- **Tip:** Don't make retraining calls on a classifier until the status is ready.
- When you submit retraining requests in parallel, the last request overwrites the
- previous requests. The retrained property shows the last time the classifier
- retraining finished.
-
- :param str classifier_id: The ID of the classifier.
- :param dict positive_examples: A dictionary that contains the value for each
- classname. The value is a .zip file of images that depict the visual subject of a
- class in the classifier. The positive examples create or update classes in the
- classifier. You can include more than one positive example file in a call.
- Specify the parameter name by appending `_positive_examples` to the class name.
- For example, `goldenretriever_positive_examples` creates the class
- `goldenretriever`.
- Include at least 10 images in .jpg or .png format. The minimum recommended image
- resolution is 32X32 pixels. The maximum number of images is 10,000 images or 100
- MB per .zip file.
- Encode special characters in the file name in UTF-8.
- :param file negative_examples: A .zip file of images that do not depict the visual
- subject of any of the classes of the new classifier. Must contain a minimum of 10
- images.
- Encode special characters in the file name in UTF-8.
- :param str negative_examples_filename: The filename for negative_examples.
- :param dict headers: A `dict` containing the request headers
- :return: A `DetailedResponse` containing the result, headers and HTTP status code.
- :rtype: DetailedResponse
- """
-
- if classifier_id is None:
- raise ValueError('classifier_id must be provided')
-
- headers = {}
- if 'headers' in kwargs:
- headers.update(kwargs.get('headers'))
- sdk_headers = get_sdk_headers('watson_vision_combined', 'V3',
- 'update_classifier')
- headers.update(sdk_headers)
-
- params = {'version': self.version}
-
- form_data = {}
- for key in positive_examples.keys():
- part_name = '%s_positive_examples' % (key)
- value = positive_examples[key]
- if hasattr(value, 'name'):
- filename = basename(value.name)
- form_data[part_name] = (filename, value, 'application/octet-stream')
- if negative_examples:
- if not negative_examples_filename and hasattr(
- negative_examples, 'name'):
- negative_examples_filename = basename(negative_examples.name)
- if not negative_examples_filename:
- raise ValueError('negative_examples_filename must be provided')
- form_data['negative_examples'] = (negative_examples_filename,
- negative_examples,
- 'application/octet-stream')
-
- url = '/v3/classifiers/{0}'.format(
- *self._encode_path_vars(classifier_id))
- response = self.request(
- method='POST',
- url=url,
- headers=headers,
- params=params,
- files=form_data,
- accept_json=True)
- return response
-
- #########################
- # Core ML
- #########################
-
- def get_core_ml_model(self, classifier_id, **kwargs):
- """
- Retrieve a Core ML model of a classifier.
-
- Download a Core ML model file (.mlmodel) of a custom classifier that returns
- \"core_ml_enabled\": true in the classifier details.
-
- :param str classifier_id: The ID of the classifier.
- :param dict headers: A `dict` containing the request headers
- :return: A `DetailedResponse` containing the result, headers and HTTP status code.
- :rtype: DetailedResponse
- """
-
- if classifier_id is None:
- raise ValueError('classifier_id must be provided')
-
- headers = {}
- if 'headers' in kwargs:
- headers.update(kwargs.get('headers'))
- sdk_headers = get_sdk_headers('watson_vision_combined', 'V3',
- 'get_core_ml_model')
- headers.update(sdk_headers)
-
- params = {'version': self.version}
-
- url = '/v3/classifiers/{0}/core_ml_model'.format(
- *self._encode_path_vars(classifier_id))
- response = self.request(
- method='GET',
- url=url,
- headers=headers,
- params=params,
- accept_json=False)
- return response
-
- #########################
- # User data
- #########################
-
- def delete_user_data(self, customer_id, **kwargs):
- """
- Delete labeled data.
-
- Deletes all data associated with a specified customer ID. The method has no effect
- if no data is associated with the customer ID.
- You associate a customer ID with data by passing the `X-Watson-Metadata` header
- with a request that passes data. For more information about personal data and
- customer IDs, see [Information
- security](https://cloud.ibm.com/docs/services/visual-recognition/information-security.html).
-
- :param str customer_id: The customer ID for which all data is to be deleted.
- :param dict headers: A `dict` containing the request headers
- :return: A `DetailedResponse` containing the result, headers and HTTP status code.
- :rtype: DetailedResponse
- """
-
- if customer_id is None:
- raise ValueError('customer_id must be provided')
-
- headers = {}
- if 'headers' in kwargs:
- headers.update(kwargs.get('headers'))
- sdk_headers = get_sdk_headers('watson_vision_combined', 'V3',
- 'delete_user_data')
- headers.update(sdk_headers)
-
- params = {'version': self.version, 'customer_id': customer_id}
-
- url = '/v3/user_data'
- response = self.request(
- method='DELETE',
- url=url,
- headers=headers,
- params=params,
- accept_json=True)
- return response
-
-
-##############################################################################
-# Models
-##############################################################################
-
-
-class Class(object):
- """
- A category within a classifier.
-
- :attr str class_name: The name of the class.
- """
-
- def __init__(self, class_name):
- """
- Initialize a Class object.
-
- :param str class_name: The name of the class.
- """
- self.class_name = class_name
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a Class object from a json dictionary."""
- args = {}
- if 'class' in _dict or 'class_name' in _dict:
- args['class_name'] = _dict.get('class') or _dict.get('class_name')
- else:
- raise ValueError(
- 'Required property \'class\' not present in Class JSON')
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'class_name') and self.class_name is not None:
- _dict['class'] = self.class_name
- return _dict
-
- def __str__(self):
- """Return a `str` version of this Class object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class ClassResult(object):
- """
- Result of a class within a classifier.
-
- :attr str class_name: Name of the class.
- Class names are translated in the language defined by the **Accept-Language** request
- header for the build-in classifier IDs (`default`, `food`, and `explicit`). Class
- names of custom classifiers are not translated. The response might not be in the
- specified language when the requested language is not supported or when there is no
- translation for the class name.
- :attr float score: Confidence score for the property in the range of 0 to 1. A higher
- score indicates greater likelihood that the class is depicted in the image. The
- default threshold for returning scores from a classifier is 0.5.
- :attr str type_hierarchy: (optional) Knowledge graph of the property. For example,
- `/fruit/pome/apple/eating apple/Granny Smith`. Included only if identified.
- """
-
- def __init__(self, class_name, score, type_hierarchy=None):
- """
- Initialize a ClassResult object.
-
- :param str class_name: Name of the class.
- Class names are translated in the language defined by the **Accept-Language**
- request header for the build-in classifier IDs (`default`, `food`, and
- `explicit`). Class names of custom classifiers are not translated. The response
- might not be in the specified language when the requested language is not
- supported or when there is no translation for the class name.
- :param float score: Confidence score for the property in the range of 0 to 1. A
- higher score indicates greater likelihood that the class is depicted in the image.
- The default threshold for returning scores from a classifier is 0.5.
- :param str type_hierarchy: (optional) Knowledge graph of the property. For
- example, `/fruit/pome/apple/eating apple/Granny Smith`. Included only if
- identified.
- """
- self.class_name = class_name
- self.score = score
- self.type_hierarchy = type_hierarchy
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a ClassResult object from a json dictionary."""
- args = {}
- if 'class' in _dict or 'class_name' in _dict:
- args['class_name'] = _dict.get('class') or _dict.get('class_name')
- else:
- raise ValueError(
- 'Required property \'class\' not present in ClassResult JSON')
- if 'score' in _dict:
- args['score'] = _dict.get('score')
- else:
- raise ValueError(
- 'Required property \'score\' not present in ClassResult JSON')
- if 'type_hierarchy' in _dict:
- args['type_hierarchy'] = _dict.get('type_hierarchy')
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'class_name') and self.class_name is not None:
- _dict['class'] = self.class_name
- if hasattr(self, 'score') and self.score is not None:
- _dict['score'] = self.score
- if hasattr(self, 'type_hierarchy') and self.type_hierarchy is not None:
- _dict['type_hierarchy'] = self.type_hierarchy
- return _dict
-
- def __str__(self):
- """Return a `str` version of this ClassResult object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class ClassifiedImage(object):
- """
- Results for one image.
-
- :attr str source_url: (optional) Source of the image before any redirects. Not
- returned when the image is uploaded.
- :attr str resolved_url: (optional) Fully resolved URL of the image after redirects are
- followed. Not returned when the image is uploaded.
- :attr str image: (optional) Relative path of the image file if uploaded directly. Not
- returned when the image is passed by URL.
- :attr ErrorInfo error: (optional) Information about what might have caused a failure,
- such as an image that is too large. Not returned when there is no error.
- :attr list[ClassifierResult] classifiers: The classifiers.
- """
-
- def __init__(self,
- classifiers,
- source_url=None,
- resolved_url=None,
- image=None,
- error=None):
- """
- Initialize a ClassifiedImage object.
-
- :param list[ClassifierResult] classifiers: The classifiers.
- :param str source_url: (optional) Source of the image before any redirects. Not
- returned when the image is uploaded.
- :param str resolved_url: (optional) Fully resolved URL of the image after
- redirects are followed. Not returned when the image is uploaded.
- :param str image: (optional) Relative path of the image file if uploaded directly.
- Not returned when the image is passed by URL.
- :param ErrorInfo error: (optional) Information about what might have caused a
- failure, such as an image that is too large. Not returned when there is no error.
- """
- self.source_url = source_url
- self.resolved_url = resolved_url
- self.image = image
- self.error = error
- self.classifiers = classifiers
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a ClassifiedImage object from a json dictionary."""
- args = {}
- if 'source_url' in _dict:
- args['source_url'] = _dict.get('source_url')
- if 'resolved_url' in _dict:
- args['resolved_url'] = _dict.get('resolved_url')
- if 'image' in _dict:
- args['image'] = _dict.get('image')
- if 'error' in _dict:
- args['error'] = ErrorInfo._from_dict(_dict.get('error'))
- if 'classifiers' in _dict:
- args['classifiers'] = [
- ClassifierResult._from_dict(x)
- for x in (_dict.get('classifiers'))
- ]
- else:
- raise ValueError(
- 'Required property \'classifiers\' not present in ClassifiedImage JSON'
- )
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'source_url') and self.source_url is not None:
- _dict['source_url'] = self.source_url
- if hasattr(self, 'resolved_url') and self.resolved_url is not None:
- _dict['resolved_url'] = self.resolved_url
- if hasattr(self, 'image') and self.image is not None:
- _dict['image'] = self.image
- if hasattr(self, 'error') and self.error is not None:
- _dict['error'] = self.error._to_dict()
- if hasattr(self, 'classifiers') and self.classifiers is not None:
- _dict['classifiers'] = [x._to_dict() for x in self.classifiers]
- return _dict
-
- def __str__(self):
- """Return a `str` version of this ClassifiedImage object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class ClassifiedImages(object):
- """
- Results for all images.
-
- :attr int custom_classes: (optional) Number of custom classes identified in the
- images.
- :attr int images_processed: (optional) Number of images processed for the API call.
- :attr list[ClassifiedImage] images: Classified images.
- :attr list[WarningInfo] warnings: (optional) Information about what might cause less
- than optimal output. For example, a request sent with a corrupt .zip file and a list
- of image URLs will still complete, but does not return the expected output. Not
- returned when there is no warning.
- """
-
- def __init__(self,
- images,
- custom_classes=None,
- images_processed=None,
- warnings=None):
- """
- Initialize a ClassifiedImages object.
-
- :param list[ClassifiedImage] images: Classified images.
- :param int custom_classes: (optional) Number of custom classes identified in the
- images.
- :param int images_processed: (optional) Number of images processed for the API
- call.
- :param list[WarningInfo] warnings: (optional) Information about what might cause
- less than optimal output. For example, a request sent with a corrupt .zip file and
- a list of image URLs will still complete, but does not return the expected output.
- Not returned when there is no warning.
- """
- self.custom_classes = custom_classes
- self.images_processed = images_processed
- self.images = images
- self.warnings = warnings
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a ClassifiedImages object from a json dictionary."""
- args = {}
- if 'custom_classes' in _dict:
- args['custom_classes'] = _dict.get('custom_classes')
- if 'images_processed' in _dict:
- args['images_processed'] = _dict.get('images_processed')
- if 'images' in _dict:
- args['images'] = [
- ClassifiedImage._from_dict(x) for x in (_dict.get('images'))
- ]
- else:
- raise ValueError(
- 'Required property \'images\' not present in ClassifiedImages JSON'
- )
- if 'warnings' in _dict:
- args['warnings'] = [
- WarningInfo._from_dict(x) for x in (_dict.get('warnings'))
- ]
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'custom_classes') and self.custom_classes is not None:
- _dict['custom_classes'] = self.custom_classes
- if hasattr(self,
- 'images_processed') and self.images_processed is not None:
- _dict['images_processed'] = self.images_processed
- if hasattr(self, 'images') and self.images is not None:
- _dict['images'] = [x._to_dict() for x in self.images]
- if hasattr(self, 'warnings') and self.warnings is not None:
- _dict['warnings'] = [x._to_dict() for x in self.warnings]
- return _dict
-
- def __str__(self):
- """Return a `str` version of this ClassifiedImages object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class Classifier(object):
- """
- Information about a classifier.
-
- :attr str classifier_id: ID of a classifier identified in the image.
- :attr str name: Name of the classifier.
- :attr str owner: (optional) Unique ID of the account who owns the classifier. Might
- not be returned by some requests.
- :attr str status: (optional) Training status of classifier.
- :attr bool core_ml_enabled: (optional) Whether the classifier can be downloaded as a
- Core ML model after the training status is `ready`.
- :attr str explanation: (optional) If classifier training has failed, this field might
- explain why.
- :attr datetime created: (optional) Date and time in Coordinated Universal Time (UTC)
- that the classifier was created.
- :attr list[Class] classes: (optional) Classes that define a classifier.
- :attr datetime retrained: (optional) Date and time in Coordinated Universal Time (UTC)
- that the classifier was updated. Might not be returned by some requests. Identical to
- `updated` and retained for backward compatibility.
- :attr datetime updated: (optional) Date and time in Coordinated Universal Time (UTC)
- that the classifier was most recently updated. The field matches either `retrained` or
- `created`. Might not be returned by some requests.
- """
-
- def __init__(self,
- classifier_id,
- name,
- owner=None,
- status=None,
- core_ml_enabled=None,
- explanation=None,
- created=None,
- classes=None,
- retrained=None,
- updated=None):
- """
- Initialize a Classifier object.
-
- :param str classifier_id: ID of a classifier identified in the image.
- :param str name: Name of the classifier.
- :param str owner: (optional) Unique ID of the account who owns the classifier.
- Might not be returned by some requests.
- :param str status: (optional) Training status of classifier.
- :param bool core_ml_enabled: (optional) Whether the classifier can be downloaded
- as a Core ML model after the training status is `ready`.
- :param str explanation: (optional) If classifier training has failed, this field
- might explain why.
- :param datetime created: (optional) Date and time in Coordinated Universal Time
- (UTC) that the classifier was created.
- :param list[Class] classes: (optional) Classes that define a classifier.
- :param datetime retrained: (optional) Date and time in Coordinated Universal Time
- (UTC) that the classifier was updated. Might not be returned by some requests.
- Identical to `updated` and retained for backward compatibility.
- :param datetime updated: (optional) Date and time in Coordinated Universal Time
- (UTC) that the classifier was most recently updated. The field matches either
- `retrained` or `created`. Might not be returned by some requests.
- """
- self.classifier_id = classifier_id
- self.name = name
- self.owner = owner
- self.status = status
- self.core_ml_enabled = core_ml_enabled
- self.explanation = explanation
- self.created = created
- self.classes = classes
- self.retrained = retrained
- self.updated = updated
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a Classifier object from a json dictionary."""
- args = {}
- if 'classifier_id' in _dict:
- args['classifier_id'] = _dict.get('classifier_id')
- else:
- raise ValueError(
- 'Required property \'classifier_id\' not present in Classifier JSON'
- )
- if 'name' in _dict:
- args['name'] = _dict.get('name')
- else:
- raise ValueError(
- 'Required property \'name\' not present in Classifier JSON')
- if 'owner' in _dict:
- args['owner'] = _dict.get('owner')
- if 'status' in _dict:
- args['status'] = _dict.get('status')
- if 'core_ml_enabled' in _dict:
- args['core_ml_enabled'] = _dict.get('core_ml_enabled')
- if 'explanation' in _dict:
- args['explanation'] = _dict.get('explanation')
- if 'created' in _dict:
- args['created'] = string_to_datetime(_dict.get('created'))
- if 'classes' in _dict:
- args['classes'] = [
- Class._from_dict(x) for x in (_dict.get('classes'))
- ]
- if 'retrained' in _dict:
- args['retrained'] = string_to_datetime(_dict.get('retrained'))
- if 'updated' in _dict:
- args['updated'] = string_to_datetime(_dict.get('updated'))
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'classifier_id') and self.classifier_id is not None:
- _dict['classifier_id'] = self.classifier_id
- if hasattr(self, 'name') and self.name is not None:
- _dict['name'] = self.name
- if hasattr(self, 'owner') and self.owner is not None:
- _dict['owner'] = self.owner
- if hasattr(self, 'status') and self.status is not None:
- _dict['status'] = self.status
- if hasattr(self,
- 'core_ml_enabled') and self.core_ml_enabled is not None:
- _dict['core_ml_enabled'] = self.core_ml_enabled
- if hasattr(self, 'explanation') and self.explanation is not None:
- _dict['explanation'] = self.explanation
- if hasattr(self, 'created') and self.created is not None:
- _dict['created'] = datetime_to_string(self.created)
- if hasattr(self, 'classes') and self.classes is not None:
- _dict['classes'] = [x._to_dict() for x in self.classes]
- if hasattr(self, 'retrained') and self.retrained is not None:
- _dict['retrained'] = datetime_to_string(self.retrained)
- if hasattr(self, 'updated') and self.updated is not None:
- _dict['updated'] = datetime_to_string(self.updated)
- return _dict
-
- def __str__(self):
- """Return a `str` version of this Classifier object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class ClassifierResult(object):
- """
- Classifier and score combination.
-
- :attr str name: Name of the classifier.
- :attr str classifier_id: ID of a classifier identified in the image.
- :attr list[ClassResult] classes: Classes within the classifier.
- """
-
- def __init__(self, name, classifier_id, classes):
- """
- Initialize a ClassifierResult object.
-
- :param str name: Name of the classifier.
- :param str classifier_id: ID of a classifier identified in the image.
- :param list[ClassResult] classes: Classes within the classifier.
- """
- self.name = name
- self.classifier_id = classifier_id
- self.classes = classes
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a ClassifierResult object from a json dictionary."""
- args = {}
- if 'name' in _dict:
- args['name'] = _dict.get('name')
- else:
- raise ValueError(
- 'Required property \'name\' not present in ClassifierResult JSON'
- )
- if 'classifier_id' in _dict:
- args['classifier_id'] = _dict.get('classifier_id')
- else:
- raise ValueError(
- 'Required property \'classifier_id\' not present in ClassifierResult JSON'
- )
- if 'classes' in _dict:
- args['classes'] = [
- ClassResult._from_dict(x) for x in (_dict.get('classes'))
- ]
- else:
- raise ValueError(
- 'Required property \'classes\' not present in ClassifierResult JSON'
- )
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'name') and self.name is not None:
- _dict['name'] = self.name
- if hasattr(self, 'classifier_id') and self.classifier_id is not None:
- _dict['classifier_id'] = self.classifier_id
- if hasattr(self, 'classes') and self.classes is not None:
- _dict['classes'] = [x._to_dict() for x in self.classes]
- return _dict
-
- def __str__(self):
- """Return a `str` version of this ClassifierResult object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class Classifiers(object):
- """
- A container for the list of classifiers.
-
- :attr list[Classifier] classifiers: List of classifiers.
- """
-
- def __init__(self, classifiers):
- """
- Initialize a Classifiers object.
-
- :param list[Classifier] classifiers: List of classifiers.
- """
- self.classifiers = classifiers
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a Classifiers object from a json dictionary."""
- args = {}
- if 'classifiers' in _dict:
- args['classifiers'] = [
- Classifier._from_dict(x) for x in (_dict.get('classifiers'))
- ]
- else:
- raise ValueError(
- 'Required property \'classifiers\' not present in Classifiers JSON'
- )
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'classifiers') and self.classifiers is not None:
- _dict['classifiers'] = [x._to_dict() for x in self.classifiers]
- return _dict
-
- def __str__(self):
- """Return a `str` version of this Classifiers object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class DetectedFaces(object):
- """
- Results for all faces.
-
- :attr int images_processed: Number of images processed for the API call.
- :attr list[ImageWithFaces] images: The images.
- :attr list[WarningInfo] warnings: (optional) Information about what might cause less
- than optimal output. For example, a request sent with a corrupt .zip file and a list
- of image URLs will still complete, but does not return the expected output. Not
- returned when there is no warning.
- """
-
- def __init__(self, images_processed, images, warnings=None):
- """
- Initialize a DetectedFaces object.
-
- :param int images_processed: Number of images processed for the API call.
- :param list[ImageWithFaces] images: The images.
- :param list[WarningInfo] warnings: (optional) Information about what might cause
- less than optimal output. For example, a request sent with a corrupt .zip file and
- a list of image URLs will still complete, but does not return the expected output.
- Not returned when there is no warning.
- """
- self.images_processed = images_processed
- self.images = images
- self.warnings = warnings
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a DetectedFaces object from a json dictionary."""
- args = {}
- if 'images_processed' in _dict:
- args['images_processed'] = _dict.get('images_processed')
- else:
- raise ValueError(
- 'Required property \'images_processed\' not present in DetectedFaces JSON'
- )
- if 'images' in _dict:
- args['images'] = [
- ImageWithFaces._from_dict(x) for x in (_dict.get('images'))
- ]
- else:
- raise ValueError(
- 'Required property \'images\' not present in DetectedFaces JSON'
- )
- if 'warnings' in _dict:
- args['warnings'] = [
- WarningInfo._from_dict(x) for x in (_dict.get('warnings'))
- ]
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self,
- 'images_processed') and self.images_processed is not None:
- _dict['images_processed'] = self.images_processed
- if hasattr(self, 'images') and self.images is not None:
- _dict['images'] = [x._to_dict() for x in self.images]
- if hasattr(self, 'warnings') and self.warnings is not None:
- _dict['warnings'] = [x._to_dict() for x in self.warnings]
- return _dict
-
- def __str__(self):
- """Return a `str` version of this DetectedFaces object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class ErrorInfo(object):
- """
- Information about what might have caused a failure, such as an image that is too
- large. Not returned when there is no error.
-
- :attr int code: HTTP status code.
- :attr str description: Human-readable error description. For example, `File size limit
- exceeded`.
- :attr str error_id: Codified error string. For example, `limit_exceeded`.
- """
-
- def __init__(self, code, description, error_id):
- """
- Initialize a ErrorInfo object.
-
- :param int code: HTTP status code.
- :param str description: Human-readable error description. For example, `File size
- limit exceeded`.
- :param str error_id: Codified error string. For example, `limit_exceeded`.
- """
- self.code = code
- self.description = description
- self.error_id = error_id
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a ErrorInfo object from a json dictionary."""
- args = {}
- if 'code' in _dict:
- args['code'] = _dict.get('code')
- else:
- raise ValueError(
- 'Required property \'code\' not present in ErrorInfo JSON')
- if 'description' in _dict:
- args['description'] = _dict.get('description')
- else:
- raise ValueError(
- 'Required property \'description\' not present in ErrorInfo JSON'
- )
- if 'error_id' in _dict:
- args['error_id'] = _dict.get('error_id')
- else:
- raise ValueError(
- 'Required property \'error_id\' not present in ErrorInfo JSON')
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'code') and self.code is not None:
- _dict['code'] = self.code
- if hasattr(self, 'description') and self.description is not None:
- _dict['description'] = self.description
- if hasattr(self, 'error_id') and self.error_id is not None:
- _dict['error_id'] = self.error_id
- return _dict
-
- def __str__(self):
- """Return a `str` version of this ErrorInfo object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class Face(object):
- """
- Information about the face.
-
- :attr FaceAge age: (optional) Age information about a face.
- :attr FaceGender gender: (optional) Information about the gender of the face.
- :attr FaceLocation face_location: (optional) The location of the bounding box around
- the face.
- """
-
- def __init__(self, age=None, gender=None, face_location=None):
- """
- Initialize a Face object.
-
- :param FaceAge age: (optional) Age information about a face.
- :param FaceGender gender: (optional) Information about the gender of the face.
- :param FaceLocation face_location: (optional) The location of the bounding box
- around the face.
- """
- self.age = age
- self.gender = gender
- self.face_location = face_location
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a Face object from a json dictionary."""
- args = {}
- if 'age' in _dict:
- args['age'] = FaceAge._from_dict(_dict.get('age'))
- if 'gender' in _dict:
- args['gender'] = FaceGender._from_dict(_dict.get('gender'))
- if 'face_location' in _dict:
- args['face_location'] = FaceLocation._from_dict(
- _dict.get('face_location'))
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'age') and self.age is not None:
- _dict['age'] = self.age._to_dict()
- if hasattr(self, 'gender') and self.gender is not None:
- _dict['gender'] = self.gender._to_dict()
- if hasattr(self, 'face_location') and self.face_location is not None:
- _dict['face_location'] = self.face_location._to_dict()
- return _dict
-
- def __str__(self):
- """Return a `str` version of this Face object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class FaceAge(object):
- """
- Age information about a face.
-
- :attr int min: (optional) Estimated minimum age.
- :attr int max: (optional) Estimated maximum age.
- :attr float score: Confidence score in the range of 0 to 1. A higher score indicates
- greater confidence in the estimated value for the property.
- """
-
- def __init__(self, score, min=None, max=None):
- """
- Initialize a FaceAge object.
-
- :param float score: Confidence score in the range of 0 to 1. A higher score
- indicates greater confidence in the estimated value for the property.
- :param int min: (optional) Estimated minimum age.
- :param int max: (optional) Estimated maximum age.
- """
- self.min = min
- self.max = max
- self.score = score
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a FaceAge object from a json dictionary."""
- args = {}
- if 'min' in _dict:
- args['min'] = _dict.get('min')
- if 'max' in _dict:
- args['max'] = _dict.get('max')
- if 'score' in _dict:
- args['score'] = _dict.get('score')
- else:
- raise ValueError(
- 'Required property \'score\' not present in FaceAge JSON')
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'min') and self.min is not None:
- _dict['min'] = self.min
- if hasattr(self, 'max') and self.max is not None:
- _dict['max'] = self.max
- if hasattr(self, 'score') and self.score is not None:
- _dict['score'] = self.score
- return _dict
-
- def __str__(self):
- """Return a `str` version of this FaceAge object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class FaceGender(object):
- """
- Information about the gender of the face.
-
- :attr str gender: Gender identified by the face. For example, `MALE` or `FEMALE`.
- :attr str gender_label: The word for "male" or "female" in the language defined by the
- **Accept-Language** request header.
- :attr float score: Confidence score in the range of 0 to 1. A higher score indicates
- greater confidence in the estimated value for the property.
- """
-
- def __init__(self, gender, gender_label, score):
- """
- Initialize a FaceGender object.
-
- :param str gender: Gender identified by the face. For example, `MALE` or `FEMALE`.
- :param str gender_label: The word for "male" or "female" in the language defined
- by the **Accept-Language** request header.
- :param float score: Confidence score in the range of 0 to 1. A higher score
- indicates greater confidence in the estimated value for the property.
- """
- self.gender = gender
- self.gender_label = gender_label
- self.score = score
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a FaceGender object from a json dictionary."""
- args = {}
- if 'gender' in _dict:
- args['gender'] = _dict.get('gender')
- else:
- raise ValueError(
- 'Required property \'gender\' not present in FaceGender JSON')
- if 'gender_label' in _dict:
- args['gender_label'] = _dict.get('gender_label')
- else:
- raise ValueError(
- 'Required property \'gender_label\' not present in FaceGender JSON'
- )
- if 'score' in _dict:
- args['score'] = _dict.get('score')
- else:
- raise ValueError(
- 'Required property \'score\' not present in FaceGender JSON')
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'gender') and self.gender is not None:
- _dict['gender'] = self.gender
- if hasattr(self, 'gender_label') and self.gender_label is not None:
- _dict['gender_label'] = self.gender_label
- if hasattr(self, 'score') and self.score is not None:
- _dict['score'] = self.score
- return _dict
-
- def __str__(self):
- """Return a `str` version of this FaceGender object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class FaceLocation(object):
- """
- The location of the bounding box around the face.
-
- :attr float width: Width in pixels of face region.
- :attr float height: Height in pixels of face region.
- :attr float left: X-position of top-left pixel of face region.
- :attr float top: Y-position of top-left pixel of face region.
- """
-
- def __init__(self, width, height, left, top):
- """
- Initialize a FaceLocation object.
-
- :param float width: Width in pixels of face region.
- :param float height: Height in pixels of face region.
- :param float left: X-position of top-left pixel of face region.
- :param float top: Y-position of top-left pixel of face region.
- """
- self.width = width
- self.height = height
- self.left = left
- self.top = top
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a FaceLocation object from a json dictionary."""
- args = {}
- if 'width' in _dict:
- args['width'] = _dict.get('width')
- else:
- raise ValueError(
- 'Required property \'width\' not present in FaceLocation JSON')
- if 'height' in _dict:
- args['height'] = _dict.get('height')
- else:
- raise ValueError(
- 'Required property \'height\' not present in FaceLocation JSON')
- if 'left' in _dict:
- args['left'] = _dict.get('left')
- else:
- raise ValueError(
- 'Required property \'left\' not present in FaceLocation JSON')
- if 'top' in _dict:
- args['top'] = _dict.get('top')
- else:
- raise ValueError(
- 'Required property \'top\' not present in FaceLocation JSON')
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'width') and self.width is not None:
- _dict['width'] = self.width
- if hasattr(self, 'height') and self.height is not None:
- _dict['height'] = self.height
- if hasattr(self, 'left') and self.left is not None:
- _dict['left'] = self.left
- if hasattr(self, 'top') and self.top is not None:
- _dict['top'] = self.top
- return _dict
-
- def __str__(self):
- """Return a `str` version of this FaceLocation object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class ImageWithFaces(object):
- """
- Information about faces in the image.
-
- :attr list[Face] faces: Faces detected in the images.
- :attr str image: (optional) Relative path of the image file if uploaded directly. Not
- returned when the image is passed by URL.
- :attr str source_url: (optional) Source of the image before any redirects. Not
- returned when the image is uploaded.
- :attr str resolved_url: (optional) Fully resolved URL of the image after redirects are
- followed. Not returned when the image is uploaded.
- :attr ErrorInfo error: (optional) Information about what might have caused a failure,
- such as an image that is too large. Not returned when there is no error.
- """
-
- def __init__(self,
- faces,
- image=None,
- source_url=None,
- resolved_url=None,
- error=None):
- """
- Initialize a ImageWithFaces object.
-
- :param list[Face] faces: Faces detected in the images.
- :param str image: (optional) Relative path of the image file if uploaded directly.
- Not returned when the image is passed by URL.
- :param str source_url: (optional) Source of the image before any redirects. Not
- returned when the image is uploaded.
- :param str resolved_url: (optional) Fully resolved URL of the image after
- redirects are followed. Not returned when the image is uploaded.
- :param ErrorInfo error: (optional) Information about what might have caused a
- failure, such as an image that is too large. Not returned when there is no error.
- """
- self.faces = faces
- self.image = image
- self.source_url = source_url
- self.resolved_url = resolved_url
- self.error = error
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a ImageWithFaces object from a json dictionary."""
- args = {}
- if 'faces' in _dict:
- args['faces'] = [Face._from_dict(x) for x in (_dict.get('faces'))]
- else:
- raise ValueError(
- 'Required property \'faces\' not present in ImageWithFaces JSON'
- )
- if 'image' in _dict:
- args['image'] = _dict.get('image')
- if 'source_url' in _dict:
- args['source_url'] = _dict.get('source_url')
- if 'resolved_url' in _dict:
- args['resolved_url'] = _dict.get('resolved_url')
- if 'error' in _dict:
- args['error'] = ErrorInfo._from_dict(_dict.get('error'))
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'faces') and self.faces is not None:
- _dict['faces'] = [x._to_dict() for x in self.faces]
- if hasattr(self, 'image') and self.image is not None:
- _dict['image'] = self.image
- if hasattr(self, 'source_url') and self.source_url is not None:
- _dict['source_url'] = self.source_url
- if hasattr(self, 'resolved_url') and self.resolved_url is not None:
- _dict['resolved_url'] = self.resolved_url
- if hasattr(self, 'error') and self.error is not None:
- _dict['error'] = self.error._to_dict()
- return _dict
-
- def __str__(self):
- """Return a `str` version of this ImageWithFaces object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
-
-
-class WarningInfo(object):
- """
- Information about something that went wrong.
-
- :attr str warning_id: Codified warning string, such as `limit_reached`.
- :attr str description: Information about the error.
- """
-
- def __init__(self, warning_id, description):
- """
- Initialize a WarningInfo object.
-
- :param str warning_id: Codified warning string, such as `limit_reached`.
- :param str description: Information about the error.
- """
- self.warning_id = warning_id
- self.description = description
-
- @classmethod
- def _from_dict(cls, _dict):
- """Initialize a WarningInfo object from a json dictionary."""
- args = {}
- if 'warning_id' in _dict:
- args['warning_id'] = _dict.get('warning_id')
- else:
- raise ValueError(
- 'Required property \'warning_id\' not present in WarningInfo JSON'
- )
- if 'description' in _dict:
- args['description'] = _dict.get('description')
- else:
- raise ValueError(
- 'Required property \'description\' not present in WarningInfo JSON'
- )
- return cls(**args)
-
- def _to_dict(self):
- """Return a json dictionary representing this model."""
- _dict = {}
- if hasattr(self, 'warning_id') and self.warning_id is not None:
- _dict['warning_id'] = self.warning_id
- if hasattr(self, 'description') and self.description is not None:
- _dict['description'] = self.description
- return _dict
-
- def __str__(self):
- """Return a `str` version of this WarningInfo object."""
- return json.dumps(self._to_dict(), indent=2)
-
- def __eq__(self, other):
- """Return `true` when self and other are equal, false otherwise."""
- if not isinstance(other, self.__class__):
- return False
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- """Return `true` when self and other are not equal, false otherwise."""
- return not self == other
diff --git a/ibm_watson/websocket/__init__.py b/ibm_watson/websocket/__init__.py
index ed6564545..f50ad9fdf 100644
--- a/ibm_watson/websocket/__init__.py
+++ b/ibm_watson/websocket/__init__.py
@@ -1,6 +1,6 @@
# coding: utf-8
-# Copyright 2018 IBM All Rights Reserved.
+# (C) Copyright IBM Corp. 2018, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/ibm_watson/websocket/audio_source.py b/ibm_watson/websocket/audio_source.py
index b33930578..181eeab18 100644
--- a/ibm_watson/websocket/audio_source.py
+++ b/ibm_watson/websocket/audio_source.py
@@ -1,6 +1,6 @@
# coding: utf-8
-# Copyright 2018 IBM All Rights Reserved.
+# (C) Copyright IBM Corp. 2018, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+
class AudioSource(object):
""""Audio source for the speech to text recognize using websocket"""
diff --git a/ibm_watson/websocket/recognize_abstract_callback.py b/ibm_watson/websocket/recognize_abstract_callback.py
index ffbb4bfeb..a8574c6d0 100644
--- a/ibm_watson/websocket/recognize_abstract_callback.py
+++ b/ibm_watson/websocket/recognize_abstract_callback.py
@@ -1,6 +1,6 @@
# coding: utf-8
-# Copyright 2018 IBM All Rights Reserved.
+# (C) Copyright IBM Corp. 2018, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,53 +16,46 @@
class RecognizeCallback(object):
+
def __init__(self):
pass
def on_transcription(self, transcript):
"""
- Called after the service returns the final result for the transcription.
- """
- pass
+ Called after the service returns the final result for the transcription.
+ """
def on_connected(self):
"""
- Called when a Websocket connection was made
- """
- pass
+ Called when a Websocket connection was made
+ """
def on_error(self, error):
"""
- Called when there is an error in the Websocket connection.
- """
- pass
+ Called when there is an error in the Websocket connection.
+ """
def on_inactivity_timeout(self, error):
"""
- Called when there is an inactivity timeout.
- """
- pass
+ Called when there is an inactivity timeout.
+ """
def on_listening(self):
"""
- Called when the service is listening for audio.
- """
- pass
+ Called when the service is listening for audio.
+ """
def on_hypothesis(self, hypothesis):
"""
- Called when an interim result is received.
- """
- pass
+ Called when an interim result is received.
+ """
def on_data(self, data):
"""
- Called when the service returns results. The data is returned unparsed.
- """
- pass
+ Called when the service returns results. The data is returned unparsed.
+ """
def on_close(self):
"""
- Called when the Websocket connection is closed
- """
- pass
+ Called when the Websocket connection is closed
+ """
diff --git a/ibm_watson/websocket/recognize_listener.py b/ibm_watson/websocket/recognize_listener.py
index e1471c0cb..43eb79618 100644
--- a/ibm_watson/websocket/recognize_listener.py
+++ b/ibm_watson/websocket/recognize_listener.py
@@ -1,6 +1,6 @@
# coding: utf-8
-# Copyright 2018 IBM All Rights Reserved.
+# (C) Copyright IBM Corp. 2018, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -31,7 +31,9 @@
START = "start"
STOP = "stop"
+
class RecognizeListener(object):
+
def __init__(self,
audio_source,
options,
@@ -51,8 +53,6 @@ def __init__(self,
self.isListening = False
self.verify = verify
- # websocket.enableTrace(True)
-
self.ws_client = websocket.WebSocketApp(
self.url,
header=self.headers,
@@ -64,7 +64,8 @@ def __init__(self,
self.ws_client.run_forever(http_proxy_host=self.http_proxy_host,
http_proxy_port=self.http_proxy_port,
- sslopt={"cert_reqs": ssl.CERT_NONE} if self.verify is not None else None)
+ sslopt={"cert_reqs": ssl.CERT_NONE}
+ if self.verify is not None else None)
@classmethod
def build_start_message(cls, options):
@@ -102,6 +103,7 @@ def send_audio(self, ws):
:param ws: Websocket client
"""
+
def run(*args):
"""Background process to stream the data"""
if not self.audio_source.is_buffer:
@@ -118,7 +120,8 @@ def run(*args):
try:
if not self.audio_source.input.empty():
chunk = self.audio_source.input.get()
- self.ws_client.send(chunk, websocket.ABNF.OPCODE_BINARY)
+ self.ws_client.send(chunk,
+ websocket.ABNF.OPCODE_BINARY)
time.sleep(TEN_MILLISECONDS)
if self.audio_source.input.empty():
if self.audio_source.is_recording:
@@ -132,7 +135,8 @@ def run(*args):
break
time.sleep(TEN_MILLISECONDS)
- self.ws_client.send(self.build_closing_message(), websocket.ABNF.OPCODE_TEXT)
+ self.ws_client.send(self.build_closing_message(),
+ websocket.ABNF.OPCODE_TEXT)
thread.start_new_thread(run, ())
@@ -147,7 +151,8 @@ def on_open(self, ws):
# Send initialization message
init_data = self.build_start_message(self.options)
- self.ws_client.send(json.dumps(init_data).encode('utf8'), websocket.ABNF.OPCODE_TEXT)
+ self.ws_client.send(
+ json.dumps(init_data).encode('utf8'), websocket.ABNF.OPCODE_TEXT)
def on_data(self, ws, message, message_type, fin):
"""
@@ -187,18 +192,30 @@ def on_data(self, ws, message, message_type, fin):
# if in streaming
elif 'results' in json_object or 'speaker_labels' in json_object:
- hypothesis = ''
- if 'results' in json_object:
- hypothesis = json_object['results'][0]['alternatives'][0][
- 'transcript']
- b_final = (json_object['results'][0]['final'] is True)
- transcripts = self.extract_transcripts(
- json_object['results'][0]['alternatives'])
-
- if b_final:
- self.callback.on_transcription(transcripts)
-
- self.callback.on_hypothesis(hypothesis)
+ # If results are present, extract the hypothesis and, if finalized, the full
+ # set of transcriptions and send them to the appropriate callbacks.
+ results = json_object.get('results')
+ if results:
+ if (self.options.get('interim_results') is True):
+ b_final = (results[0].get('final') is True)
+ alternatives = results[0].get('alternatives')
+ if alternatives:
+ hypothesis = alternatives[0].get('transcript')
+ transcripts = self.extract_transcripts(alternatives)
+ if b_final:
+ self.callback.on_transcription(transcripts)
+ if hypothesis:
+ self.callback.on_hypothesis(hypothesis)
+ else:
+ final_transcript = []
+ for result in results:
+ transcript = self.extract_transcripts(
+ result.get('alternatives'))
+ final_transcript.append(transcript)
+
+ self.callback.on_transcription(final_transcript)
+
+ # Always call the on_data callback if 'results' or 'speaker_labels' are present
self.callback.on_data(json_object)
def on_error(self, ws, error):
@@ -210,7 +227,7 @@ def on_error(self, ws, error):
"""
self.callback.on_error(error)
- def on_close(self, ws):
+ def on_close(self, ws, *args):
"""
Callback executed when websocket connection is closed
diff --git a/ibm_watson/websocket/synthesize_callback.py b/ibm_watson/websocket/synthesize_callback.py
index 70c7a6075..ec62ea493 100644
--- a/ibm_watson/websocket/synthesize_callback.py
+++ b/ibm_watson/websocket/synthesize_callback.py
@@ -1,6 +1,6 @@
# coding: utf-8
-# Copyright 2018 IBM All Rights Reserved.
+# (C) Copyright IBM Corp. 2018, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,48 +16,41 @@
class SynthesizeCallback(object):
+
def __init__(self):
pass
def on_connected(self):
"""
- Called when a Websocket connection was made
- """
- pass
+ Called when a Websocket connection was made
+ """
def on_error(self, error):
"""
- Called when there is an error in the Websocket connection.
- """
- pass
-
+ Called when there is an error in the Websocket connection.
+ """
def on_content_type(self, content_type):
"""
- Called when the service responds with the format of the audio response
- """
- pass
+ Called when the service responds with the format of the audio response
+ """
def on_timing_information(self, timing_information):
"""
- Called when the service returns timing information
- """
- pass
+ Called when the service returns timing information
+ """
def on_audio_stream(self, audio_stream):
"""
- Called when the service sends the synthesized audio as a binary stream of data in the indicated format.
- """
- pass
+ Called when the service sends the synthesized audio as a binary stream of data in the indicated format.
+ """
def on_data(self, data):
"""
- Called when the service returns results. The data is returned unparsed.
- """
- pass
+ Called when the service returns results. The data is returned unparsed.
+ """
def on_close(self):
"""
- Called when the Websocket connection is closed
- """
- pass
+ Called when the Websocket connection is closed
+ """
diff --git a/ibm_watson/websocket/synthesize_listener.py b/ibm_watson/websocket/synthesize_listener.py
index 905e02ba3..33caf81d5 100644
--- a/ibm_watson/websocket/synthesize_listener.py
+++ b/ibm_watson/websocket/synthesize_listener.py
@@ -1,6 +1,6 @@
# coding: utf-8
-# Copyright 2018 IBM All Rights Reserved.
+# (C) Copyright IBM Corp. 2018, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,10 +23,11 @@
except ImportError:
import _thread as thread
-
TEN_MILLISECONDS = 0.01
+
class SynthesizeListener(object):
+
def __init__(self,
options,
callback,
@@ -43,8 +44,6 @@ def __init__(self,
self.http_proxy_port = http_proxy_port
self.verify = verify
- # websocket.enableTrace(True)
-
self.ws_client = websocket.WebSocketApp(
self.url,
header=self.headers,
@@ -56,13 +55,16 @@ def __init__(self,
self.ws_client.run_forever(http_proxy_host=self.http_proxy_host,
http_proxy_port=self.http_proxy_port,
- sslopt={'cert_reqs': ssl.CERT_NONE} if self.verify is not None else None)
+ suppress_origin=True,
+ sslopt={'cert_reqs': ssl.CERT_NONE}
+ if self.verify is not None else None)
def send_text(self):
"""
Sends the text message
Note: The service handles one request per connection
"""
+
def run(*args):
"""Background process to send the text"""
self.ws_client.send(json.dumps(self.options).encode('utf8'))
@@ -94,7 +96,8 @@ def on_data(self, ws, message, message_type, fin):
if message_type == websocket.ABNF.OPCODE_TEXT:
json_object = json.loads(message)
if 'binary_streams' in json_object:
- self.callback.on_content_type(json_object['binary_streams'][0]['content_type'])
+ self.callback.on_content_type(
+ json_object['binary_streams'][0]['content_type'])
elif 'error' in json_object:
self.on_error(ws, json_object.get('error'))
return
@@ -117,7 +120,7 @@ def on_error(self, ws, error):
"""
self.callback.on_error(error)
- def on_close(self, ws, **kwargs):
+ def on_close(self, ws, *args, **kwargs):
"""
Callback executed when websocket connection is closed
diff --git a/package-lock.json b/package-lock.json
new file mode 100644
index 000000000..073021c0e
--- /dev/null
+++ b/package-lock.json
@@ -0,0 +1,838 @@
+{
+ "requires": true,
+ "lockfileVersion": 1,
+ "dependencies": {
+ "@babel/code-frame": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.14.5.tgz",
+ "integrity": "sha512-9pzDqyc6OLDaqe+zbACgFkb6fKMNG6CObKpnYXChRsvYGyEdc7CA2BaqeOM+vOtCS5ndmJicPJhKAwYRI6UfFw==",
+ "requires": {
+ "@babel/highlight": "^7.14.5"
+ }
+ },
+ "@babel/helper-validator-identifier": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.5.tgz",
+ "integrity": "sha512-5lsetuxCLilmVGyiLEfoHBRX8UCFD+1m2x3Rj97WrW3V7H3u4RWRXA4evMjImCsin2J2YT0QaVDGf+z8ondbAg=="
+ },
+ "@babel/highlight": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.14.5.tgz",
+ "integrity": "sha512-qf9u2WFWVV0MppaL877j2dBtQIDgmidgjGk5VIMw3OadXvYaXn66U1BFlH2t4+t3i+8PhedppRv+i40ABzd+gg==",
+ "requires": {
+ "@babel/helper-validator-identifier": "^7.14.5",
+ "chalk": "^2.0.0",
+ "js-tokens": "^4.0.0"
+ }
+ },
+ "@nodelib/fs.scandir": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
+ "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
+ "requires": {
+ "@nodelib/fs.stat": "2.0.5",
+ "run-parallel": "^1.1.9"
+ }
+ },
+ "@nodelib/fs.stat": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
+ "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A=="
+ },
+ "@nodelib/fs.walk": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.7.tgz",
+ "integrity": "sha512-BTIhocbPBSrRmHxOAJFtR18oLhxTtAFDAvL8hY1S3iU8k+E60W/YFs4jrixGzQjMpF4qPXxIQHcjVD9dz1C2QA==",
+ "requires": {
+ "@nodelib/fs.scandir": "2.1.5",
+ "fastq": "^1.6.0"
+ }
+ },
+ "@octokit/auth-token": {
+ "version": "2.4.5",
+ "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-2.4.5.tgz",
+ "integrity": "sha512-BpGYsPgJt05M7/L/5FoE1PiAbdxXFZkX/3kDYcsvd1v6UhlnE5e96dTDr0ezX/EFwciQxf3cNV0loipsURU+WA==",
+ "requires": {
+ "@octokit/types": "^6.0.3"
+ }
+ },
+ "@octokit/core": {
+ "version": "3.4.0",
+ "resolved": "https://registry.npmjs.org/@octokit/core/-/core-3.4.0.tgz",
+ "integrity": "sha512-6/vlKPP8NF17cgYXqucdshWqmMZGXkuvtcrWCgU5NOI0Pl2GjlmZyWgBMrU8zJ3v2MJlM6++CiB45VKYmhiWWg==",
+ "requires": {
+ "@octokit/auth-token": "^2.4.4",
+ "@octokit/graphql": "^4.5.8",
+ "@octokit/request": "^5.4.12",
+ "@octokit/request-error": "^2.0.5",
+ "@octokit/types": "^6.0.3",
+ "before-after-hook": "^2.2.0",
+ "universal-user-agent": "^6.0.0"
+ }
+ },
+ "@octokit/endpoint": {
+ "version": "6.0.11",
+ "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-6.0.11.tgz",
+ "integrity": "sha512-fUIPpx+pZyoLW4GCs3yMnlj2LfoXTWDUVPTC4V3MUEKZm48W+XYpeWSZCv+vYF1ZABUm2CqnDVf1sFtIYrj7KQ==",
+ "requires": {
+ "@octokit/types": "^6.0.3",
+ "is-plain-object": "^5.0.0",
+ "universal-user-agent": "^6.0.0"
+ }
+ },
+ "@octokit/graphql": {
+ "version": "4.6.2",
+ "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-4.6.2.tgz",
+ "integrity": "sha512-WmsIR1OzOr/3IqfG9JIczI8gMJUMzzyx5j0XXQ4YihHtKlQc+u35VpVoOXhlKAlaBntvry1WpAzPl/a+s3n89Q==",
+ "requires": {
+ "@octokit/request": "^5.3.0",
+ "@octokit/types": "^6.0.3",
+ "universal-user-agent": "^6.0.0"
+ }
+ },
+ "@octokit/openapi-types": {
+ "version": "7.3.0",
+ "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-7.3.0.tgz",
+ "integrity": "sha512-o00X2FCLiEeXZkm1Ab5nvPUdVOlrpediwWZkpizUJ/xtZQsJ4FiQ2RB/dJEmb0Nk+NIz7zyDePcSCu/Y/0M3Ew=="
+ },
+ "@octokit/plugin-paginate-rest": {
+ "version": "2.13.3",
+ "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-2.13.3.tgz",
+ "integrity": "sha512-46lptzM9lTeSmIBt/sVP/FLSTPGx6DCzAdSX3PfeJ3mTf4h9sGC26WpaQzMEq/Z44cOcmx8VsOhO+uEgE3cjYg==",
+ "requires": {
+ "@octokit/types": "^6.11.0"
+ }
+ },
+ "@octokit/plugin-request-log": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/@octokit/plugin-request-log/-/plugin-request-log-1.0.3.tgz",
+ "integrity": "sha512-4RFU4li238jMJAzLgAwkBAw+4Loile5haQMQr+uhFq27BmyJXcXSKvoQKqh0agsZEiUlW6iSv3FAgvmGkur7OQ=="
+ },
+ "@octokit/plugin-rest-endpoint-methods": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-5.3.1.tgz",
+ "integrity": "sha512-3B2iguGmkh6bQQaVOtCsS0gixrz8Lg0v4JuXPqBcFqLKuJtxAUf3K88RxMEf/naDOI73spD+goJ/o7Ie7Cvdjg==",
+ "requires": {
+ "@octokit/types": "^6.16.2",
+ "deprecation": "^2.3.1"
+ }
+ },
+ "@octokit/request": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/@octokit/request/-/request-5.5.0.tgz",
+ "integrity": "sha512-jxbMLQdQ3heFMZUaTLSCqcKs2oAHEYh7SnLLXyxbZmlULExZ/RXai7QUWWFKowcGGPlCZuKTZg0gSKHWrfYEoQ==",
+ "requires": {
+ "@octokit/endpoint": "^6.0.1",
+ "@octokit/request-error": "^2.0.0",
+ "@octokit/types": "^6.16.1",
+ "is-plain-object": "^5.0.0",
+ "node-fetch": "^2.6.1",
+ "universal-user-agent": "^6.0.0"
+ }
+ },
+ "@octokit/request-error": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-2.0.5.tgz",
+ "integrity": "sha512-T/2wcCFyM7SkXzNoyVNWjyVlUwBvW3igM3Btr/eKYiPmucXTtkxt2RBsf6gn3LTzaLSLTQtNmvg+dGsOxQrjZg==",
+ "requires": {
+ "@octokit/types": "^6.0.3",
+ "deprecation": "^2.0.0",
+ "once": "^1.4.0"
+ }
+ },
+ "@octokit/rest": {
+ "version": "18.5.6",
+ "resolved": "https://registry.npmjs.org/@octokit/rest/-/rest-18.5.6.tgz",
+ "integrity": "sha512-8HdG6ZjQdZytU6tCt8BQ2XLC7EJ5m4RrbyU/EARSkAM1/HP3ceOzMG/9atEfe17EDMer3IVdHWLedz2wDi73YQ==",
+ "requires": {
+ "@octokit/core": "^3.2.3",
+ "@octokit/plugin-paginate-rest": "^2.6.2",
+ "@octokit/plugin-request-log": "^1.0.2",
+ "@octokit/plugin-rest-endpoint-methods": "5.3.1"
+ }
+ },
+ "@octokit/types": {
+ "version": "6.16.2",
+ "resolved": "https://registry.npmjs.org/@octokit/types/-/types-6.16.2.tgz",
+ "integrity": "sha512-wWPSynU4oLy3i4KGyk+J1BLwRKyoeW2TwRHgwbDz17WtVFzSK2GOErGliruIx8c+MaYtHSYTx36DSmLNoNbtgA==",
+ "requires": {
+ "@octokit/openapi-types": "^7.2.3"
+ }
+ },
+ "@semantic-release/changelog": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/@semantic-release/changelog/-/changelog-5.0.1.tgz",
+ "integrity": "sha512-unvqHo5jk4dvAf2nZ3aw4imrlwQ2I50eVVvq9D47Qc3R+keNqepx1vDYwkjF8guFXnOYaYcR28yrZWno1hFbiw==",
+ "requires": {
+ "@semantic-release/error": "^2.1.0",
+ "aggregate-error": "^3.0.0",
+ "fs-extra": "^9.0.0",
+ "lodash": "^4.17.4"
+ }
+ },
+ "@semantic-release/error": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/@semantic-release/error/-/error-2.2.0.tgz",
+ "integrity": "sha512-9Tj/qn+y2j+sjCI3Jd+qseGtHjOAeg7dU2/lVcqIQ9TV3QDaDXDYXcoOHU+7o2Hwh8L8ymL4gfuO7KxDs3q2zg=="
+ },
+ "@semantic-release/exec": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/@semantic-release/exec/-/exec-5.0.0.tgz",
+ "integrity": "sha512-t7LWXIvDJQbuGCy2WmMG51WyaGSLTvZBv9INvcI4S0kn+QjnnVVUMhcioIqhb0r3yqqarMzHVcABFug0q0OXjw==",
+ "requires": {
+ "@semantic-release/error": "^2.1.0",
+ "aggregate-error": "^3.0.0",
+ "debug": "^4.0.0",
+ "execa": "^4.0.0",
+ "lodash": "^4.17.4",
+ "parse-json": "^5.0.0"
+ }
+ },
+ "@semantic-release/git": {
+ "version": "9.0.0",
+ "resolved": "https://registry.npmjs.org/@semantic-release/git/-/git-9.0.0.tgz",
+ "integrity": "sha512-AZ4Zha5NAPAciIJH3ipzw/WU9qLAn8ENaoVAhD6srRPxTpTzuV3NhNh14rcAo8Paj9dO+5u4rTKcpetOBluYVw==",
+ "requires": {
+ "@semantic-release/error": "^2.1.0",
+ "aggregate-error": "^3.0.0",
+ "debug": "^4.0.0",
+ "dir-glob": "^3.0.0",
+ "execa": "^4.0.0",
+ "lodash": "^4.17.4",
+ "micromatch": "^4.0.0",
+ "p-reduce": "^2.0.0"
+ }
+ },
+ "@semantic-release/github": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/@semantic-release/github/-/github-7.2.3.tgz",
+ "integrity": "sha512-lWjIVDLal+EQBzy697ayUNN8MoBpp+jYIyW2luOdqn5XBH4d9bQGfTnjuLyzARZBHejqh932HVjiH/j4+R7VHw==",
+ "requires": {
+ "@octokit/rest": "^18.0.0",
+ "@semantic-release/error": "^2.2.0",
+ "aggregate-error": "^3.0.0",
+ "bottleneck": "^2.18.1",
+ "debug": "^4.0.0",
+ "dir-glob": "^3.0.0",
+ "fs-extra": "^10.0.0",
+ "globby": "^11.0.0",
+ "http-proxy-agent": "^4.0.0",
+ "https-proxy-agent": "^5.0.0",
+ "issue-parser": "^6.0.0",
+ "lodash": "^4.17.4",
+ "mime": "^2.4.3",
+ "p-filter": "^2.0.0",
+ "p-retry": "^4.0.0",
+ "url-join": "^4.0.0"
+ },
+ "dependencies": {
+ "fs-extra": {
+ "version": "10.0.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.0.0.tgz",
+ "integrity": "sha512-C5owb14u9eJwizKGdchcDUQeFtlSHHthBk8pbX9Vc1PFZrLombudjDnNns88aYslCyF6IY5SUw3Roz6xShcEIQ==",
+ "requires": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ }
+ }
+ }
+ },
+ "@tootallnate/once": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-1.1.2.tgz",
+ "integrity": "sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw=="
+ },
+ "@types/retry": {
+ "version": "0.12.0",
+ "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz",
+ "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA=="
+ },
+ "agent-base": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
+ "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
+ "requires": {
+ "debug": "4"
+ }
+ },
+ "aggregate-error": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz",
+ "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==",
+ "requires": {
+ "clean-stack": "^2.0.0",
+ "indent-string": "^4.0.0"
+ }
+ },
+ "ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "requires": {
+ "color-convert": "^1.9.0"
+ }
+ },
+ "array-union": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
+ "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw=="
+ },
+ "at-least-node": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz",
+ "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg=="
+ },
+ "before-after-hook": {
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.2.tgz",
+ "integrity": "sha512-3pZEU3NT5BFUo/AD5ERPWOgQOCZITni6iavr5AUw5AUwQjMlI0kzu5btnyD39AF0gUEsDPwJT+oY1ORBJijPjQ=="
+ },
+ "bottleneck": {
+ "version": "2.19.5",
+ "resolved": "https://registry.npmjs.org/bottleneck/-/bottleneck-2.19.5.tgz",
+ "integrity": "sha512-VHiNCbI1lKdl44tGrhNfU3lup0Tj/ZBMJB5/2ZbNXRCPuRCO7ed2mgcK4r17y+KB2EfuYuRaVlwNbAeaWGSpbw=="
+ },
+ "braces": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
+ "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
+ "requires": {
+ "fill-range": "^7.0.1"
+ }
+ },
+ "chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "requires": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ }
+ },
+ "clean-stack": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz",
+ "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A=="
+ },
+ "color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "requires": {
+ "color-name": "1.1.3"
+ }
+ },
+ "color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU="
+ },
+ "cross-spawn": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
+ "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
+ "requires": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ }
+ },
+ "debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "deprecation": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz",
+ "integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ=="
+ },
+ "dir-glob": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz",
+ "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==",
+ "requires": {
+ "path-type": "^4.0.0"
+ }
+ },
+ "end-of-stream": {
+ "version": "1.4.4",
+ "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
+ "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
+ "requires": {
+ "once": "^1.4.0"
+ }
+ },
+ "error-ex": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
+ "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
+ "requires": {
+ "is-arrayish": "^0.2.1"
+ }
+ },
+ "escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ="
+ },
+ "execa": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-4.1.0.tgz",
+ "integrity": "sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==",
+ "requires": {
+ "cross-spawn": "^7.0.0",
+ "get-stream": "^5.0.0",
+ "human-signals": "^1.1.1",
+ "is-stream": "^2.0.0",
+ "merge-stream": "^2.0.0",
+ "npm-run-path": "^4.0.0",
+ "onetime": "^5.1.0",
+ "signal-exit": "^3.0.2",
+ "strip-final-newline": "^2.0.0"
+ }
+ },
+ "fast-glob": {
+ "version": "3.2.5",
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.5.tgz",
+ "integrity": "sha512-2DtFcgT68wiTTiwZ2hNdJfcHNke9XOfnwmBRWXhmeKM8rF0TGwmC/Qto3S7RoZKp5cilZbxzO5iTNTQsJ+EeDg==",
+ "requires": {
+ "@nodelib/fs.stat": "^2.0.2",
+ "@nodelib/fs.walk": "^1.2.3",
+ "glob-parent": "^5.1.0",
+ "merge2": "^1.3.0",
+ "micromatch": "^4.0.2",
+ "picomatch": "^2.2.1"
+ }
+ },
+ "fastq": {
+ "version": "1.11.0",
+ "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.11.0.tgz",
+ "integrity": "sha512-7Eczs8gIPDrVzT+EksYBcupqMyxSHXXrHOLRRxU2/DicV8789MRBRR8+Hc2uWzUupOs4YS4JzBmBxjjCVBxD/g==",
+ "requires": {
+ "reusify": "^1.0.4"
+ }
+ },
+ "fill-range": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
+ "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
+ "requires": {
+ "to-regex-range": "^5.0.1"
+ }
+ },
+ "fs-extra": {
+ "version": "9.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz",
+ "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==",
+ "requires": {
+ "at-least-node": "^1.0.0",
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ }
+ },
+ "get-stream": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz",
+ "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==",
+ "requires": {
+ "pump": "^3.0.0"
+ }
+ },
+ "glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "requires": {
+ "is-glob": "^4.0.1"
+ }
+ },
+ "globby": {
+ "version": "11.0.3",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-11.0.3.tgz",
+ "integrity": "sha512-ffdmosjA807y7+lA1NM0jELARVmYul/715xiILEjo3hBLPTcirgQNnXECn5g3mtR8TOLCVbkfua1Hpen25/Xcg==",
+ "requires": {
+ "array-union": "^2.1.0",
+ "dir-glob": "^3.0.1",
+ "fast-glob": "^3.1.1",
+ "ignore": "^5.1.4",
+ "merge2": "^1.3.0",
+ "slash": "^3.0.0"
+ }
+ },
+ "graceful-fs": {
+ "version": "4.2.6",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz",
+ "integrity": "sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ=="
+ },
+ "has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0="
+ },
+ "http-proxy-agent": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz",
+ "integrity": "sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg==",
+ "requires": {
+ "@tootallnate/once": "1",
+ "agent-base": "6",
+ "debug": "4"
+ }
+ },
+ "https-proxy-agent": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.0.tgz",
+ "integrity": "sha512-EkYm5BcKUGiduxzSt3Eppko+PiNWNEpa4ySk9vTC6wDsQJW9rHSa+UhGNJoRYp7bz6Ht1eaRIa6QaJqO5rCFbA==",
+ "requires": {
+ "agent-base": "6",
+ "debug": "4"
+ }
+ },
+ "human-signals": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-1.1.1.tgz",
+ "integrity": "sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw=="
+ },
+ "ignore": {
+ "version": "5.1.8",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz",
+ "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw=="
+ },
+ "indent-string": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz",
+ "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg=="
+ },
+ "is-arrayish": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
+ "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0="
+ },
+ "is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI="
+ },
+ "is-glob": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz",
+ "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==",
+ "requires": {
+ "is-extglob": "^2.1.1"
+ }
+ },
+ "is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="
+ },
+ "is-plain-object": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz",
+ "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q=="
+ },
+ "is-stream": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz",
+ "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw=="
+ },
+ "isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA="
+ },
+ "issue-parser": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/issue-parser/-/issue-parser-6.0.0.tgz",
+ "integrity": "sha512-zKa/Dxq2lGsBIXQ7CUZWTHfvxPC2ej0KfO7fIPqLlHB9J2hJ7rGhZ5rilhuufylr4RXYPzJUeFjKxz305OsNlA==",
+ "requires": {
+ "lodash.capitalize": "^4.2.1",
+ "lodash.escaperegexp": "^4.1.2",
+ "lodash.isplainobject": "^4.0.6",
+ "lodash.isstring": "^4.0.1",
+ "lodash.uniqby": "^4.7.0"
+ }
+ },
+ "js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
+ },
+ "json-parse-even-better-errors": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
+ "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w=="
+ },
+ "jsonfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
+ "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
+ "requires": {
+ "graceful-fs": "^4.1.6",
+ "universalify": "^2.0.0"
+ }
+ },
+ "lines-and-columns": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz",
+ "integrity": "sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA="
+ },
+ "lodash": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
+ },
+ "lodash.capitalize": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/lodash.capitalize/-/lodash.capitalize-4.2.1.tgz",
+ "integrity": "sha1-+CbJtOKoUR2E46yinbBeGk87cqk="
+ },
+ "lodash.escaperegexp": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/lodash.escaperegexp/-/lodash.escaperegexp-4.1.2.tgz",
+ "integrity": "sha1-ZHYsSGGAglGKw99Mz11YhtriA0c="
+ },
+ "lodash.isplainobject": {
+ "version": "4.0.6",
+ "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz",
+ "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs="
+ },
+ "lodash.isstring": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz",
+ "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE="
+ },
+ "lodash.uniqby": {
+ "version": "4.7.0",
+ "resolved": "https://registry.npmjs.org/lodash.uniqby/-/lodash.uniqby-4.7.0.tgz",
+ "integrity": "sha1-2ZwHpmnp5tJOE2Lf4mbGdhavEwI="
+ },
+ "merge-stream": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
+ "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w=="
+ },
+ "merge2": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="
+ },
+ "micromatch": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.4.tgz",
+ "integrity": "sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg==",
+ "requires": {
+ "braces": "^3.0.1",
+ "picomatch": "^2.2.3"
+ }
+ },
+ "mime": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-2.5.2.tgz",
+ "integrity": "sha512-tqkh47FzKeCPD2PUiPB6pkbMzsCasjxAfC62/Wap5qrUWcb+sFasXUC5I3gYM5iBM8v/Qpn4UK0x+j0iHyFPDg=="
+ },
+ "mimic-fn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
+ "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg=="
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "node-fetch": {
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.1.tgz",
+ "integrity": "sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw=="
+ },
+ "npm-run-path": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz",
+ "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==",
+ "requires": {
+ "path-key": "^3.0.0"
+ }
+ },
+ "once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
+ "requires": {
+ "wrappy": "1"
+ }
+ },
+ "onetime": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
+ "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
+ "requires": {
+ "mimic-fn": "^2.1.0"
+ }
+ },
+ "p-filter": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/p-filter/-/p-filter-2.1.0.tgz",
+ "integrity": "sha512-ZBxxZ5sL2HghephhpGAQdoskxplTwr7ICaehZwLIlfL6acuVgZPm8yBNuRAFBGEqtD/hmUeq9eqLg2ys9Xr/yw==",
+ "requires": {
+ "p-map": "^2.0.0"
+ }
+ },
+ "p-map": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz",
+ "integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw=="
+ },
+ "p-reduce": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/p-reduce/-/p-reduce-2.1.0.tgz",
+ "integrity": "sha512-2USApvnsutq8uoxZBGbbWM0JIYLiEMJ9RlaN7fAzVNb9OZN0SHjjTTfIcb667XynS5Y1VhwDJVDa72TnPzAYWw=="
+ },
+ "p-retry": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.5.0.tgz",
+ "integrity": "sha512-5Hwh4aVQSu6BEP+w2zKlVXtFAaYQe1qWuVADSgoeVlLjwe/Q/AMSoRR4MDeaAfu8llT+YNbEijWu/YF3m6avkg==",
+ "requires": {
+ "@types/retry": "^0.12.0",
+ "retry": "^0.12.0"
+ }
+ },
+ "parse-json": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz",
+ "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==",
+ "requires": {
+ "@babel/code-frame": "^7.0.0",
+ "error-ex": "^1.3.1",
+ "json-parse-even-better-errors": "^2.3.0",
+ "lines-and-columns": "^1.1.6"
+ }
+ },
+ "path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="
+ },
+ "path-type": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
+ "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw=="
+ },
+ "picomatch": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz",
+ "integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw=="
+ },
+ "pump": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
+ "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
+ "requires": {
+ "end-of-stream": "^1.1.0",
+ "once": "^1.3.1"
+ }
+ },
+ "queue-microtask": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
+ "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A=="
+ },
+ "retry": {
+ "version": "0.12.0",
+ "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz",
+ "integrity": "sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs="
+ },
+ "reusify": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
+ "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw=="
+ },
+ "run-parallel": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
+ "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
+ "requires": {
+ "queue-microtask": "^1.2.2"
+ }
+ },
+ "shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "requires": {
+ "shebang-regex": "^3.0.0"
+ }
+ },
+ "shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="
+ },
+ "signal-exit": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz",
+ "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA=="
+ },
+ "slash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
+ "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q=="
+ },
+ "strip-final-newline": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
+ "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA=="
+ },
+ "supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ },
+ "to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "requires": {
+ "is-number": "^7.0.0"
+ }
+ },
+ "universal-user-agent": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-6.0.0.tgz",
+ "integrity": "sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w=="
+ },
+ "universalify": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
+ "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ=="
+ },
+ "url-join": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz",
+ "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA=="
+ },
+ "which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "requires": {
+ "isexe": "^2.0.0"
+ }
+ },
+ "wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8="
+ }
+ }
+}
diff --git a/pylint.sh b/pylint.sh
index 906f3921b..ad71867fb 100644
--- a/pylint.sh
+++ b/pylint.sh
@@ -1,8 +1,8 @@
#!/bin/bash
-# Runs pylint only for Python 2.7.X
+# Runs pylint only for Python 3.7
PYTHON_VERSION=$(python -c 'import sys; print(".".join(map(str, sys.version_info[:2])))')
echo "Python version: $PYTHON_VERSION"
-if [ $PYTHON_VERSION = '2.7' ]; then
+if [ $PYTHON_VERSION = '3.7' ]; then
pylint ibm_watson test examples
fi
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000..09eb447dc
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[tool.semantic_release]
+version_variables = [
+ "setup.py:__version__",
+ "ibm_watson/version.py:__version__",
+]
+version_toml = []
+branch = "master"
+
+[tool.semantic_release.changelog]
+exclude_commit_patterns = [
+ '''chore(?:\([^)]*?\))?: .+''',
+ '''ci(?:\([^)]*?\))?: .+''',
+ '''refactor(?:\([^)]*?\))?: .+''',
+ '''test(?:\([^)]*?\))?: .+''',
+ '''build\((?!deps\): .+)''',
+]
\ No newline at end of file
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 364178644..f04883ea7 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,21 +1,20 @@
# test dependencies
-pytest>=2.8.2
-responses==0.9.0
-python_dotenv>=0.1.5;python_version!='3.2'
-pylint>=1.4.4
-tox>=2.9.1
-pytest-rerunfailures>=3.1
-ibm_cloud_sdk_core>=0.2.0
+pytest==6.2.4
+responses==0.13.3
+python_dotenv==0.17.1;python_version!='3.2'
+pylint==2.8.2
+pytest-rerunfailures==9.1.1
+ibm_cloud_sdk_core>=3.3.6, == 3.*
# code coverage
-coverage<5
+coverage>=4, <5
codecov>=1.6.3
pytest-cov>=2.2.1
# documentation
-recommonmark>=0.2.0
-Sphinx>=1.3.1
-bumpversion>=0.5.3
+recommonmark==0.7.1
+Sphinx==3.5.2
+bumpversion==0.6.0
# Web sockets
-websocket-client==0.48.0
+websocket-client>=1.1.0
diff --git a/requirements.txt b/requirements.txt
index 9c43ea3ed..461b8746a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
requests>=2.0,<3.0
python_dateutil>=2.5.3
-websocket-client==0.48.0
-ibm_cloud_sdk_core>=0.2.0
\ No newline at end of file
+websocket-client>=1.1.0
+ibm_cloud_sdk_core>=3.3.6, == 3.*
diff --git a/resources/South_Africa_Luca_Galuzzi_2004.JPG b/resources/South_Africa_Luca_Galuzzi_2004.JPG
new file mode 100755
index 000000000..673758771
Binary files /dev/null and b/resources/South_Africa_Luca_Galuzzi_2004.JPG differ
diff --git a/resources/South_Africa_Luca_Galuzzi_2004.jpeg b/resources/South_Africa_Luca_Galuzzi_2004.jpeg
new file mode 100755
index 000000000..673758771
Binary files /dev/null and b/resources/South_Africa_Luca_Galuzzi_2004.jpeg differ
diff --git a/resources/TestEnrichments.csv b/resources/TestEnrichments.csv
new file mode 100644
index 000000000..0acd7812b
--- /dev/null
+++ b/resources/TestEnrichments.csv
@@ -0,0 +1,2 @@
+engine,gasket,piston,valves
+flag,green,yellow,red
\ No newline at end of file
diff --git a/resources/hello_world.txt b/resources/hello_world.txt
new file mode 100644
index 000000000..3b18e512d
--- /dev/null
+++ b/resources/hello_world.txt
@@ -0,0 +1 @@
+hello world
diff --git a/resources/ibm-credentials.env b/resources/ibm-credentials.env
deleted file mode 100644
index 008cb4f94..000000000
--- a/resources/ibm-credentials.env
+++ /dev/null
@@ -1,4 +0,0 @@
-VISUAL_RECOGNITION_APIKEY=1234abcd
-VISUAL_RECOGNITION_URL=https://stgwat-us-south-mzr-cruiser6.us-south.containers.cloud.ibm.com/visual-recognition/api
-WATSON_APIKEY=5678efgh
-WATSON_URL=https://gateway-s.watsonplatform.net/watson/api
\ No newline at end of file
diff --git a/resources/my-giraffe.jpeg b/resources/my-giraffe.jpeg
new file mode 100644
index 000000000..ebed77d35
Binary files /dev/null and b/resources/my-giraffe.jpeg differ
diff --git a/resources/personality-v3-es.txt b/resources/personality-v3-es.txt
deleted file mode 100644
index 950fdb28e..000000000
--- a/resources/personality-v3-es.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-En un lugar de la Mancha, de cuyo nombre no quiero acordarme, no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero, adarga antigua, rocín flaco y galgo corredor. Una olla de algo más vaca que carnero, salpicón las más noches, duelos y quebrantos los sábados, lantejas los viernes, algún palomino de añadidura los domingos, consumían las tres partes de su hacienda. El resto della concluían sayo de velarte, calzas de velludo para las fiestas, con sus pantuflos de lo mesmo, y los días de entresemana se honraba con su vellorí de lo más fino. Tenía en su casa una ama que pasaba de los cuarenta, y una sobrina que no llegaba a los veinte, y un mozo de campo y plaza, que así ensillaba el rocín como tomaba la podadera. Frisaba la edad de nuestro hidalgo con los cincuenta años; era de complexión recia, seco de carnes, enjuto de rostro, gran madrugador y amigo de la caza. Quieren decir que tenía el sobrenombre de Quijada, o Quesada, que en esto hay alguna diferencia en los autores que deste caso escriben; aunque, por conjeturas verosímiles, se deja entender que se llamaba Quejana. Pero esto importa poco a nuestro cuento; basta que en la narración dél no se salga un punto de la verdad.
-Es, pues, de saber que este sobredicho hidalgo, los ratos que estaba ocioso, que eran los más del año, se daba a leer libros de caballerías, con tanta afición y gusto, que olvidó casi de todo punto el ejercicio de la caza, y aun la administración de su hacienda. Y llegó a tanto su curiosidad y desatino en esto, que vendió muchas hanegas de tierra de sembradura para comprar libros de caballerías en que leer, y así, llevó a su casa todos cuantos pudo haber dellos; y de todos, ningunos le parecían tan bien como los que compuso el famoso Feliciano de Silva, porque la claridad de su prosa y aquellas entricadas razones suyas le parecían de perlas, y más cuando llegaba a leer aquellos requiebros y cartas de desafíos, donde en muchas partes hallaba escrito: La razón de la sinrazón que a mi razón se hace, de tal manera mi razón enflaquece, que con razón me quejo de la vuestra fermosura. Y también cuando leía: ...los altos cielos que de vuestra divinidad divinamente con las estrellas os fortifican, y os hacen merecedora del merecimiento que merece la vuestra grandeza.
-Con estas razones perdía el pobre caballero el juicio, y desvelábase por entenderlas y desentrañarles el sentido, que no se lo sacara ni las entendiera el mesmo Aristóteles, si resucitara para sólo ello. No estaba muy bien con las heridas que don Belianís daba y recebía, porque se imaginaba que, por grandes maestros que le hubiesen curado, no dejaría de tener el rostro y todo el cuerpo lleno de cicatrices y señales. Pero, con todo, alababa en su autor aquel acabar su libro con la promesa de aquella inacabable aventura, y muchas veces le vino deseo de tomar la pluma y dalle fin al pie de la letra, como allí se promete; y sin duda alguna lo hiciera, y aun saliera con ello, si otros mayores y continuos pensamientos no se lo estorbaran. Tuvo muchas veces competencia con el cura de su lugar —que era hombre docto, graduado en Sigüenza—, sobre cuál había sido mejor caballero: Palmerín de Ingalaterra o Amadís de Gaula; mas maese Nicolás, barbero del mesmo pueblo, decía que ninguno llegaba al Caballero del Febo, y que si alguno se le podía comparar, era don Galaor, hermano de Amadís de Gaula, porque tenía muy acomodada condición para todo; que no era caballero melindroso, ni tan llorón como su hermano, y que en lo de la valentía no le iba en zaga.
-En resolución, él se enfrascó tanto en su letura, que se le pasaban las noches leyendo de claro en claro, y los días de turbio en turbio; y así, del poco dormir y del mucho leer, se le secó el celebro, de manera que vino a perder el juicio. Llenósele la fantasía de todo aquello que leía en los libros, así de encantamentos como de pendencias, batallas, desafíos, heridas, requiebros, amores, tormentas y disparates imposibles; y asentósele de tal modo en la imaginación que era verdad toda aquella máquina de aquellas sonadas soñadas invenciones que leía, que para él no había otra historia más cierta en el mundo. Decía él que el Cid Ruy Díaz había sido muy buen caballero, pero que no tenía que ver con el Caballero de la Ardiente Espada, que de sólo un revés había partido por medio dos fieros y descomunales gigantes. Mejor estaba con Bernardo del Carpio, porque en Roncesvalles había muerto a Roldán el encantado, valiéndose de la industria de Hércules, cuando ahogó a Anteo, el hijo de la Tierra, entre los brazos. Decía mucho bien del gigante Morgante, porque, con ser de aquella generación gigantea, que todos son soberbios y descomedidos, él solo era afable y bien criado. Pero, sobre todos, estaba bien con Reinaldos de Montalbán, y más cuando le veía salir de su castillo y robar cuantos topaba, y cuando en allende robó aquel ídolo de Mahoma que era todo de oro, según dice su historia. Diera él, por dar una mano de coces al traidor de Galalón, al ama que tenía, y aun a su sobrina de añadidura.
-En efeto, rematado ya su juicio, vino a dar en el más estraño pensamiento que jamás dio loco en el mundo; y fue que le pareció convenible y necesario, así para el aumento de su honra como para el servicio de su república, hacerse caballero andante, y irse por todo el mundo con sus armas y caballo a buscar las aventuras y a ejercitarse en todo aquello que él había leído que los caballeros andantes se ejercitaban, deshaciendo todo género de agravio, y poniéndose en ocasiones y peligros donde, acabándolos, cobrase eterno nombre y fama. Imaginábase el pobre ya coronado por el valor de su brazo, por lo menos, del imperio de Trapisonda; y así, con estos tan agradables pensamientos, llevado del estraño gusto que en ellos sentía, se dio priesa a poner en efeto lo que deseaba.
-Y lo primero que hizo fue limpiar unas armas que habían sido de sus bisabuelos, que, tomadas de orín y llenas de moho, luengos siglos había que estaban puestas y olvidadas en un rincón. Limpiólas y aderezólas lo mejor que pudo, pero vio que tenían una gran falta, y era que no tenían celada de encaje, sino morrión simple; mas a esto suplió su industria, porque de cartones hizo un modo de media celada, que, encajada con el morrión, hacían una apariencia de celada entera. Es verdad que para probar si era fuerte y podía estar al riesgo de una cuchillada, sacó su espada y le dio dos golpes, y con el primero y en un punto deshizo lo que había hecho en una semana; y no dejó de parecerle mal la facilidad con que la había hecho pedazos, y, por asegurarse deste peligro, la tornó a hacer de nuevo, poniéndole unas barras de hierro por de dentro, de tal manera que él quedó satisfecho de su fortaleza; y, sin querer hacer nueva experiencia della, la diputó y tuvo por celada finísima de encaje.
-Fue luego a ver su rocín, y, aunque tenía más cuartos que un real y más tachas que el caballo de Gonela, que tantum pellis et ossa fuit, le pareció que ni el Bucéfalo de Alejandro ni Babieca el del Cid con él se igualaban. Cuatro días se le pasaron en imaginar qué nombre le pondría; porque, según se decía él a sí mesmo, no era razón que caballo de caballero tan famoso, y tan bueno él por sí, estuviese sin nombre conocido; y ansí, procuraba acomodársele de manera que declarase quién había sido, antes que fuese de caballero andante, y lo que era entonces; pues estaba muy puesto en razón que, mudando su señor estado, mudase él también el nombre, y le cobrase famoso y de estruendo, como convenía a la nueva orden y al nuevo ejercicio que ya profesaba. Y así, después de muchos nombres que formó, borró y quitó, añadió, deshizo y tornó a hacer en su memoria e imaginación, al fin le vino a llamar Rocinante: nombre, a su parecer, alto, sonoro y significativo de lo que había sido cuando fue rocín, antes de lo que ahora era, que era antes y primero de todos los rocines del mundo.
-Puesto nombre, y tan a su gusto, a su caballo, quiso ponérsele a sí mismo, y en este pensamiento duró otros ocho días, y al cabo se vino a llamar don Quijote; de donde —como queda dicho— tomaron ocasión los autores desta tan verdadera historia que, sin duda, se debía de llamar Quijada, y no Quesada, como otros quisieron decir. Pero, acordándose que el valeroso Amadís no sólo se había contentado con llamarse Amadís a secas, sino que añadió el nombre de su reino y patria, por Hepila famosa, y se llamó Amadís de Gaula, así quiso, como buen caballero, añadir al suyo el nombre de la suya y llamarse don Quijote de la Mancha, con que, a su parecer, declaraba muy al vivo su linaje y patria, y la honraba con tomar el sobrenombre della.
-Limpias, pues, sus armas, hecho del morrión celada, puesto nombre a su rocín y confirmándose a sí mismo, se dio a entender que no le faltaba otra cosa sino buscar una dama de quien enamorarse; porque el caballero andante sin amores era árbol sin hojas y sin fruto y cuerpo sin alma. Decíase él a sí:
-— Si yo, por malos de mis pecados, o por mi buena suerte, me encuentro por ahí con algún gigante, como de ordinario les acontece a los caballeros andantes, y le derribo de un encuentro, o le parto por mitad del cuerpo, o, finalmente, le venzo y le rindo, ¿no será bien tener a quien enviarle presentado y que entre y se hinque de rodillas ante mi dulce señora, y diga con voz humilde y rendido: ''Yo, señora, soy el gigante Caraculiambro, señor de la ínsula Malindrania, a quien venció en singular batalla el jamás como se debe alabado caballero don Quijote de la Mancha, el cual me mandó que me presentase ante vuestra merced, para que la vuestra grandeza disponga de mí a su talante''?
-¡Oh, cómo se holgó nuestro buen caballero cuando hubo hecho este discurso, y más cuando halló a quien dar nombre de su dama! Y fue, a lo que se cree, que en un lugar cerca del suyo había una moza labradora de muy buen parecer, de quien él un tiempo anduvo enamorado, aunque, según se entiende, ella jamás lo supo, ni le dio cata dello. Llamábase Aldonza Lorenzo, y a ésta le pareció ser bien darle título de señora de sus pensamientos; y, buscándole nombre que no desdijese mucho del suyo, y que tirase y se encaminase al de princesa y gran señora, vino a llamarla Dulcinea del Toboso, porque era natural del Toboso; nombre, a su parecer, músico y peregrino y significativo, como todos los demás que a él y a sus cosas había puesto.
-
-
diff --git a/resources/personality-v3-expect1.txt b/resources/personality-v3-expect1.txt
deleted file mode 100755
index b69ea6bd4..000000000
--- a/resources/personality-v3-expect1.txt
+++ /dev/null
@@ -1 +0,0 @@
-{"word_count":1365,"processed_language":"en","personality":[{"trait_id":"big5_openness","name":"Openness","category":"personality","percentile":0.9970814244982864,"children":[{"trait_id":"facet_adventurousness","name":"Adventurousness","category":"personality","percentile":0.7897453561510369},{"trait_id":"facet_artistic_interests","name":"Artistic interests","category":"personality","percentile":0.9946576519208279},{"trait_id":"facet_emotionality","name":"Emotionality","category":"personality","percentile":0.7671631753694098},{"trait_id":"facet_imagination","name":"Imagination","category":"personality","percentile":0.3116772371947326},{"trait_id":"facet_intellect","name":"Intellect","category":"personality","percentile":0.9965199807027891},{"trait_id":"facet_liberalism","name":"Authority-challenging","category":"personality","percentile":0.797907272149325}]},{"trait_id":"big5_conscientiousness","name":"Conscientiousness","category":"personality","percentile":0.986401677449357,"children":[{"trait_id":"facet_achievement_striving","name":"Achievement striving","category":"personality","percentile":0.8403728912342907},{"trait_id":"facet_cautiousness","name":"Cautiousness","category":"personality","percentile":0.944186945742299},{"trait_id":"facet_dutifulness","name":"Dutifulness","category":"personality","percentile":0.7946276293038717},{"trait_id":"facet_orderliness","name":"Orderliness","category":"personality","percentile":0.7610741506407186},{"trait_id":"facet_self_discipline","name":"Self-discipline","category":"personality","percentile":0.712864917583896},{"trait_id":"facet_self_efficacy","name":"Self-efficacy","category":"personality","percentile":0.6994302718651364}]},{"trait_id":"big5_extraversion","name":"Extraversion","category":"personality","percentile":0.08530058556548259,"children":[{"trait_id":"facet_activity_level","name":"Activity 
level","category":"personality","percentile":0.962401631341592},{"trait_id":"facet_assertiveness","name":"Assertiveness","category":"personality","percentile":0.9198609213386704},{"trait_id":"facet_cheerfulness","name":"Cheerfulness","category":"personality","percentile":0.2293639969883699},{"trait_id":"facet_excitement_seeking","name":"Excitement-seeking","category":"personality","percentile":0.21024192850794732},{"trait_id":"facet_friendliness","name":"Outgoing","category":"personality","percentile":0.7085191412979603},{"trait_id":"facet_gregariousness","name":"Gregariousness","category":"personality","percentile":0.22458619358372}]},{"trait_id":"big5_agreeableness","name":"Agreeableness","category":"personality","percentile":0.1875352860319472,"children":[{"trait_id":"facet_altruism","name":"Altruism","category":"personality","percentile":0.9713302006331768},{"trait_id":"facet_cooperation","name":"Cooperation","category":"personality","percentile":0.8229934901276204},{"trait_id":"facet_modesty","name":"Modesty","category":"personality","percentile":0.761318814834163},{"trait_id":"facet_morality","name":"Uncompromising","category":"personality","percentile":0.9471478882849421},{"trait_id":"facet_sympathy","name":"Sympathy","category":"personality","percentile":0.9991179451374892},{"trait_id":"facet_trust","name":"Trust","category":"personality","percentile":0.830111046812001}]},{"trait_id":"big5_neuroticism","name":"Emotional range","category":"personality","percentile":0.9438564164580463,"children":[{"trait_id":"facet_anger","name":"Fiery","category":"personality","percentile":0.013938100678608567},{"trait_id":"facet_anxiety","name":"Prone to 
worry","category":"personality","percentile":0.062025789454073055},{"trait_id":"facet_depression","name":"Melancholy","category":"personality","percentile":0.35285841125133055},{"trait_id":"facet_immoderation","name":"Immoderation","category":"personality","percentile":0.011684379342279061},{"trait_id":"facet_self_consciousness","name":"Self-consciousness","category":"personality","percentile":0.19347068940127837},{"trait_id":"facet_vulnerability","name":"Susceptible to stress","category":"personality","percentile":0.06994539774378672}]}],"needs":[{"trait_id":"need_challenge","name":"Challenge","category":"needs","percentile":0.0032546536914939694},{"trait_id":"need_closeness","name":"Closeness","category":"needs","percentile":0.37022781101806856},{"trait_id":"need_curiosity","name":"Curiosity","category":"needs","percentile":0.845180482624851},{"trait_id":"need_excitement","name":"Excitement","category":"needs","percentile":0.11505596926601303},{"trait_id":"need_harmony","name":"Harmony","category":"needs","percentile":0.4664217424750215},{"trait_id":"need_ideal","name":"Ideal","category":"needs","percentile":0.02263412995273062},{"trait_id":"need_liberty","name":"Liberty","category":"needs","percentile":0.10802987716456186},{"trait_id":"need_love","name":"Love","category":"needs","percentile":0.01189533382101321},{"trait_id":"need_practicality","name":"Practicality","category":"needs","percentile":0.018888178951272983},{"trait_id":"need_self_expression","name":"Self-expression","category":"needs","percentile":0.18489782806561655},{"trait_id":"need_stability","name":"Stability","category":"needs","percentile":0.3946227431440047},{"trait_id":"need_structure","name":"Structure","category":"needs","percentile":0.8880129689346332}],"values":[{"trait_id":"value_conservation","name":"Conservation","category":"values","percentile":0.5065929218618456},{"trait_id":"value_openness_to_change","name":"Openness to 
change","category":"values","percentile":0.6287516949462554},{"trait_id":"value_hedonism","name":"Hedonism","category":"values","percentile":0.005253658217920731},{"trait_id":"value_self_enhancement","name":"Self-enhancement","category":"values","percentile":0.0011936431143393933},{"trait_id":"value_self_transcendence","name":"Self-transcendence","category":"values","percentile":0.3429609693883737}],"warnings":[]}
diff --git a/resources/personality-v3-expect2.txt b/resources/personality-v3-expect2.txt
deleted file mode 100755
index d89e5199e..000000000
--- a/resources/personality-v3-expect2.txt
+++ /dev/null
@@ -1 +0,0 @@
-{"word_count":15223,"processed_language":"en","personality":[{"trait_id":"big5_openness","name":"Openness","category":"personality","percentile":0.8011555009552956,"raw_score":0.7756540425503803,"children":[{"trait_id":"facet_adventurousness","name":"Adventurousness","category":"personality","percentile":0.8975586904731889,"raw_score":0.5499070403121904},{"trait_id":"facet_artistic_interests","name":"Artistic interests","category":"personality","percentile":0.9770309419531911,"raw_score":0.7663670485959833},{"trait_id":"facet_emotionality","name":"Emotionality","category":"personality","percentile":0.9947058875647474,"raw_score":0.7524002152027132},{"trait_id":"facet_imagination","name":"Imagination","category":"personality","percentile":0.8733065387317464,"raw_score":0.7915903144017673},{"trait_id":"facet_intellect","name":"Intellect","category":"personality","percentile":0.8717194796402018,"raw_score":0.6597622585300691},{"trait_id":"facet_liberalism","name":"Authority-challenging","category":"personality","percentile":0.6405414845731194,"raw_score":0.5343564751353819}]},{"trait_id":"big5_conscientiousness","name":"Conscientiousness","category":"personality","percentile":0.8100175318417588,"raw_score":0.6689998488881546,"children":[{"trait_id":"facet_achievement_striving","name":"Achievement 
striving","category":"personality","percentile":0.8461329922662831,"raw_score":0.7424011845488805},{"trait_id":"facet_cautiousness","name":"Cautiousness","category":"personality","percentile":0.7220362727004178,"raw_score":0.5296482988959449},{"trait_id":"facet_dutifulness","name":"Dutifulness","category":"personality","percentile":0.8421638467925515,"raw_score":0.6834730565103805},{"trait_id":"facet_orderliness","name":"Orderliness","category":"personality","percentile":0.6121858586705231,"raw_score":0.5034920799431641},{"trait_id":"facet_self_discipline","name":"Self-discipline","category":"personality","percentile":0.8317329416265953,"raw_score":0.616433633126353},{"trait_id":"facet_self_efficacy","name":"Self-efficacy","category":"personality","percentile":0.70883137095439,"raw_score":0.7724413163310413}]},{"trait_id":"big5_extraversion","name":"Extraversion","category":"personality","percentile":0.6498079607138185,"raw_score":0.5681773878116614,"children":[{"trait_id":"facet_activity_level","name":"Activity 
level","category":"personality","percentile":0.8822058491396538,"raw_score":0.6010699592614316},{"trait_id":"facet_assertiveness","name":"Assertiveness","category":"personality","percentile":0.668984138017408,"raw_score":0.6659099991098552},{"trait_id":"facet_cheerfulness","name":"Cheerfulness","category":"personality","percentile":0.9435264775235841,"raw_score":0.671332415082109},{"trait_id":"facet_excitement_seeking","name":"Excitement-seeking","category":"personality","percentile":0.5913387477205387,"raw_score":0.6133983269914512},{"trait_id":"facet_friendliness","name":"Outgoing","category":"personality","percentile":0.9577289025786391,"raw_score":0.6470028893580052},{"trait_id":"facet_gregariousness","name":"Gregariousness","category":"personality","percentile":0.6494284805198431,"raw_score":0.4730737068164407}]},{"trait_id":"big5_agreeableness","name":"Agreeableness","category":"personality","percentile":0.9478612479382063,"raw_score":0.8067781563180865,"children":[{"trait_id":"facet_altruism","name":"Altruism","category":"personality","percentile":0.9924198382420473,"raw_score":0.7902840629074717},{"trait_id":"facet_cooperation","name":"Cooperation","category":"personality","percentile":0.8612307420897902,"raw_score":0.644809933616134},{"trait_id":"facet_modesty","name":"Modesty","category":"personality","percentile":0.7726811931877515,"raw_score":0.4878296372120652},{"trait_id":"facet_morality","name":"Uncompromising","category":"personality","percentile":0.890791023357115,"raw_score":0.6838825205363425},{"trait_id":"facet_sympathy","name":"Sympathy","category":"personality","percentile":0.994218470874908,"raw_score":0.759901709852522},{"trait_id":"facet_trust","name":"Trust","category":"personality","percentile":0.9036111955659848,"raw_score":0.6394572920931907}]},{"trait_id":"big5_neuroticism","name":"Emotional 
range","category":"personality","percentile":0.5008224041628007,"raw_score":0.46748200007024476,"children":[{"trait_id":"facet_anger","name":"Fiery","category":"personality","percentile":0.17640022058508498,"raw_score":0.48490315691801983},{"trait_id":"facet_anxiety","name":"Prone to worry","category":"personality","percentile":0.42883076062186987,"raw_score":0.5818806184582846},{"trait_id":"facet_depression","name":"Melancholy","category":"personality","percentile":0.15019740428715633,"raw_score":0.3828467842344732},{"trait_id":"facet_immoderation","name":"Immoderation","category":"personality","percentile":0.26916719249302234,"raw_score":0.47694218652589115},{"trait_id":"facet_self_consciousness","name":"Self-consciousness","category":"personality","percentile":0.30351543340675236,"raw_score":0.5196515289516266},{"trait_id":"facet_vulnerability","name":"Susceptible to stress","category":"personality","percentile":0.3897206832678008,"raw_score":0.44977966970810673}]}],"needs":[{"trait_id":"need_challenge","name":"Challenge","category":"needs","percentile":0.673623320545115,"raw_score":0.751963480376755},{"trait_id":"need_closeness","name":"Closeness","category":"needs","percentile":0.8380283404181322,"raw_score":0.8371432732972359},{"trait_id":"need_curiosity","name":"Curiosity","category":"needs","percentile":0.9293839318960936,"raw_score":0.855371256030684},{"trait_id":"need_excitement","name":"Excitement","category":"needs","percentile":0.7280972568828032,"raw_score":0.7334275298402744},{"trait_id":"need_harmony","name":"Harmony","category":"needs","percentile":0.9694112904157444,"raw_score":0.8739053596457717},{"trait_id":"need_ideal","name":"Ideal","category":"needs","percentile":0.6824330657640135,"raw_score":0.7136043544694086},{"trait_id":"need_liberty","name":"Liberty","category":"needs","percentile":0.786964400223518,"raw_score":0.7663288169238623},{"trait_id":"need_love","name":"Love","category":"needs","percentile":0.8207992048058734,"raw_score":0.81333
68299186845},{"trait_id":"need_practicality","name":"Practicality","category":"needs","percentile":0.3503620508268639,"raw_score":0.7194693605746305},{"trait_id":"need_self_expression","name":"Self-expression","category":"needs","percentile":0.8673284357850473,"raw_score":0.7134630858462259},{"trait_id":"need_stability","name":"Stability","category":"needs","percentile":0.8732565885512285,"raw_score":0.7708158066758997},{"trait_id":"need_structure","name":"Structure","category":"needs","percentile":0.7456082872690646,"raw_score":0.7139823598365089}],"values":[{"trait_id":"value_conservation","name":"Conservation","category":"values","percentile":0.8926822285613875,"raw_score":0.7213530818742335},{"trait_id":"value_openness_to_change","name":"Openness to change","category":"values","percentile":0.8575991638808613,"raw_score":0.825513084313229},{"trait_id":"value_hedonism","name":"Hedonism","category":"values","percentile":0.44128086884054324,"raw_score":0.7287543244960342},{"trait_id":"value_self_enhancement","name":"Self-enhancement","category":"values","percentile":0.6458578881392593,"raw_score":0.7227461699193419},{"trait_id":"value_self_transcendence","name":"Self-transcendence","category":"values","percentile":0.8237769534534466,"raw_score":0.8481040055218539}],"behavior":[{"trait_id":"behavior_sunday","name":"Sunday","category":"behavior","percentage":0.21392532795156408},{"trait_id":"behavior_monday","name":"Monday","category":"behavior","percentage":0.425832492431887},{"trait_id":"behavior_tuesday","name":"Tuesday","category":"behavior","percentage":0.07164480322906155},{"trait_id":"behavior_wednesday","name":"Wednesday","category":"behavior","percentage":0.011099899091826439},{"trait_id":"behavior_thursday","name":"Thursday","category":"behavior","percentage":0.12209889001009082},{"trait_id":"behavior_friday","name":"Friday","category":"behavior","percentage":0.07769929364278506},{"trait_id":"behavior_saturday","name":"Saturday","category":"behavior","percen
tage":0.07769929364278506},{"trait_id":"behavior_0000","name":"0:00 am","category":"behavior","percentage":0.45610494450050454},{"trait_id":"behavior_0100","name":"1:00 am","category":"behavior","percentage":0.12209889001009082},{"trait_id":"behavior_0200","name":"2:00 am","category":"behavior","percentage":0.02119071644803229},{"trait_id":"behavior_0300","name":"3:00 am","category":"behavior","percentage":0.09485368314833502},{"trait_id":"behavior_0400","name":"4:00 am","category":"behavior","percentage":0.020181634712411706},{"trait_id":"behavior_0500","name":"5:00 am","category":"behavior","percentage":0.0},{"trait_id":"behavior_0600","name":"6:00 am","category":"behavior","percentage":0.0},{"trait_id":"behavior_0700","name":"7:00 am","category":"behavior","percentage":0.011099899091826439},{"trait_id":"behavior_0800","name":"8:00 am","category":"behavior","percentage":0.0},{"trait_id":"behavior_0900","name":"9:00 am","category":"behavior","percentage":0.0},{"trait_id":"behavior_1000","name":"10:00 am","category":"behavior","percentage":0.0},{"trait_id":"behavior_1100","name":"11:00 am","category":"behavior","percentage":0.0},{"trait_id":"behavior_1200","name":"12:00 pm","category":"behavior","percentage":0.0},{"trait_id":"behavior_1300","name":"1:00 pm","category":"behavior","percentage":0.0},{"trait_id":"behavior_1400","name":"2:00 pm","category":"behavior","percentage":0.0},{"trait_id":"behavior_1500","name":"3:00 pm","category":"behavior","percentage":0.022199798183652877},{"trait_id":"behavior_1600","name":"4:00 pm","category":"behavior","percentage":0.022199798183652877},{"trait_id":"behavior_1700","name":"5:00 pm","category":"behavior","percentage":0.03229061553985873},{"trait_id":"behavior_1800","name":"6:00 pm","category":"behavior","percentage":0.010090817356205853},{"trait_id":"behavior_1900","name":"7:00 pm","category":"behavior","percentage":0.011099899091826439},{"trait_id":"behavior_2000","name":"8:00 
pm","category":"behavior","percentage":0.022199798183652877},{"trait_id":"behavior_2100","name":"9:00 pm","category":"behavior","percentage":0.0},{"trait_id":"behavior_2200","name":"10:00 pm","category":"behavior","percentage":0.03128153380423814},{"trait_id":"behavior_2300","name":"11:00 pm","category":"behavior","percentage":0.1231079717457114}],"consumption_preferences":[{"consumption_preference_category_id":"consumption_preferences_shopping","name":"Purchasing Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_automobile_ownership_cost","name":"Likely to be sensitive to ownership cost when buying automobiles","score":0.0},{"consumption_preference_id":"consumption_preferences_automobile_safety","name":"Likely to prefer safety when buying automobiles","score":0.5},{"consumption_preference_id":"consumption_preferences_automobile_resale_value","name":"Likely to prefer resale value when buying automobiles","score":1.0},{"consumption_preference_id":"consumption_preferences_clothes_quality","name":"Likely to prefer quality when buying clothes","score":0.0},{"consumption_preference_id":"consumption_preferences_clothes_style","name":"Likely to prefer style when buying clothes","score":1.0},{"consumption_preference_id":"consumption_preferences_clothes_comfort","name":"Likely to prefer comfort when buying clothes","score":0.0},{"consumption_preference_id":"consumption_preferences_influence_brand_name","name":"Likely to be influenced by brand name when making product purchases","score":0.5},{"consumption_preference_id":"consumption_preferences_influence_utility","name":"Likely to be influenced by product utility when making product purchases","score":0.5},{"consumption_preference_id":"consumption_preferences_influence_online_ads","name":"Likely to be influenced by online ads when making product purchases","score":1.0},{"consumption_preference_id":"consumption_preferences_influence_social_media","name":"Likely to be influenced by 
social media when making product purchases","score":1.0},{"consumption_preference_id":"consumption_preferences_influence_family_members","name":"Likely to be influenced by family when making product purchases","score":1.0},{"consumption_preference_id":"consumption_preferences_spur_of_moment","name":"Likely to indulge in spur of the moment purchases","score":0.5},{"consumption_preference_id":"consumption_preferences_credit_card_payment","name":"Likely to prefer using credit cards for shopping","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_health_and_activity","name":"Health & Activity Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_eat_out","name":"Likely to eat out frequently","score":1.0},{"consumption_preference_id":"consumption_preferences_fast_food_frequency","name":"Likely to eat fast food frequently","score":1.0},{"consumption_preference_id":"consumption_preferences_gym_membership","name":"Likely to have a gym membership","score":1.0},{"consumption_preference_id":"consumption_preferences_adventurous_sports","name":"Likely to like adventurous sports","score":1.0},{"consumption_preference_id":"consumption_preferences_outdoor","name":"Likely to like outdoor activities","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_environmental_concern","name":"Environmental Concern Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_concerned_environment","name":"Likely to be concerned about the environment","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_entrepreneurship","name":"Entrepreneurship Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_start_business","name":"Likely to consider starting a business in next few years","score":1.0}]},{"consumption_preference_category_id":"consumption_preferences_movie","name":"Movie 
Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_movie_romance","name":"Likely to like romance movies","score":1.0},{"consumption_preference_id":"consumption_preferences_movie_adventure","name":"Likely to like adventure movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_horror","name":"Likely to like horror movies","score":1.0},{"consumption_preference_id":"consumption_preferences_movie_musical","name":"Likely to like musical movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_historical","name":"Likely to like historical movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_science_fiction","name":"Likely to like science-fiction movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_war","name":"Likely to like war movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_drama","name":"Likely to like drama movies","score":1.0},{"consumption_preference_id":"consumption_preferences_movie_action","name":"Likely to like action movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_documentary","name":"Likely to like documentary movies","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_music","name":"Music Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_music_rap","name":"Likely to like rap music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_country","name":"Likely to like country music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_r_b","name":"Likely to like R&B music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_hip_hop","name":"Likely to like hip hop music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_live_event","name":"Likely to attend live musical 
events","score":0.0},{"consumption_preference_id":"consumption_preferences_music_playing","name":"Likely to have experience playing music","score":0.0},{"consumption_preference_id":"consumption_preferences_music_latin","name":"Likely to like Latin music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_rock","name":"Likely to like rock music","score":0.0},{"consumption_preference_id":"consumption_preferences_music_classical","name":"Likely to like classical music","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_reading","name":"Reading Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_read_frequency","name":"Likely to read often","score":0.0},{"consumption_preference_id":"consumption_preferences_read_motive_enjoyment","name":"Likely to read for enjoyment","score":0.0},{"consumption_preference_id":"consumption_preferences_read_motive_information","name":"Likely to read for information","score":0.0},{"consumption_preference_id":"consumption_preferences_books_entertainment_magazines","name":"Likely to read entertainment magazines","score":1.0},{"consumption_preference_id":"consumption_preferences_books_non_fiction","name":"Likely to read non-fiction books","score":0.0},{"consumption_preference_id":"consumption_preferences_read_motive_mandatory","name":"Likely to do mandatory reading only","score":1.0},{"consumption_preference_id":"consumption_preferences_read_motive_relaxation","name":"Likely to read for relaxation","score":1.0},{"consumption_preference_id":"consumption_preferences_books_financial_investing","name":"Likely to read financial investment books","score":1.0},{"consumption_preference_id":"consumption_preferences_books_autobiographies","name":"Likely to read autobiographical books","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_volunteering","name":"Volunteering 
Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_volunteer","name":"Likely to volunteer for social causes","score":0.0},{"consumption_preference_id":"consumption_preferences_volunteering_time","name":"Likely to have spent time volunteering","score":1.0},{"consumption_preference_id":"consumption_preferences_volunteer_learning","name":"Likely to volunteer to learn about social causes","score":0.0}]}],"warnings":[]}
diff --git a/resources/personality-v3-expect3.txt b/resources/personality-v3-expect3.txt
deleted file mode 100755
index ce84fc476..000000000
--- a/resources/personality-v3-expect3.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-big5_agreeableness,facet_altruism,facet_cooperation,facet_modesty,facet_morality,facet_sympathy,facet_trust,big5_conscientiousness,facet_achievement_striving,facet_cautiousness,facet_dutifulness,facet_orderliness,facet_self_discipline,facet_self_efficacy,big5_extraversion,facet_activity_level,facet_assertiveness,facet_cheerfulness,facet_excitement_seeking,facet_friendliness,facet_gregariousness,big5_neuroticism,facet_anger,facet_anxiety,facet_depression,facet_immoderation,facet_self_consciousness,facet_vulnerability,big5_openness,facet_adventurousness,facet_artistic_interests,facet_emotionality,facet_imagination,facet_intellect,facet_liberalism,need_liberty,need_ideal,need_love,need_practicality,need_self_expression,need_stability,need_structure,need_challenge,need_closeness,need_curiosity,need_excitement,need_harmony,value_conservation,value_hedonism,value_openness_to_change,value_self_enhancement,value_self_transcendence,behavior_sunday,behavior_monday,behavior_tuesday,behavior_wednesday,behavior_thursday,behavior_friday,behavior_saturday,behavior_0000,behavior_0100,behavior_0200,behavior_0300,behavior_0400,behavior_0500,behavior_0600,behavior_0700,behavior_0800,behavior_0900,behavior_1000,behavior_1100,behavior_1200,behavior_1300,behavior_1400,behavior_1500,behavior_1600,behavior_1700,behavior_1800,behavior_1900,behavior_2000,behavior_2100,behavior_2200,behavior_2300,word_count,processed_language,big5_agreeableness_raw,facet_altruism_raw,facet_cooperation_raw,facet_modesty_raw,facet_morality_raw,facet_sympathy_raw,facet_trust_raw,big5_conscientiousness_raw,facet_achievement_striving_raw,facet_cautiousness_raw,facet_dutifulness_raw,facet_orderliness_raw,facet_self_discipline_raw,facet_self_efficacy_raw,big5_extraversion_raw,facet_activity_level_raw,facet_assertiveness_raw,facet_cheerfulness_raw,facet_excitement_seeking_raw,facet_friendliness_raw,facet_gregariousness_raw,big5_neuroticism_raw,facet_anger_raw,facet_anxiety_raw,facet_depression_raw,facet_immoderation
_raw,facet_self_consciousness_raw,facet_vulnerability_raw,big5_openness_raw,facet_adventurousness_raw,facet_artistic_interests_raw,facet_emotionality_raw,facet_imagination_raw,facet_intellect_raw,facet_liberalism_raw,need_liberty_raw,need_ideal_raw,need_love_raw,need_practicality_raw,need_self_expression_raw,need_stability_raw,need_structure_raw,need_challenge_raw,need_closeness_raw,need_curiosity_raw,need_excitement_raw,need_harmony_raw,value_conservation_raw,value_hedonism_raw,value_openness_to_change_raw,value_self_enhancement_raw,value_self_transcendence_raw,consumption_preferences_spur_of_moment,consumption_preferences_credit_card_payment,consumption_preferences_influence_brand_name,consumption_preferences_influence_utility,consumption_preferences_influence_online_ads,consumption_preferences_influence_social_media,consumption_preferences_influence_family_members,consumption_preferences_clothes_quality,consumption_preferences_clothes_style,consumption_preferences_clothes_comfort,consumption_preferences_automobile_ownership_cost,consumption_preferences_automobile_safety,consumption_preferences_automobile_resale_value,consumption_preferences_music_rap,consumption_preferences_music_country,consumption_preferences_music_r_b,consumption_preferences_music_hip_hop,consumption_preferences_music_live_event,consumption_preferences_music_playing,consumption_preferences_music_latin,consumption_preferences_music_rock,consumption_preferences_music_classical,consumption_preferences_gym_membership,consumption_preferences_adventurous_sports,consumption_preferences_outdoor,consumption_preferences_eat_out,consumption_preferences_fast_food_frequency,consumption_preferences_movie_romance,consumption_preferences_movie_adventure,consumption_preferences_movie_horror,consumption_preferences_movie_musical,consumption_preferences_movie_historical,consumption_preferences_movie_science_fiction,consumption_preferences_movie_war,consumption_preferences_movie_drama,consumption_preferences_movi
e_action,consumption_preferences_movie_documentary,consumption_preferences_read_frequency,consumption_preferences_read_motive_enjoyment,consumption_preferences_read_motive_information,consumption_preferences_read_motive_mandatory,consumption_preferences_read_motive_relaxation,consumption_preferences_books_entertainment_magazines,consumption_preferences_books_non_fiction,consumption_preferences_books_financial_investing,consumption_preferences_books_autobiographies,consumption_preferences_volunteer,consumption_preferences_volunteering_time,consumption_preferences_volunteer_learning,consumption_preferences_concerned_environment,consumption_preferences_start_business
-0.1875352860319472,0.9713302006331768,0.8229934901276204,0.761318814834163,0.9471478882849421,0.9991179451374892,0.830111046812001,0.986401677449357,0.8403728912342907,0.944186945742299,0.7946276293038717,0.7610741506407186,0.712864917583896,0.6994302718651364,0.08530058556548259,0.962401631341592,0.9198609213386704,0.2293639969883699,0.21024192850794732,0.7085191412979603,0.22458619358372,0.9438564164580463,0.013938100678608567,0.062025789454073055,0.35285841125133055,0.011684379342279061,0.19347068940127837,0.06994539774378672,0.9970814244982864,0.7897453561510369,0.9946576519208279,0.7671631753694098,0.3116772371947326,0.9965199807027891,0.797907272149325,0.10802987716456186,0.02263412995273062,0.01189533382101321,0.018888178951272983,0.18489782806561655,0.3946227431440047,0.8880129689346332,0.0032546536914939694,0.37022781101806856,0.845180482624851,0.11505596926601303,0.4664217424750215,0.5065929218618456,0.005253658217920731,0.6287516949462554,0.0011936431143393933,0.3429609693883737,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1365,en,0.7069355244930271,0.7717679751112927,0.6352286286618323,0.4858691141214019,0.702145642516,0.7828928930725229,0.6251288245815745,0.731065896229928,0.7411306938189824,0.5929015783660554,0.6790451583920154,0.5174048448459116,0.5974142772332323,0.7714843433917522,0.4898595875512197,0.6314221244549749,0.7142519242541164,0.5932729161331092,0.5694475628767053,0.5880272412488141,0.4144362156057161,0.5568839124901138,0.41546033577632724,0.489225611469312,0.42452148443292836,0.41344777510142944,0.5011894182219927,0.37357140402417355,0.83666730981323,0.5334674872694041,0.7945583831155767,0.677937382446223,0.7104655052955525,0.7321638770376435,0.5582434731245067,0.6901684496009852,0.5959081695663969,0.6586965498215966,0.6828926516103326,0.6441012500714469,0.7259366269839532,0.7290291261454519,0.6097534338543016,0.7786579590270303,0.8433898277044034,0.5898767782
432288,0.8063478875213775,0.6661941759119986,0.5746924423258591,0.7969374222994671,0.5730785322934739,0.8263720901347662,0.0,1.0,0.0,1.0,1.0,0.0,1.0,1.0,0.0,1.0,0.5,0.0,1.0,0.0,0.5,1.0,0.5,0.0,0.0,1.0,0.5,1.0,0.0,0.0,0.5,0.0,0.0,0.0,1.0,0.0,0.0,1.0,1.0,0.0,0.0,1.0,1.0,1.0,1.0,1.0,0.0,1.0,0.0,1.0,0.0,0.0,1.0,1.0,0.0,1.0,0.5
diff --git a/resources/personality-v3-expect4.txt b/resources/personality-v3-expect4.txt
deleted file mode 100755
index cefac606d..000000000
--- a/resources/personality-v3-expect4.txt
+++ /dev/null
@@ -1 +0,0 @@
-{"word_count":2054,"processed_language":"es","personality":[{"trait_id":"big5_openness","name":"Apertura a experiencias","category":"personality","percentile":0.937254665925888,"raw_score":0.6665054437659199,"children":[{"trait_id":"facet_adventurousness","name":"Audacia","category":"personality","percentile":0.08223746859291331,"raw_score":0.42933795357475174},{"trait_id":"facet_artistic_interests","name":"Intereses artísticos","category":"personality","percentile":0.9763304400942869,"raw_score":0.7002492316426583},{"trait_id":"facet_emotionality","name":"Emocionalidad","category":"personality","percentile":0.7514798288441382,"raw_score":0.6329457809108067},{"trait_id":"facet_imagination","name":"Imaginación","category":"personality","percentile":0.8149758845160733,"raw_score":0.8358450161141352},{"trait_id":"facet_intellect","name":"Intelecto","category":"personality","percentile":0.709763785945054,"raw_score":0.5393985514175461},{"trait_id":"facet_liberalism","name":"Desafío a la autoridad","category":"personality","percentile":0.6238685851515903,"raw_score":0.5032730384879351}]},{"trait_id":"big5_conscientiousness","name":"Responsabilidad","category":"personality","percentile":0.8652601748372407,"raw_score":0.5675610518817606,"children":[{"trait_id":"facet_achievement_striving","name":"Necesidad de 
éxito","category":"personality","percentile":0.8616153196657172,"raw_score":0.5590390364812622},{"trait_id":"facet_cautiousness","name":"Cautela","category":"personality","percentile":0.8107894835477681,"raw_score":0.3956917603116589},{"trait_id":"facet_dutifulness","name":"Obediencia","category":"personality","percentile":0.7361183850960512,"raw_score":0.6242547149850359},{"trait_id":"facet_orderliness","name":"Disciplina","category":"personality","percentile":0.7239663954817621,"raw_score":0.4064822536153153},{"trait_id":"facet_self_discipline","name":"Autodisciplina","category":"personality","percentile":0.7198280681937614,"raw_score":0.5069844967090522},{"trait_id":"facet_self_efficacy","name":"Autoeficacia","category":"personality","percentile":0.6555485467551172,"raw_score":0.7166506366360331}]},{"trait_id":"big5_extraversion","name":"Extroversión","category":"personality","percentile":0.8312616324634844,"raw_score":0.5904152727753278,"children":[{"trait_id":"facet_activity_level","name":"Nivel de actividad","category":"personality","percentile":0.3050469697893306,"raw_score":0.48428799368416525},{"trait_id":"facet_assertiveness","name":"Seguridad en uno mismo","category":"personality","percentile":0.8397260688330984,"raw_score":0.6518273502161546},{"trait_id":"facet_cheerfulness","name":"Alegría","category":"personality","percentile":0.15273505645350988,"raw_score":0.6248204372077145},{"trait_id":"facet_excitement_seeking","name":"Búsqueda de 
emociones","category":"personality","percentile":0.7847013019475804,"raw_score":0.6442345985222767},{"trait_id":"facet_friendliness","name":"Simpatía","category":"personality","percentile":0.4308672854960358,"raw_score":0.5713958380902632},{"trait_id":"facet_gregariousness","name":"Sociabilidad","category":"personality","percentile":0.14583775819539813,"raw_score":0.4718274671256566}]},{"trait_id":"big5_agreeableness","name":"Amabilidad","category":"personality","percentile":0.964097852599053,"raw_score":0.6531530954966219,"children":[{"trait_id":"facet_altruism","name":"Altruismo","category":"personality","percentile":0.8454904962948867,"raw_score":0.6988130323165977},{"trait_id":"facet_cooperation","name":"Cooperación","category":"personality","percentile":0.7090285746898252,"raw_score":0.5034689841495227},{"trait_id":"facet_modesty","name":"Modestia","category":"personality","percentile":0.3356036734453778,"raw_score":0.37505142742666475},{"trait_id":"facet_morality","name":"Intransigencia","category":"personality","percentile":0.5970727450220207,"raw_score":0.5626043098951097},{"trait_id":"facet_sympathy","name":"Compasión","category":"personality","percentile":0.8405910443888318,"raw_score":0.6703129231871922},{"trait_id":"facet_trust","name":"Confianza","category":"personality","percentile":0.7434899651065617,"raw_score":0.584058726755165}]},{"trait_id":"big5_neuroticism","name":"Rango emocional","category":"personality","percentile":0.5289409694752685,"raw_score":0.487815337385794,"children":[{"trait_id":"facet_anger","name":"Vehemencia","category":"personality","percentile":0.49899417826927367,"raw_score":0.5721035977629064},{"trait_id":"facet_anxiety","name":"Tendencia a la 
preocupación","category":"personality","percentile":0.3288266523535158,"raw_score":0.7282190556201247},{"trait_id":"facet_depression","name":"Melancolía","category":"personality","percentile":0.29056657042415834,"raw_score":0.514863148159452},{"trait_id":"facet_immoderation","name":"Desmesura","category":"personality","percentile":0.4768272523338591,"raw_score":0.49394240481419255},{"trait_id":"facet_self_consciousness","name":"Timidez","category":"personality","percentile":0.41952877081366,"raw_score":0.5533629213910396},{"trait_id":"facet_vulnerability","name":"Susceptibilidad a la tensión","category":"personality","percentile":0.8928596088709371,"raw_score":0.7197355877820822}]}],"needs":[{"trait_id":"need_challenge","name":"Desafío","category":"needs","percentile":0.559611972188894,"raw_score":0.748742086057447},{"trait_id":"need_closeness","name":"Familiaridad","category":"needs","percentile":0.8955577050509591,"raw_score":0.8040722237206381},{"trait_id":"need_curiosity","name":"Curiosidad","category":"needs","percentile":0.09726991406313656,"raw_score":0.7301955596902647},{"trait_id":"need_excitement","name":"Entusiasmo","category":"needs","percentile":0.13382056325102437,"raw_score":0.7037297990204079},{"trait_id":"need_harmony","name":"Armonía","category":"needs","percentile":0.9573838279593837,"raw_score":0.8680468150331786},{"trait_id":"need_ideal","name":"Ideal","category":"needs","percentile":0.21515556100273503,"raw_score":0.6132355010854986},{"trait_id":"need_liberty","name":"Libertad","category":"needs","percentile":0.7345204750013818,"raw_score":0.7668148207046253},{"trait_id":"need_love","name":"Amor","category":"needs","percentile":0.279330012389927,"raw_score":0.7401357410740972},{"trait_id":"need_practicality","name":"Practicidad","category":"needs","percentile":0.9519859431515265,"raw_score":0.8152097612302944},{"trait_id":"need_self_expression","name":"Autoexpresión","category":"needs","percentile":0.45551641520878955,"raw_score":0.655237247332
5437},{"trait_id":"need_stability","name":"Estabilidad","category":"needs","percentile":0.7890941903595212,"raw_score":0.7155622088047298},{"trait_id":"need_structure","name":"Estructura","category":"needs","percentile":0.8701561216649387,"raw_score":0.6872552118295897}],"values":[{"trait_id":"value_conservation","name":"Conservación","category":"values","percentile":0.7229840083480119,"raw_score":0.6823055252116184},{"trait_id":"value_openness_to_change","name":"Apertura al cambio","category":"values","percentile":0.25516943326837055,"raw_score":0.7776804808576244},{"trait_id":"value_hedonism","name":"Hedonismo","category":"values","percentile":0.2642599286231329,"raw_score":0.7968264374887243},{"trait_id":"value_self_enhancement","name":"Superación personal","category":"values","percentile":0.14635996017074898,"raw_score":0.6187436884883577},{"trait_id":"value_self_transcendence","name":"Autotranscendencia","category":"values","percentile":0.7717967307009796,"raw_score":0.8563743707155973}],"consumption_preferences":[{"consumption_preference_category_id":"consumption_preferences_shopping","name":"Purchasing Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_automobile_ownership_cost","name":"Likely to be sensitive to ownership cost when buying automobiles","score":0.0},{"consumption_preference_id":"consumption_preferences_automobile_safety","name":"Likely to prefer safety when buying automobiles","score":1.0},{"consumption_preference_id":"consumption_preferences_automobile_resale_value","name":"Likely to prefer resale value when buying automobiles","score":1.0},{"consumption_preference_id":"consumption_preferences_clothes_quality","name":"Likely to prefer quality when buying clothes","score":1.0},{"consumption_preference_id":"consumption_preferences_clothes_style","name":"Likely to prefer style when buying clothes","score":0.0},{"consumption_preference_id":"consumption_preferences_clothes_comfort","name":"Likely to 
prefer comfort when buying clothes","score":0.0},{"consumption_preference_id":"consumption_preferences_influence_brand_name","name":"Likely to be influenced by brand name when making product purchases","score":1.0},{"consumption_preference_id":"consumption_preferences_influence_utility","name":"Likely to be influenced by product utility when making product purchases","score":0.5},{"consumption_preference_id":"consumption_preferences_influence_online_ads","name":"Likely to be influenced by online ads when making product purchases","score":1.0},{"consumption_preference_id":"consumption_preferences_influence_social_media","name":"Likely to be influenced by social media when making product purchases","score":1.0},{"consumption_preference_id":"consumption_preferences_influence_family_members","name":"Likely to be influenced by family when making product purchases","score":1.0},{"consumption_preference_id":"consumption_preferences_spur_of_moment","name":"Likely to indulge in spur of the moment purchases","score":0.5},{"consumption_preference_id":"consumption_preferences_credit_card_payment","name":"Likely to prefer using credit cards for shopping","score":1.0}]},{"consumption_preference_category_id":"consumption_preferences_health_and_activity","name":"Health & Activity Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_eat_out","name":"Likely to eat out frequently","score":0.0},{"consumption_preference_id":"consumption_preferences_fast_food_frequency","name":"Likely to eat fast food frequently","score":0.5},{"consumption_preference_id":"consumption_preferences_gym_membership","name":"Likely to have a gym membership","score":0.0},{"consumption_preference_id":"consumption_preferences_adventurous_sports","name":"Likely to like adventurous sports","score":1.0},{"consumption_preference_id":"consumption_preferences_outdoor","name":"Likely to like outdoor 
activities","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_environmental_concern","name":"Environmental Concern Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_concerned_environment","name":"Likely to be concerned about the environment","score":0.5}]},{"consumption_preference_category_id":"consumption_preferences_entrepreneurship","name":"Entrepreneurship Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_start_business","name":"Likely to consider starting a business in next few years","score":1.0}]},{"consumption_preference_category_id":"consumption_preferences_movie","name":"Movie Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_movie_romance","name":"Likely to like romance movies","score":1.0},{"consumption_preference_id":"consumption_preferences_movie_adventure","name":"Likely to like adventure movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_horror","name":"Likely to like horror movies","score":1.0},{"consumption_preference_id":"consumption_preferences_movie_musical","name":"Likely to like musical movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_historical","name":"Likely to like historical movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_science_fiction","name":"Likely to like science-fiction movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_war","name":"Likely to like war movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_drama","name":"Likely to like drama movies","score":1.0},{"consumption_preference_id":"consumption_preferences_movie_action","name":"Likely to like action movies","score":0.0},{"consumption_preference_id":"consumption_preferences_movie_documentary","name":"Likely to like documentary 
movies","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_music","name":"Music Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_music_rap","name":"Likely to like rap music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_country","name":"Likely to like country music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_r_b","name":"Likely to like R&B music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_hip_hop","name":"Likely to like hip hop music","score":1.0},{"consumption_preference_id":"consumption_preferences_music_live_event","name":"Likely to attend live musical events","score":0.0},{"consumption_preference_id":"consumption_preferences_music_playing","name":"Likely to have experience playing music","score":0.0},{"consumption_preference_id":"consumption_preferences_music_latin","name":"Likely to like Latin music","score":0.0},{"consumption_preference_id":"consumption_preferences_music_rock","name":"Likely to like rock music","score":0.5},{"consumption_preference_id":"consumption_preferences_music_classical","name":"Likely to like classical music","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_reading","name":"Reading Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_read_frequency","name":"Likely to read often","score":0.5},{"consumption_preference_id":"consumption_preferences_read_motive_enjoyment","name":"Likely to read for enjoyment","score":0.0},{"consumption_preference_id":"consumption_preferences_read_motive_information","name":"Likely to read for information","score":0.0},{"consumption_preference_id":"consumption_preferences_books_entertainment_magazines","name":"Likely to read entertainment magazines","score":1.0},{"consumption_preference_id":"consumption_preferences_books_non_fiction","name":"Likely to read non-fiction 
books","score":1.0},{"consumption_preference_id":"consumption_preferences_read_motive_mandatory","name":"Likely to do mandatory reading only","score":0.0},{"consumption_preference_id":"consumption_preferences_read_motive_relaxation","name":"Likely to read for relaxation","score":1.0},{"consumption_preference_id":"consumption_preferences_books_financial_investing","name":"Likely to read financial investment books","score":0.0},{"consumption_preference_id":"consumption_preferences_books_autobiographies","name":"Likely to read autobiographical books","score":0.0}]},{"consumption_preference_category_id":"consumption_preferences_volunteering","name":"Volunteering Preferences","consumption_preferences":[{"consumption_preference_id":"consumption_preferences_volunteer","name":"Likely to volunteer for social causes","score":0.0},{"consumption_preference_id":"consumption_preferences_volunteering_time","name":"Likely to have spent time volunteering","score":0.0},{"consumption_preference_id":"consumption_preferences_volunteer_learning","name":"Likely to volunteer to learn about social causes","score":0.0}]}],"warnings":[]}
diff --git a/resources/personality-v3.json b/resources/personality-v3.json
deleted file mode 100755
index 5b6c5d1a1..000000000
--- a/resources/personality-v3.json
+++ /dev/null
@@ -1,6941 +0,0 @@
-{
- "contentItems": [
- {
- "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass",
- "contenttype": "text/plain",
- "created": 1447639154000,
- "id": "666073008692314113",
- "language": "en"
- },
- {
- "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447638226000,
- "id": "666069114889179136",
- "language": "en"
- },
- {
- "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass",
- "contenttype": "text/plain",
- "created": 1447638067000,
- "id": "666068446325665792",
- "language": "en"
- },
- {
- "content": "Wow aren't you loving @TheRock and his candor? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447637459000,
- "id": "666065895932973057",
- "language": "en"
- },
- {
- "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.",
- "contenttype": "text/plain",
- "created": 1447637030000,
- "id": "666064097562247168",
- "language": "en"
- },
- {
- "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass",
- "contenttype": "text/plain",
- "created": 1447636205000,
- "id": "666060637181644800",
- "language": "en"
- },
- {
- "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1447602477000,
- "id": "665919171062927360",
- "language": "en"
- },
- {
- "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts",
- "contenttype": "text/plain",
- "created": 1447098990000,
- "id": "663807393063538688",
- "language": "en"
- },
- {
- "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.",
- "contenttype": "text/plain",
- "created": 1446998643000,
- "id": "663386507856736257",
- "language": "en"
- },
- {
- "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K",
- "contenttype": "text/plain",
- "created": 1446915955000,
- "id": "663039689360695296",
- "language": "en"
- },
- {
- "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f",
- "contenttype": "text/plain",
- "created": 1446881193000,
- "id": "662893888080879616",
- "language": "en"
- },
- {
- "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U",
- "contenttype": "text/plain",
- "created": 1446744186000,
- "id": "662319239844380672",
- "language": "en"
- },
- {
- "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass",
- "contenttype": "text/plain",
- "created": 1446428929000,
- "id": "660996956861280256",
- "language": "en"
- },
- {
- "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass",
- "contenttype": "text/plain",
- "created": 1446426630000,
- "id": "660987310889041920",
- "language": "en"
- },
- {
- "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow",
- "contenttype": "text/plain",
- "created": 1446220097000,
- "id": "660121050978611205",
- "language": "en"
- },
- {
- "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno",
- "contenttype": "text/plain",
- "created": 1446074433000,
- "id": "659510090748182528",
- "language": "en"
- },
- {
- "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE",
- "contenttype": "text/plain",
- "created": 1445821114000,
- "id": "658447593865945089",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass",
- "contenttype": "text/plain",
- "created": 1445821003000,
- "id": "658447130026188800",
- "language": "en"
- },
- {
- "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.",
- "contenttype": "text/plain",
- "created": 1445820161000,
- "id": "658443598313181188",
- "language": "en"
- },
- {
- "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J",
- "contenttype": "text/plain",
- "created": 1445811545000,
- "id": "658407457438363648",
- "language": "en"
- },
- {
- "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026",
- "contenttype": "text/plain",
- "created": 1445804181000,
- "id": "658376572521459712",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw",
- "contenttype": "text/plain",
- "created": 1445804072000,
- "id": "658376116575449088",
- "language": "en"
- },
- {
- "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1445734755000,
- "id": "658085377140363264",
- "language": "en"
- },
- {
- "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna",
- "contenttype": "text/plain",
- "created": 1445734097000,
- "id": "658082618819280896",
- "language": "en"
- },
- {
- "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026",
- "contenttype": "text/plain",
- "created": 1445732769000,
- "id": "658077046858383360",
- "language": "en"
- },
- {
- "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief",
- "contenttype": "text/plain",
- "created": 1445732579000,
- "id": "658076253618991104",
- "language": "en"
- },
- {
- "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief",
- "contenttype": "text/plain",
- "created": 1445731782000,
- "id": "658072908237934592",
- "language": "en"
- },
- {
- "content": "I just love Alex and his daring #Belief to live fully the present Moment.",
- "contenttype": "text/plain",
- "created": 1445731561000,
- "id": "658071980982206464",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f",
- "contenttype": "text/plain",
- "created": 1445731248000,
- "id": "658070668785770496",
- "language": "en"
- },
- {
- "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?",
- "contenttype": "text/plain",
- "created": 1445731081000,
- "id": "658069968534171648",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!",
- "contenttype": "text/plain",
- "created": 1445648630000,
- "id": "657724143115202560",
- "language": "en"
- },
- {
- "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah",
- "contenttype": "text/plain",
- "created": 1445647285000,
- "id": "657718501147197442",
- "language": "en"
- },
- {
- "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief",
- "contenttype": "text/plain",
- "created": 1445646903000,
- "id": "657716901951369218",
- "language": "en"
- },
- {
- "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief",
- "contenttype": "text/plain",
- "created": 1445645633000,
- "id": "657711572492533760",
- "language": "en"
- },
- {
- "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc",
- "contenttype": "text/plain",
- "created": 1445618531000,
- "id": "657597898394173440",
- "language": "en"
- },
- {
- "content": "Thanks All for another great night of #BELIEF",
- "contenttype": "text/plain",
- "created": 1445572548000,
- "id": "657405031822430208",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026",
- "contenttype": "text/plain",
- "created": 1445571500000,
- "id": "657400636745510912",
- "language": "en"
- },
- {
- "content": "Ok west coast let's do it! #belief",
- "contenttype": "text/plain",
- "created": 1445569367000,
- "id": "657391689439404033",
- "language": "en"
- },
- {
- "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.",
- "contenttype": "text/plain",
- "created": 1445569296000,
- "id": "657391393883619328",
- "language": "en"
- },
- {
- "content": "Hello west coast twitterati.. See you at 8 for #Belief",
- "contenttype": "text/plain",
- "created": 1445566144000,
- "id": "657378171872874496",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night.#Belief",
- "contenttype": "text/plain",
- "created": 1445475948000,
- "id": "656999861254918145",
- "language": "en"
- },
- {
- "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief",
- "contenttype": "text/plain",
- "created": 1445475602000,
- "id": "656998409933451264",
- "language": "en"
- },
- {
- "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief",
- "contenttype": "text/plain",
- "created": 1445475580000,
- "id": "656998320133398528",
- "language": "en"
- },
- {
- "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief",
- "contenttype": "text/plain",
- "created": 1445473768000,
- "id": "656990717504393216",
- "language": "en"
- },
- {
- "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief",
- "contenttype": "text/plain",
- "created": 1445473150000,
- "id": "656988127433637888",
- "language": "en"
- },
- {
- "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief",
- "contenttype": "text/plain",
- "created": 1445473063000,
- "id": "656987763644891136",
- "language": "en"
- },
- {
- "content": "\"What my faith gives me no one can match\"#Belief",
- "contenttype": "text/plain",
- "created": 1445472961000,
- "id": "656987336266223616",
- "language": "en"
- },
- {
- "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief",
- "contenttype": "text/plain",
- "created": 1445472531000,
- "id": "656985529951522816",
- "language": "en"
- },
- {
- "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief",
- "contenttype": "text/plain",
- "created": 1445472393000,
- "id": "656984953037586433",
- "language": "en"
- },
- {
- "content": "Good evening Team #Belief the Tweet is on!",
- "contenttype": "text/plain",
- "created": 1445472098000,
- "id": "656983714883239937",
- "language": "en"
- },
- {
- "content": "Thanks everyone for another Epic #Belief night!",
- "contenttype": "text/plain",
- "created": 1445302792000,
- "id": "656273592485810176",
- "language": "en"
- },
- {
- "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief",
- "contenttype": "text/plain",
- "created": 1445302749000,
- "id": "656273415280705536",
- "language": "en"
- },
- {
- "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief",
- "contenttype": "text/plain",
- "created": 1445301110000,
- "id": "656266540967424000",
- "language": "en"
- },
- {
- "content": "Do you all believe in \"soul mates\"?\n#Belief",
- "contenttype": "text/plain",
- "created": 1445300138000,
- "id": "656262462426238976",
- "language": "en"
- },
- {
- "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.",
- "contenttype": "text/plain",
- "created": 1445299749000,
- "id": "656260832628756480",
- "language": "en"
- },
- {
- "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief",
- "contenttype": "text/plain",
- "created": 1445299614000,
- "id": "656260263604310016",
- "language": "en"
- },
- {
- "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief",
- "contenttype": "text/plain",
- "created": 1445299326000,
- "id": "656259057758654464",
- "language": "en"
- },
- {
- "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026",
- "contenttype": "text/plain",
- "created": 1445295716000,
- "id": "656243916224638976",
- "language": "en"
- },
- {
- "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026",
- "contenttype": "text/plain",
- "created": 1445295702000,
- "id": "656243854610337793",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g",
- "contenttype": "text/plain",
- "created": 1445295668000,
- "id": "656243714507931648",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026",
- "contenttype": "text/plain",
- "created": 1445295661000,
- "id": "656243684720050176",
- "language": "en"
- },
- {
- "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9",
- "contenttype": "text/plain",
- "created": 1445279425000,
- "id": "656175584943341568",
- "language": "en"
- },
- {
- "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.",
- "contenttype": "text/plain",
- "created": 1445275802000,
- "id": "656160388526899200",
- "language": "en"
- },
- {
- "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026",
- "contenttype": "text/plain",
- "created": 1445229489000,
- "id": "655966138279432192",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah",
- "contenttype": "text/plain",
- "created": 1445227342000,
- "id": "655957135688241152",
- "language": "en"
- },
- {
- "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026",
- "contenttype": "text/plain",
- "created": 1445225935000,
- "id": "655951232981295104",
- "language": "en"
- },
- {
- "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026",
- "contenttype": "text/plain",
- "created": 1445225228000,
- "id": "655948267868426240",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445225008000,
- "id": "655947345197076480",
- "language": "en"
- },
- {
- "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026",
- "contenttype": "text/plain",
- "created": 1445224852000,
- "id": "655946689249828864",
- "language": "en"
- },
- {
- "content": "West Coast... Here we go. #Belief",
- "contenttype": "text/plain",
- "created": 1445224140000,
- "id": "655943701840048128",
- "language": "en"
- },
- {
- "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs",
- "contenttype": "text/plain",
- "created": 1445220694000,
- "id": "655929249669378048",
- "language": "en"
- },
- {
- "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief",
- "contenttype": "text/plain",
- "created": 1445215032000,
- "id": "655905500056391682",
- "language": "en"
- },
- {
- "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026",
- "contenttype": "text/plain",
- "created": 1445214538000,
- "id": "655903432079904768",
- "language": "en"
- },
- {
- "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV",
- "contenttype": "text/plain",
- "created": 1445214534000,
- "id": "655903413385891840",
- "language": "en"
- },
- {
- "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh",
- "contenttype": "text/plain",
- "created": 1445214502000,
- "id": "655903277796732931",
- "language": "en"
- },
- {
- "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief",
- "contenttype": "text/plain",
- "created": 1445214498000,
- "id": "655903264374812672",
- "language": "en"
- },
- {
- "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief",
- "contenttype": "text/plain",
- "created": 1445214339000,
- "id": "655902594171203584",
- "language": "en"
- },
- {
- "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY",
- "contenttype": "text/plain",
- "created": 1445214327000,
- "id": "655902545903140864",
- "language": "en"
- },
- {
- "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026",
- "contenttype": "text/plain",
- "created": 1445214128000,
- "id": "655901708506103812",
- "language": "en"
- },
- {
- "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026",
- "contenttype": "text/plain",
- "created": 1445213967000,
- "id": "655901033952993280",
- "language": "en"
- },
- {
- "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief",
- "contenttype": "text/plain",
- "created": 1445213904000,
- "id": "655900772467474435",
- "language": "en"
- },
- {
- "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it",
- "contenttype": "text/plain",
- "created": 1445213901000,
- "id": "655900756604620800",
- "language": "en"
- },
- {
- "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445213721000,
- "id": "655900002644987905",
- "language": "en"
- },
- {
- "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV",
- "contenttype": "text/plain",
- "created": 1445213706000,
- "id": "655899942242775040",
- "language": "en"
- },
- {
- "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026",
- "contenttype": "text/plain",
- "created": 1445213699000,
- "id": "655899910563217408",
- "language": "en"
- },
- {
- "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC",
- "contenttype": "text/plain",
- "created": 1445212831000,
- "id": "655896269542420480",
- "language": "en"
- },
- {
- "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.",
- "contenttype": "text/plain",
- "created": 1445212747000,
- "id": "655895916675600384",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF",
- "contenttype": "text/plain",
- "created": 1445212465000,
- "id": "655894734968217600",
- "language": "en"
- },
- {
- "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN",
- "contenttype": "text/plain",
- "created": 1445211845000,
- "id": "655892134524878848",
- "language": "en"
- },
- {
- "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026",
- "contenttype": "text/plain",
- "created": 1445211835000,
- "id": "655892094905532416",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.",
- "contenttype": "text/plain",
- "created": 1445211833000,
- "id": "655892084314890240",
- "language": "en"
- },
- {
- "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026",
- "contenttype": "text/plain",
- "created": 1445209201000,
- "id": "655881046102142978",
- "language": "en"
- },
- {
- "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal",
- "contenttype": "text/plain",
- "created": 1445209181000,
- "id": "655880959535939584",
- "language": "en"
- },
- {
- "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026",
- "contenttype": "text/plain",
- "created": 1445208945000,
- "id": "655879970732949504",
- "language": "en"
- },
- {
- "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass",
- "contenttype": "text/plain",
- "created": 1447639154000,
- "id": "666073008692314113",
- "language": "en"
- },
- {
- "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447638226000,
- "id": "666069114889179136",
- "language": "en"
- },
- {
- "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass",
- "contenttype": "text/plain",
- "created": 1447638067000,
- "id": "666068446325665792",
- "language": "en"
- },
- {
- "content": "Wow aren't you loving @TheRock and his candor? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447637459000,
- "id": "666065895932973057",
- "language": "en"
- },
- {
- "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.",
- "contenttype": "text/plain",
- "created": 1447637030000,
- "id": "666064097562247168",
- "language": "en"
- },
- {
- "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass",
- "contenttype": "text/plain",
- "created": 1447636205000,
- "id": "666060637181644800",
- "language": "en"
- },
- {
- "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1447602477000,
- "id": "665919171062927360",
- "language": "en"
- },
- {
- "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts",
- "contenttype": "text/plain",
- "created": 1447098990000,
- "id": "663807393063538688",
- "language": "en"
- },
- {
- "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.",
- "contenttype": "text/plain",
- "created": 1446998643000,
- "id": "663386507856736257",
- "language": "en"
- },
- {
- "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K",
- "contenttype": "text/plain",
- "created": 1446915955000,
- "id": "663039689360695296",
- "language": "en"
- },
- {
- "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f",
- "contenttype": "text/plain",
- "created": 1446881193000,
- "id": "662893888080879616",
- "language": "en"
- },
- {
- "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U",
- "contenttype": "text/plain",
- "created": 1446744186000,
- "id": "662319239844380672",
- "language": "en"
- },
- {
- "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass",
- "contenttype": "text/plain",
- "created": 1446428929000,
- "id": "660996956861280256",
- "language": "en"
- },
- {
- "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass",
- "contenttype": "text/plain",
- "created": 1446426630000,
- "id": "660987310889041920",
- "language": "en"
- },
- {
- "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow",
- "contenttype": "text/plain",
- "created": 1446220097000,
- "id": "660121050978611205",
- "language": "en"
- },
- {
- "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno",
- "contenttype": "text/plain",
- "created": 1446074433000,
- "id": "659510090748182528",
- "language": "en"
- },
- {
- "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE",
- "contenttype": "text/plain",
- "created": 1445821114000,
- "id": "658447593865945089",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass",
- "contenttype": "text/plain",
- "created": 1445821003000,
- "id": "658447130026188800",
- "language": "en"
- },
- {
- "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.",
- "contenttype": "text/plain",
- "created": 1445820161000,
- "id": "658443598313181188",
- "language": "en"
- },
- {
- "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J",
- "contenttype": "text/plain",
- "created": 1445811545000,
- "id": "658407457438363648",
- "language": "en"
- },
- {
- "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026",
- "contenttype": "text/plain",
- "created": 1445804181000,
- "id": "658376572521459712",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw",
- "contenttype": "text/plain",
- "created": 1445804072000,
- "id": "658376116575449088",
- "language": "en"
- },
- {
- "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1445734755000,
- "id": "658085377140363264",
- "language": "en"
- },
- {
- "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna",
- "contenttype": "text/plain",
- "created": 1445734097000,
- "id": "658082618819280896",
- "language": "en"
- },
- {
- "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026",
- "contenttype": "text/plain",
- "created": 1445732769000,
- "id": "658077046858383360",
- "language": "en"
- },
- {
- "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief",
- "contenttype": "text/plain",
- "created": 1445732579000,
- "id": "658076253618991104",
- "language": "en"
- },
- {
- "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief",
- "contenttype": "text/plain",
- "created": 1445731782000,
- "id": "658072908237934592",
- "language": "en"
- },
- {
- "content": "I just love Alex and his daring #Belief to live fully the present Moment.",
- "contenttype": "text/plain",
- "created": 1445731561000,
- "id": "658071980982206464",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f",
- "contenttype": "text/plain",
- "created": 1445731248000,
- "id": "658070668785770496",
- "language": "en"
- },
- {
- "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?",
- "contenttype": "text/plain",
- "created": 1445731081000,
- "id": "658069968534171648",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!",
- "contenttype": "text/plain",
- "created": 1445648630000,
- "id": "657724143115202560",
- "language": "en"
- },
- {
- "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah",
- "contenttype": "text/plain",
- "created": 1445647285000,
- "id": "657718501147197442",
- "language": "en"
- },
- {
- "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief",
- "contenttype": "text/plain",
- "created": 1445646903000,
- "id": "657716901951369218",
- "language": "en"
- },
- {
- "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief",
- "contenttype": "text/plain",
- "created": 1445645633000,
- "id": "657711572492533760",
- "language": "en"
- },
- {
- "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc",
- "contenttype": "text/plain",
- "created": 1445618531000,
- "id": "657597898394173440",
- "language": "en"
- },
- {
- "content": "Thanks All for another great night of #BELIEF",
- "contenttype": "text/plain",
- "created": 1445572548000,
- "id": "657405031822430208",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026",
- "contenttype": "text/plain",
- "created": 1445571500000,
- "id": "657400636745510912",
- "language": "en"
- },
- {
- "content": "Ok west coast let's do it! #belief",
- "contenttype": "text/plain",
- "created": 1445569367000,
- "id": "657391689439404033",
- "language": "en"
- },
- {
- "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.",
- "contenttype": "text/plain",
- "created": 1445569296000,
- "id": "657391393883619328",
- "language": "en"
- },
- {
- "content": "Hello west coast twitterati.. See you at 8 for #Belief",
- "contenttype": "text/plain",
- "created": 1445566144000,
- "id": "657378171872874496",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night.#Belief",
- "contenttype": "text/plain",
- "created": 1445475948000,
- "id": "656999861254918145",
- "language": "en"
- },
- {
- "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief",
- "contenttype": "text/plain",
- "created": 1445475602000,
- "id": "656998409933451264",
- "language": "en"
- },
- {
- "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief",
- "contenttype": "text/plain",
- "created": 1445475580000,
- "id": "656998320133398528",
- "language": "en"
- },
- {
- "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief",
- "contenttype": "text/plain",
- "created": 1445473768000,
- "id": "656990717504393216",
- "language": "en"
- },
- {
- "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief",
- "contenttype": "text/plain",
- "created": 1445473150000,
- "id": "656988127433637888",
- "language": "en"
- },
- {
- "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief",
- "contenttype": "text/plain",
- "created": 1445473063000,
- "id": "656987763644891136",
- "language": "en"
- },
- {
- "content": "\"What my faith gives me no one can match\"#Belief",
- "contenttype": "text/plain",
- "created": 1445472961000,
- "id": "656987336266223616",
- "language": "en"
- },
- {
- "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief",
- "contenttype": "text/plain",
- "created": 1445472531000,
- "id": "656985529951522816",
- "language": "en"
- },
- {
- "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief",
- "contenttype": "text/plain",
- "created": 1445472393000,
- "id": "656984953037586433",
- "language": "en"
- },
- {
- "content": "Good evening Team #Belief the Tweet is on!",
- "contenttype": "text/plain",
- "created": 1445472098000,
- "id": "656983714883239937",
- "language": "en"
- },
- {
- "content": "Thanks everyone for another Epic #Belief night!",
- "contenttype": "text/plain",
- "created": 1445302792000,
- "id": "656273592485810176",
- "language": "en"
- },
- {
- "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief",
- "contenttype": "text/plain",
- "created": 1445302749000,
- "id": "656273415280705536",
- "language": "en"
- },
- {
- "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief",
- "contenttype": "text/plain",
- "created": 1445301110000,
- "id": "656266540967424000",
- "language": "en"
- },
- {
- "content": "Do you all believe in \"soul mates\"?\n#Belief",
- "contenttype": "text/plain",
- "created": 1445300138000,
- "id": "656262462426238976",
- "language": "en"
- },
- {
- "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.",
- "contenttype": "text/plain",
- "created": 1445299749000,
- "id": "656260832628756480",
- "language": "en"
- },
- {
- "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief",
- "contenttype": "text/plain",
- "created": 1445299614000,
- "id": "656260263604310016",
- "language": "en"
- },
- {
- "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief",
- "contenttype": "text/plain",
- "created": 1445299326000,
- "id": "656259057758654464",
- "language": "en"
- },
- {
- "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026",
- "contenttype": "text/plain",
- "created": 1445295716000,
- "id": "656243916224638976",
- "language": "en"
- },
- {
- "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026",
- "contenttype": "text/plain",
- "created": 1445295702000,
- "id": "656243854610337793",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g",
- "contenttype": "text/plain",
- "created": 1445295668000,
- "id": "656243714507931648",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026",
- "contenttype": "text/plain",
- "created": 1445295661000,
- "id": "656243684720050176",
- "language": "en"
- },
- {
- "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9",
- "contenttype": "text/plain",
- "created": 1445279425000,
- "id": "656175584943341568",
- "language": "en"
- },
- {
- "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.",
- "contenttype": "text/plain",
- "created": 1445275802000,
- "id": "656160388526899200",
- "language": "en"
- },
- {
- "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026",
- "contenttype": "text/plain",
- "created": 1445229489000,
- "id": "655966138279432192",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah",
- "contenttype": "text/plain",
- "created": 1445227342000,
- "id": "655957135688241152",
- "language": "en"
- },
- {
- "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026",
- "contenttype": "text/plain",
- "created": 1445225935000,
- "id": "655951232981295104",
- "language": "en"
- },
- {
- "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026",
- "contenttype": "text/plain",
- "created": 1445225228000,
- "id": "655948267868426240",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445225008000,
- "id": "655947345197076480",
- "language": "en"
- },
- {
- "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026",
- "contenttype": "text/plain",
- "created": 1445224852000,
- "id": "655946689249828864",
- "language": "en"
- },
- {
- "content": "West Coast... Here we go. #Belief",
- "contenttype": "text/plain",
- "created": 1445224140000,
- "id": "655943701840048128",
- "language": "en"
- },
- {
- "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs",
- "contenttype": "text/plain",
- "created": 1445220694000,
- "id": "655929249669378048",
- "language": "en"
- },
- {
- "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief",
- "contenttype": "text/plain",
- "created": 1445215032000,
- "id": "655905500056391682",
- "language": "en"
- },
- {
- "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026",
- "contenttype": "text/plain",
- "created": 1445214538000,
- "id": "655903432079904768",
- "language": "en"
- },
- {
- "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV",
- "contenttype": "text/plain",
- "created": 1445214534000,
- "id": "655903413385891840",
- "language": "en"
- },
- {
- "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh",
- "contenttype": "text/plain",
- "created": 1445214502000,
- "id": "655903277796732931",
- "language": "en"
- },
- {
- "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief",
- "contenttype": "text/plain",
- "created": 1445214498000,
- "id": "655903264374812672",
- "language": "en"
- },
- {
- "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief",
- "contenttype": "text/plain",
- "created": 1445214339000,
- "id": "655902594171203584",
- "language": "en"
- },
- {
- "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY",
- "contenttype": "text/plain",
- "created": 1445214327000,
- "id": "655902545903140864",
- "language": "en"
- },
- {
- "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026",
- "contenttype": "text/plain",
- "created": 1445214128000,
- "id": "655901708506103812",
- "language": "en"
- },
- {
- "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026",
- "contenttype": "text/plain",
- "created": 1445213967000,
- "id": "655901033952993280",
- "language": "en"
- },
- {
- "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief",
- "contenttype": "text/plain",
- "created": 1445213904000,
- "id": "655900772467474435",
- "language": "en"
- },
- {
- "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it",
- "contenttype": "text/plain",
- "created": 1445213901000,
- "id": "655900756604620800",
- "language": "en"
- },
- {
- "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445213721000,
- "id": "655900002644987905",
- "language": "en"
- },
- {
- "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV",
- "contenttype": "text/plain",
- "created": 1445213706000,
- "id": "655899942242775040",
- "language": "en"
- },
- {
- "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026",
- "contenttype": "text/plain",
- "created": 1445213699000,
- "id": "655899910563217408",
- "language": "en"
- },
- {
- "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC",
- "contenttype": "text/plain",
- "created": 1445212831000,
- "id": "655896269542420480",
- "language": "en"
- },
- {
- "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.",
- "contenttype": "text/plain",
- "created": 1445212747000,
- "id": "655895916675600384",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF",
- "contenttype": "text/plain",
- "created": 1445212465000,
- "id": "655894734968217600",
- "language": "en"
- },
- {
- "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN",
- "contenttype": "text/plain",
- "created": 1445211845000,
- "id": "655892134524878848",
- "language": "en"
- },
- {
- "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026",
- "contenttype": "text/plain",
- "created": 1445211835000,
- "id": "655892094905532416",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.",
- "contenttype": "text/plain",
- "created": 1445211833000,
- "id": "655892084314890240",
- "language": "en"
- },
- {
- "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026",
- "contenttype": "text/plain",
- "created": 1445209201000,
- "id": "655881046102142978",
- "language": "en"
- },
- {
- "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal",
- "contenttype": "text/plain",
- "created": 1445209181000,
- "id": "655880959535939584",
- "language": "en"
- },
- {
- "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026",
- "contenttype": "text/plain",
- "created": 1445208945000,
- "id": "655879970732949504",
- "language": "en"
- },
- {
- "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass",
- "contenttype": "text/plain",
- "created": 1447639154000,
- "id": "666073008692314113",
- "language": "en"
- },
- {
- "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447638226000,
- "id": "666069114889179136",
- "language": "en"
- },
- {
- "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass",
- "contenttype": "text/plain",
- "created": 1447638067000,
- "id": "666068446325665792",
- "language": "en"
- },
- {
- "content": "Wow aren't you loving @TheRock and his candor? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447637459000,
- "id": "666065895932973057",
- "language": "en"
- },
- {
- "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.",
- "contenttype": "text/plain",
- "created": 1447637030000,
- "id": "666064097562247168",
- "language": "en"
- },
- {
- "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass",
- "contenttype": "text/plain",
- "created": 1447636205000,
- "id": "666060637181644800",
- "language": "en"
- },
- {
- "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1447602477000,
- "id": "665919171062927360",
- "language": "en"
- },
- {
- "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts",
- "contenttype": "text/plain",
- "created": 1447098990000,
- "id": "663807393063538688",
- "language": "en"
- },
- {
- "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.",
- "contenttype": "text/plain",
- "created": 1446998643000,
- "id": "663386507856736257",
- "language": "en"
- },
- {
- "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K",
- "contenttype": "text/plain",
- "created": 1446915955000,
- "id": "663039689360695296",
- "language": "en"
- },
- {
- "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f",
- "contenttype": "text/plain",
- "created": 1446881193000,
- "id": "662893888080879616",
- "language": "en"
- },
- {
- "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U",
- "contenttype": "text/plain",
- "created": 1446744186000,
- "id": "662319239844380672",
- "language": "en"
- },
- {
- "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass",
- "contenttype": "text/plain",
- "created": 1446428929000,
- "id": "660996956861280256",
- "language": "en"
- },
- {
- "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass",
- "contenttype": "text/plain",
- "created": 1446426630000,
- "id": "660987310889041920",
- "language": "en"
- },
- {
- "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow",
- "contenttype": "text/plain",
- "created": 1446220097000,
- "id": "660121050978611205",
- "language": "en"
- },
- {
- "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno",
- "contenttype": "text/plain",
- "created": 1446074433000,
- "id": "659510090748182528",
- "language": "en"
- },
- {
- "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE",
- "contenttype": "text/plain",
- "created": 1445821114000,
- "id": "658447593865945089",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass",
- "contenttype": "text/plain",
- "created": 1445821003000,
- "id": "658447130026188800",
- "language": "en"
- },
- {
- "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.",
- "contenttype": "text/plain",
- "created": 1445820161000,
- "id": "658443598313181188",
- "language": "en"
- },
- {
- "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J",
- "contenttype": "text/plain",
- "created": 1445811545000,
- "id": "658407457438363648",
- "language": "en"
- },
- {
- "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026",
- "contenttype": "text/plain",
- "created": 1445804181000,
- "id": "658376572521459712",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw",
- "contenttype": "text/plain",
- "created": 1445804072000,
- "id": "658376116575449088",
- "language": "en"
- },
- {
- "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1445734755000,
- "id": "658085377140363264",
- "language": "en"
- },
- {
- "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna",
- "contenttype": "text/plain",
- "created": 1445734097000,
- "id": "658082618819280896",
- "language": "en"
- },
- {
- "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026",
- "contenttype": "text/plain",
- "created": 1445732769000,
- "id": "658077046858383360",
- "language": "en"
- },
- {
- "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief",
- "contenttype": "text/plain",
- "created": 1445732579000,
- "id": "658076253618991104",
- "language": "en"
- },
- {
- "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief",
- "contenttype": "text/plain",
- "created": 1445731782000,
- "id": "658072908237934592",
- "language": "en"
- },
- {
- "content": "I just love Alex and his daring #Belief to live fully the present Moment.",
- "contenttype": "text/plain",
- "created": 1445731561000,
- "id": "658071980982206464",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f",
- "contenttype": "text/plain",
- "created": 1445731248000,
- "id": "658070668785770496",
- "language": "en"
- },
- {
- "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?",
- "contenttype": "text/plain",
- "created": 1445731081000,
- "id": "658069968534171648",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!",
- "contenttype": "text/plain",
- "created": 1445648630000,
- "id": "657724143115202560",
- "language": "en"
- },
- {
- "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah",
- "contenttype": "text/plain",
- "created": 1445647285000,
- "id": "657718501147197442",
- "language": "en"
- },
- {
- "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief",
- "contenttype": "text/plain",
- "created": 1445646903000,
- "id": "657716901951369218",
- "language": "en"
- },
- {
- "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief",
- "contenttype": "text/plain",
- "created": 1445645633000,
- "id": "657711572492533760",
- "language": "en"
- },
- {
- "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc",
- "contenttype": "text/plain",
- "created": 1445618531000,
- "id": "657597898394173440",
- "language": "en"
- },
- {
- "content": "Thanks All for another great night of #BELIEF",
- "contenttype": "text/plain",
- "created": 1445572548000,
- "id": "657405031822430208",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026",
- "contenttype": "text/plain",
- "created": 1445571500000,
- "id": "657400636745510912",
- "language": "en"
- },
- {
- "content": "Ok west coast let's do it! #belief",
- "contenttype": "text/plain",
- "created": 1445569367000,
- "id": "657391689439404033",
- "language": "en"
- },
- {
- "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.",
- "contenttype": "text/plain",
- "created": 1445569296000,
- "id": "657391393883619328",
- "language": "en"
- },
- {
- "content": "Hello west coast twitterati.. See you at 8 for #Belief",
- "contenttype": "text/plain",
- "created": 1445566144000,
- "id": "657378171872874496",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night.#Belief",
- "contenttype": "text/plain",
- "created": 1445475948000,
- "id": "656999861254918145",
- "language": "en"
- },
- {
- "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief",
- "contenttype": "text/plain",
- "created": 1445475602000,
- "id": "656998409933451264",
- "language": "en"
- },
- {
- "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief",
- "contenttype": "text/plain",
- "created": 1445475580000,
- "id": "656998320133398528",
- "language": "en"
- },
- {
- "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief",
- "contenttype": "text/plain",
- "created": 1445473768000,
- "id": "656990717504393216",
- "language": "en"
- },
- {
- "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief",
- "contenttype": "text/plain",
- "created": 1445473150000,
- "id": "656988127433637888",
- "language": "en"
- },
- {
- "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief",
- "contenttype": "text/plain",
- "created": 1445473063000,
- "id": "656987763644891136",
- "language": "en"
- },
- {
- "content": "\"What my faith gives me no one can match\"#Belief",
- "contenttype": "text/plain",
- "created": 1445472961000,
- "id": "656987336266223616",
- "language": "en"
- },
- {
- "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief",
- "contenttype": "text/plain",
- "created": 1445472531000,
- "id": "656985529951522816",
- "language": "en"
- },
- {
- "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief",
- "contenttype": "text/plain",
- "created": 1445472393000,
- "id": "656984953037586433",
- "language": "en"
- },
- {
- "content": "Good evening Team #Belief the Tweet is on!",
- "contenttype": "text/plain",
- "created": 1445472098000,
- "id": "656983714883239937",
- "language": "en"
- },
- {
- "content": "Thanks everyone for another Epic #Belief night!",
- "contenttype": "text/plain",
- "created": 1445302792000,
- "id": "656273592485810176",
- "language": "en"
- },
- {
- "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief",
- "contenttype": "text/plain",
- "created": 1445302749000,
- "id": "656273415280705536",
- "language": "en"
- },
- {
- "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief",
- "contenttype": "text/plain",
- "created": 1445301110000,
- "id": "656266540967424000",
- "language": "en"
- },
- {
- "content": "Do you all believe in \"soul mates\"?\n#Belief",
- "contenttype": "text/plain",
- "created": 1445300138000,
- "id": "656262462426238976",
- "language": "en"
- },
- {
- "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.",
- "contenttype": "text/plain",
- "created": 1445299749000,
- "id": "656260832628756480",
- "language": "en"
- },
- {
- "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief",
- "contenttype": "text/plain",
- "created": 1445299614000,
- "id": "656260263604310016",
- "language": "en"
- },
- {
- "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief",
- "contenttype": "text/plain",
- "created": 1445299326000,
- "id": "656259057758654464",
- "language": "en"
- },
- {
- "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026",
- "contenttype": "text/plain",
- "created": 1445295716000,
- "id": "656243916224638976",
- "language": "en"
- },
- {
- "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026",
- "contenttype": "text/plain",
- "created": 1445295702000,
- "id": "656243854610337793",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g",
- "contenttype": "text/plain",
- "created": 1445295668000,
- "id": "656243714507931648",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026",
- "contenttype": "text/plain",
- "created": 1445295661000,
- "id": "656243684720050176",
- "language": "en"
- },
- {
- "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9",
- "contenttype": "text/plain",
- "created": 1445279425000,
- "id": "656175584943341568",
- "language": "en"
- },
- {
- "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.",
- "contenttype": "text/plain",
- "created": 1445275802000,
- "id": "656160388526899200",
- "language": "en"
- },
- {
- "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026",
- "contenttype": "text/plain",
- "created": 1445229489000,
- "id": "655966138279432192",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah",
- "contenttype": "text/plain",
- "created": 1445227342000,
- "id": "655957135688241152",
- "language": "en"
- },
- {
- "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026",
- "contenttype": "text/plain",
- "created": 1445225935000,
- "id": "655951232981295104",
- "language": "en"
- },
- {
- "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026",
- "contenttype": "text/plain",
- "created": 1445225228000,
- "id": "655948267868426240",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445225008000,
- "id": "655947345197076480",
- "language": "en"
- },
- {
- "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026",
- "contenttype": "text/plain",
- "created": 1445224852000,
- "id": "655946689249828864",
- "language": "en"
- },
- {
- "content": "West Coast... Here we go. #Belief",
- "contenttype": "text/plain",
- "created": 1445224140000,
- "id": "655943701840048128",
- "language": "en"
- },
- {
- "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs",
- "contenttype": "text/plain",
- "created": 1445220694000,
- "id": "655929249669378048",
- "language": "en"
- },
- {
- "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief",
- "contenttype": "text/plain",
- "created": 1445215032000,
- "id": "655905500056391682",
- "language": "en"
- },
- {
- "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026",
- "contenttype": "text/plain",
- "created": 1445214538000,
- "id": "655903432079904768",
- "language": "en"
- },
- {
- "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV",
- "contenttype": "text/plain",
- "created": 1445214534000,
- "id": "655903413385891840",
- "language": "en"
- },
- {
- "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh",
- "contenttype": "text/plain",
- "created": 1445214502000,
- "id": "655903277796732931",
- "language": "en"
- },
- {
- "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief",
- "contenttype": "text/plain",
- "created": 1445214498000,
- "id": "655903264374812672",
- "language": "en"
- },
- {
- "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief",
- "contenttype": "text/plain",
- "created": 1445214339000,
- "id": "655902594171203584",
- "language": "en"
- },
- {
- "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY",
- "contenttype": "text/plain",
- "created": 1445214327000,
- "id": "655902545903140864",
- "language": "en"
- },
- {
- "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026",
- "contenttype": "text/plain",
- "created": 1445214128000,
- "id": "655901708506103812",
- "language": "en"
- },
- {
- "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026",
- "contenttype": "text/plain",
- "created": 1445213967000,
- "id": "655901033952993280",
- "language": "en"
- },
- {
- "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief",
- "contenttype": "text/plain",
- "created": 1445213904000,
- "id": "655900772467474435",
- "language": "en"
- },
- {
- "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it",
- "contenttype": "text/plain",
- "created": 1445213901000,
- "id": "655900756604620800",
- "language": "en"
- },
- {
- "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445213721000,
- "id": "655900002644987905",
- "language": "en"
- },
- {
- "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV",
- "contenttype": "text/plain",
- "created": 1445213706000,
- "id": "655899942242775040",
- "language": "en"
- },
- {
- "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026",
- "contenttype": "text/plain",
- "created": 1445213699000,
- "id": "655899910563217408",
- "language": "en"
- },
- {
- "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC",
- "contenttype": "text/plain",
- "created": 1445212831000,
- "id": "655896269542420480",
- "language": "en"
- },
- {
- "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.",
- "contenttype": "text/plain",
- "created": 1445212747000,
- "id": "655895916675600384",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF",
- "contenttype": "text/plain",
- "created": 1445212465000,
- "id": "655894734968217600",
- "language": "en"
- },
- {
- "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN",
- "contenttype": "text/plain",
- "created": 1445211845000,
- "id": "655892134524878848",
- "language": "en"
- },
- {
- "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026",
- "contenttype": "text/plain",
- "created": 1445211835000,
- "id": "655892094905532416",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.",
- "contenttype": "text/plain",
- "created": 1445211833000,
- "id": "655892084314890240",
- "language": "en"
- },
- {
- "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026",
- "contenttype": "text/plain",
- "created": 1445209201000,
- "id": "655881046102142978",
- "language": "en"
- },
- {
- "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal",
- "contenttype": "text/plain",
- "created": 1445209181000,
- "id": "655880959535939584",
- "language": "en"
- },
- {
- "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026",
- "contenttype": "text/plain",
- "created": 1445208945000,
- "id": "655879970732949504",
- "language": "en"
- },
- {
- "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass",
- "contenttype": "text/plain",
- "created": 1447639154000,
- "id": "666073008692314113",
- "language": "en"
- },
- {
- "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447638226000,
- "id": "666069114889179136",
- "language": "en"
- },
- {
- "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass",
- "contenttype": "text/plain",
- "created": 1447638067000,
- "id": "666068446325665792",
- "language": "en"
- },
- {
- "content": "Wow aren't you loving @TheRock and his candor? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447637459000,
- "id": "666065895932973057",
- "language": "en"
- },
- {
- "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.",
- "contenttype": "text/plain",
- "created": 1447637030000,
- "id": "666064097562247168",
- "language": "en"
- },
- {
- "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass",
- "contenttype": "text/plain",
- "created": 1447636205000,
- "id": "666060637181644800",
- "language": "en"
- },
- {
- "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1447602477000,
- "id": "665919171062927360",
- "language": "en"
- },
- {
- "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts",
- "contenttype": "text/plain",
- "created": 1447098990000,
- "id": "663807393063538688",
- "language": "en"
- },
- {
- "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.",
- "contenttype": "text/plain",
- "created": 1446998643000,
- "id": "663386507856736257",
- "language": "en"
- },
- {
- "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K",
- "contenttype": "text/plain",
- "created": 1446915955000,
- "id": "663039689360695296",
- "language": "en"
- },
- {
- "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f",
- "contenttype": "text/plain",
- "created": 1446881193000,
- "id": "662893888080879616",
- "language": "en"
- },
- {
- "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U",
- "contenttype": "text/plain",
- "created": 1446744186000,
- "id": "662319239844380672",
- "language": "en"
- },
- {
- "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass",
- "contenttype": "text/plain",
- "created": 1446428929000,
- "id": "660996956861280256",
- "language": "en"
- },
- {
- "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass",
- "contenttype": "text/plain",
- "created": 1446426630000,
- "id": "660987310889041920",
- "language": "en"
- },
- {
- "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow",
- "contenttype": "text/plain",
- "created": 1446220097000,
- "id": "660121050978611205",
- "language": "en"
- },
- {
- "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno",
- "contenttype": "text/plain",
- "created": 1446074433000,
- "id": "659510090748182528",
- "language": "en"
- },
- {
- "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE",
- "contenttype": "text/plain",
- "created": 1445821114000,
- "id": "658447593865945089",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass",
- "contenttype": "text/plain",
- "created": 1445821003000,
- "id": "658447130026188800",
- "language": "en"
- },
- {
- "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.",
- "contenttype": "text/plain",
- "created": 1445820161000,
- "id": "658443598313181188",
- "language": "en"
- },
- {
- "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J",
- "contenttype": "text/plain",
- "created": 1445811545000,
- "id": "658407457438363648",
- "language": "en"
- },
- {
- "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026",
- "contenttype": "text/plain",
- "created": 1445804181000,
- "id": "658376572521459712",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw",
- "contenttype": "text/plain",
- "created": 1445804072000,
- "id": "658376116575449088",
- "language": "en"
- },
- {
- "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1445734755000,
- "id": "658085377140363264",
- "language": "en"
- },
- {
- "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna",
- "contenttype": "text/plain",
- "created": 1445734097000,
- "id": "658082618819280896",
- "language": "en"
- },
- {
- "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026",
- "contenttype": "text/plain",
- "created": 1445732769000,
- "id": "658077046858383360",
- "language": "en"
- },
- {
- "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief",
- "contenttype": "text/plain",
- "created": 1445732579000,
- "id": "658076253618991104",
- "language": "en"
- },
- {
- "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief",
- "contenttype": "text/plain",
- "created": 1445731782000,
- "id": "658072908237934592",
- "language": "en"
- },
- {
- "content": "I just love Alex and his daring #Belief to live fully the present Moment.",
- "contenttype": "text/plain",
- "created": 1445731561000,
- "id": "658071980982206464",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f",
- "contenttype": "text/plain",
- "created": 1445731248000,
- "id": "658070668785770496",
- "language": "en"
- },
- {
- "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?",
- "contenttype": "text/plain",
- "created": 1445731081000,
- "id": "658069968534171648",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!",
- "contenttype": "text/plain",
- "created": 1445648630000,
- "id": "657724143115202560",
- "language": "en"
- },
- {
- "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah",
- "contenttype": "text/plain",
- "created": 1445647285000,
- "id": "657718501147197442",
- "language": "en"
- },
- {
- "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief",
- "contenttype": "text/plain",
- "created": 1445646903000,
- "id": "657716901951369218",
- "language": "en"
- },
- {
- "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief",
- "contenttype": "text/plain",
- "created": 1445645633000,
- "id": "657711572492533760",
- "language": "en"
- },
- {
- "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc",
- "contenttype": "text/plain",
- "created": 1445618531000,
- "id": "657597898394173440",
- "language": "en"
- },
- {
- "content": "Thanks All for another great night of #BELIEF",
- "contenttype": "text/plain",
- "created": 1445572548000,
- "id": "657405031822430208",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026",
- "contenttype": "text/plain",
- "created": 1445571500000,
- "id": "657400636745510912",
- "language": "en"
- },
- {
- "content": "Ok west coast let's do it! #belief",
- "contenttype": "text/plain",
- "created": 1445569367000,
- "id": "657391689439404033",
- "language": "en"
- },
- {
- "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.",
- "contenttype": "text/plain",
- "created": 1445569296000,
- "id": "657391393883619328",
- "language": "en"
- },
- {
- "content": "Hello west coast twitterati.. See you at 8 for #Belief",
- "contenttype": "text/plain",
- "created": 1445566144000,
- "id": "657378171872874496",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night.#Belief",
- "contenttype": "text/plain",
- "created": 1445475948000,
- "id": "656999861254918145",
- "language": "en"
- },
- {
- "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief",
- "contenttype": "text/plain",
- "created": 1445475602000,
- "id": "656998409933451264",
- "language": "en"
- },
- {
- "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief",
- "contenttype": "text/plain",
- "created": 1445475580000,
- "id": "656998320133398528",
- "language": "en"
- },
- {
- "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief",
- "contenttype": "text/plain",
- "created": 1445473768000,
- "id": "656990717504393216",
- "language": "en"
- },
- {
- "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief",
- "contenttype": "text/plain",
- "created": 1445473150000,
- "id": "656988127433637888",
- "language": "en"
- },
- {
- "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief",
- "contenttype": "text/plain",
- "created": 1445473063000,
- "id": "656987763644891136",
- "language": "en"
- },
- {
- "content": "\"What my faith gives me no one can match\"#Belief",
- "contenttype": "text/plain",
- "created": 1445472961000,
- "id": "656987336266223616",
- "language": "en"
- },
- {
- "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief",
- "contenttype": "text/plain",
- "created": 1445472531000,
- "id": "656985529951522816",
- "language": "en"
- },
- {
- "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief",
- "contenttype": "text/plain",
- "created": 1445472393000,
- "id": "656984953037586433",
- "language": "en"
- },
- {
- "content": "Good evening Team #Belief the Tweet is on!",
- "contenttype": "text/plain",
- "created": 1445472098000,
- "id": "656983714883239937",
- "language": "en"
- },
- {
- "content": "Thanks everyone for another Epic #Belief night!",
- "contenttype": "text/plain",
- "created": 1445302792000,
- "id": "656273592485810176",
- "language": "en"
- },
- {
- "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief",
- "contenttype": "text/plain",
- "created": 1445302749000,
- "id": "656273415280705536",
- "language": "en"
- },
- {
- "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief",
- "contenttype": "text/plain",
- "created": 1445301110000,
- "id": "656266540967424000",
- "language": "en"
- },
- {
- "content": "Do you all believe in \"soul mates\"?\n#Belief",
- "contenttype": "text/plain",
- "created": 1445300138000,
- "id": "656262462426238976",
- "language": "en"
- },
- {
- "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.",
- "contenttype": "text/plain",
- "created": 1445299749000,
- "id": "656260832628756480",
- "language": "en"
- },
- {
- "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief",
- "contenttype": "text/plain",
- "created": 1445299614000,
- "id": "656260263604310016",
- "language": "en"
- },
- {
- "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief",
- "contenttype": "text/plain",
- "created": 1445299326000,
- "id": "656259057758654464",
- "language": "en"
- },
- {
- "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026",
- "contenttype": "text/plain",
- "created": 1445295716000,
- "id": "656243916224638976",
- "language": "en"
- },
- {
- "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026",
- "contenttype": "text/plain",
- "created": 1445295702000,
- "id": "656243854610337793",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g",
- "contenttype": "text/plain",
- "created": 1445295668000,
- "id": "656243714507931648",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026",
- "contenttype": "text/plain",
- "created": 1445295661000,
- "id": "656243684720050176",
- "language": "en"
- },
- {
- "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9",
- "contenttype": "text/plain",
- "created": 1445279425000,
- "id": "656175584943341568",
- "language": "en"
- },
- {
- "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.",
- "contenttype": "text/plain",
- "created": 1445275802000,
- "id": "656160388526899200",
- "language": "en"
- },
- {
- "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026",
- "contenttype": "text/plain",
- "created": 1445229489000,
- "id": "655966138279432192",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah",
- "contenttype": "text/plain",
- "created": 1445227342000,
- "id": "655957135688241152",
- "language": "en"
- },
- {
- "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026",
- "contenttype": "text/plain",
- "created": 1445225935000,
- "id": "655951232981295104",
- "language": "en"
- },
- {
- "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026",
- "contenttype": "text/plain",
- "created": 1445225228000,
- "id": "655948267868426240",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445225008000,
- "id": "655947345197076480",
- "language": "en"
- },
- {
- "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026",
- "contenttype": "text/plain",
- "created": 1445224852000,
- "id": "655946689249828864",
- "language": "en"
- },
- {
- "content": "West Coast... Here we go. #Belief",
- "contenttype": "text/plain",
- "created": 1445224140000,
- "id": "655943701840048128",
- "language": "en"
- },
- {
- "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs",
- "contenttype": "text/plain",
- "created": 1445220694000,
- "id": "655929249669378048",
- "language": "en"
- },
- {
- "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief",
- "contenttype": "text/plain",
- "created": 1445215032000,
- "id": "655905500056391682",
- "language": "en"
- },
- {
- "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026",
- "contenttype": "text/plain",
- "created": 1445214538000,
- "id": "655903432079904768",
- "language": "en"
- },
- {
- "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV",
- "contenttype": "text/plain",
- "created": 1445214534000,
- "id": "655903413385891840",
- "language": "en"
- },
- {
- "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh",
- "contenttype": "text/plain",
- "created": 1445214502000,
- "id": "655903277796732931",
- "language": "en"
- },
- {
- "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief",
- "contenttype": "text/plain",
- "created": 1445214498000,
- "id": "655903264374812672",
- "language": "en"
- },
- {
- "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief",
- "contenttype": "text/plain",
- "created": 1445214339000,
- "id": "655902594171203584",
- "language": "en"
- },
- {
- "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY",
- "contenttype": "text/plain",
- "created": 1445214327000,
- "id": "655902545903140864",
- "language": "en"
- },
- {
- "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026",
- "contenttype": "text/plain",
- "created": 1445214128000,
- "id": "655901708506103812",
- "language": "en"
- },
- {
- "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026",
- "contenttype": "text/plain",
- "created": 1445213967000,
- "id": "655901033952993280",
- "language": "en"
- },
- {
- "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief",
- "contenttype": "text/plain",
- "created": 1445213904000,
- "id": "655900772467474435",
- "language": "en"
- },
- {
- "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it",
- "contenttype": "text/plain",
- "created": 1445213901000,
- "id": "655900756604620800",
- "language": "en"
- },
- {
- "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445213721000,
- "id": "655900002644987905",
- "language": "en"
- },
- {
- "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV",
- "contenttype": "text/plain",
- "created": 1445213706000,
- "id": "655899942242775040",
- "language": "en"
- },
- {
- "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026",
- "contenttype": "text/plain",
- "created": 1445213699000,
- "id": "655899910563217408",
- "language": "en"
- },
- {
- "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC",
- "contenttype": "text/plain",
- "created": 1445212831000,
- "id": "655896269542420480",
- "language": "en"
- },
- {
- "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.",
- "contenttype": "text/plain",
- "created": 1445212747000,
- "id": "655895916675600384",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF",
- "contenttype": "text/plain",
- "created": 1445212465000,
- "id": "655894734968217600",
- "language": "en"
- },
- {
- "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN",
- "contenttype": "text/plain",
- "created": 1445211845000,
- "id": "655892134524878848",
- "language": "en"
- },
- {
- "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026",
- "contenttype": "text/plain",
- "created": 1445211835000,
- "id": "655892094905532416",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.",
- "contenttype": "text/plain",
- "created": 1445211833000,
- "id": "655892084314890240",
- "language": "en"
- },
- {
- "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026",
- "contenttype": "text/plain",
- "created": 1445209201000,
- "id": "655881046102142978",
- "language": "en"
- },
- {
- "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal",
- "contenttype": "text/plain",
- "created": 1445209181000,
- "id": "655880959535939584",
- "language": "en"
- },
- {
- "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026",
- "contenttype": "text/plain",
- "created": 1445208945000,
- "id": "655879970732949504",
- "language": "en"
- },
- {
- "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass",
- "contenttype": "text/plain",
- "created": 1447639154000,
- "id": "666073008692314113",
- "language": "en"
- },
- {
- "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447638226000,
- "id": "666069114889179136",
- "language": "en"
- },
- {
- "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass",
- "contenttype": "text/plain",
- "created": 1447638067000,
- "id": "666068446325665792",
- "language": "en"
- },
- {
- "content": "Wow aren't you loving @TheRock and his candor? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447637459000,
- "id": "666065895932973057",
- "language": "en"
- },
- {
- "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.",
- "contenttype": "text/plain",
- "created": 1447637030000,
- "id": "666064097562247168",
- "language": "en"
- },
- {
- "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass",
- "contenttype": "text/plain",
- "created": 1447636205000,
- "id": "666060637181644800",
- "language": "en"
- },
- {
- "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1447602477000,
- "id": "665919171062927360",
- "language": "en"
- },
- {
- "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts",
- "contenttype": "text/plain",
- "created": 1447098990000,
- "id": "663807393063538688",
- "language": "en"
- },
- {
- "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.",
- "contenttype": "text/plain",
- "created": 1446998643000,
- "id": "663386507856736257",
- "language": "en"
- },
- {
- "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K",
- "contenttype": "text/plain",
- "created": 1446915955000,
- "id": "663039689360695296",
- "language": "en"
- },
- {
- "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f",
- "contenttype": "text/plain",
- "created": 1446881193000,
- "id": "662893888080879616",
- "language": "en"
- },
- {
- "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U",
- "contenttype": "text/plain",
- "created": 1446744186000,
- "id": "662319239844380672",
- "language": "en"
- },
- {
- "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass",
- "contenttype": "text/plain",
- "created": 1446428929000,
- "id": "660996956861280256",
- "language": "en"
- },
- {
- "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass",
- "contenttype": "text/plain",
- "created": 1446426630000,
- "id": "660987310889041920",
- "language": "en"
- },
- {
- "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow",
- "contenttype": "text/plain",
- "created": 1446220097000,
- "id": "660121050978611205",
- "language": "en"
- },
- {
- "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno",
- "contenttype": "text/plain",
- "created": 1446074433000,
- "id": "659510090748182528",
- "language": "en"
- },
- {
- "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE",
- "contenttype": "text/plain",
- "created": 1445821114000,
- "id": "658447593865945089",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass",
- "contenttype": "text/plain",
- "created": 1445821003000,
- "id": "658447130026188800",
- "language": "en"
- },
- {
- "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.",
- "contenttype": "text/plain",
- "created": 1445820161000,
- "id": "658443598313181188",
- "language": "en"
- },
- {
- "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J",
- "contenttype": "text/plain",
- "created": 1445811545000,
- "id": "658407457438363648",
- "language": "en"
- },
- {
- "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026",
- "contenttype": "text/plain",
- "created": 1445804181000,
- "id": "658376572521459712",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw",
- "contenttype": "text/plain",
- "created": 1445804072000,
- "id": "658376116575449088",
- "language": "en"
- },
- {
- "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1445734755000,
- "id": "658085377140363264",
- "language": "en"
- },
- {
- "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna",
- "contenttype": "text/plain",
- "created": 1445734097000,
- "id": "658082618819280896",
- "language": "en"
- },
- {
- "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026",
- "contenttype": "text/plain",
- "created": 1445732769000,
- "id": "658077046858383360",
- "language": "en"
- },
- {
- "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief",
- "contenttype": "text/plain",
- "created": 1445732579000,
- "id": "658076253618991104",
- "language": "en"
- },
- {
- "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief",
- "contenttype": "text/plain",
- "created": 1445731782000,
- "id": "658072908237934592",
- "language": "en"
- },
- {
- "content": "I just love Alex and his daring #Belief to live fully the present Moment.",
- "contenttype": "text/plain",
- "created": 1445731561000,
- "id": "658071980982206464",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f",
- "contenttype": "text/plain",
- "created": 1445731248000,
- "id": "658070668785770496",
- "language": "en"
- },
- {
- "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?",
- "contenttype": "text/plain",
- "created": 1445731081000,
- "id": "658069968534171648",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!",
- "contenttype": "text/plain",
- "created": 1445648630000,
- "id": "657724143115202560",
- "language": "en"
- },
- {
- "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah",
- "contenttype": "text/plain",
- "created": 1445647285000,
- "id": "657718501147197442",
- "language": "en"
- },
- {
- "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief",
- "contenttype": "text/plain",
- "created": 1445646903000,
- "id": "657716901951369218",
- "language": "en"
- },
- {
- "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief",
- "contenttype": "text/plain",
- "created": 1445645633000,
- "id": "657711572492533760",
- "language": "en"
- },
- {
- "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc",
- "contenttype": "text/plain",
- "created": 1445618531000,
- "id": "657597898394173440",
- "language": "en"
- },
- {
- "content": "Thanks All for another great night of #BELIEF",
- "contenttype": "text/plain",
- "created": 1445572548000,
- "id": "657405031822430208",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026",
- "contenttype": "text/plain",
- "created": 1445571500000,
- "id": "657400636745510912",
- "language": "en"
- },
- {
- "content": "Ok west coast let's do it! #belief",
- "contenttype": "text/plain",
- "created": 1445569367000,
- "id": "657391689439404033",
- "language": "en"
- },
- {
- "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.",
- "contenttype": "text/plain",
- "created": 1445569296000,
- "id": "657391393883619328",
- "language": "en"
- },
- {
- "content": "Hello west coast twitterati.. See you at 8 for #Belief",
- "contenttype": "text/plain",
- "created": 1445566144000,
- "id": "657378171872874496",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night.#Belief",
- "contenttype": "text/plain",
- "created": 1445475948000,
- "id": "656999861254918145",
- "language": "en"
- },
- {
- "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief",
- "contenttype": "text/plain",
- "created": 1445475602000,
- "id": "656998409933451264",
- "language": "en"
- },
- {
- "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief",
- "contenttype": "text/plain",
- "created": 1445475580000,
- "id": "656998320133398528",
- "language": "en"
- },
- {
- "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief",
- "contenttype": "text/plain",
- "created": 1445473768000,
- "id": "656990717504393216",
- "language": "en"
- },
- {
- "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief",
- "contenttype": "text/plain",
- "created": 1445473150000,
- "id": "656988127433637888",
- "language": "en"
- },
- {
- "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief",
- "contenttype": "text/plain",
- "created": 1445473063000,
- "id": "656987763644891136",
- "language": "en"
- },
- {
- "content": "\"What my faith gives me no one can match\"#Belief",
- "contenttype": "text/plain",
- "created": 1445472961000,
- "id": "656987336266223616",
- "language": "en"
- },
- {
- "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief",
- "contenttype": "text/plain",
- "created": 1445472531000,
- "id": "656985529951522816",
- "language": "en"
- },
- {
- "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief",
- "contenttype": "text/plain",
- "created": 1445472393000,
- "id": "656984953037586433",
- "language": "en"
- },
- {
- "content": "Good evening Team #Belief the Tweet is on!",
- "contenttype": "text/plain",
- "created": 1445472098000,
- "id": "656983714883239937",
- "language": "en"
- },
- {
- "content": "Thanks everyone for another Epic #Belief night!",
- "contenttype": "text/plain",
- "created": 1445302792000,
- "id": "656273592485810176",
- "language": "en"
- },
- {
- "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief",
- "contenttype": "text/plain",
- "created": 1445302749000,
- "id": "656273415280705536",
- "language": "en"
- },
- {
- "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief",
- "contenttype": "text/plain",
- "created": 1445301110000,
- "id": "656266540967424000",
- "language": "en"
- },
- {
- "content": "Do you all believe in \"soul mates\"?\n#Belief",
- "contenttype": "text/plain",
- "created": 1445300138000,
- "id": "656262462426238976",
- "language": "en"
- },
- {
- "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.",
- "contenttype": "text/plain",
- "created": 1445299749000,
- "id": "656260832628756480",
- "language": "en"
- },
- {
- "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief",
- "contenttype": "text/plain",
- "created": 1445299614000,
- "id": "656260263604310016",
- "language": "en"
- },
- {
- "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief",
- "contenttype": "text/plain",
- "created": 1445299326000,
- "id": "656259057758654464",
- "language": "en"
- },
- {
- "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026",
- "contenttype": "text/plain",
- "created": 1445295716000,
- "id": "656243916224638976",
- "language": "en"
- },
- {
- "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026",
- "contenttype": "text/plain",
- "created": 1445295702000,
- "id": "656243854610337793",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g",
- "contenttype": "text/plain",
- "created": 1445295668000,
- "id": "656243714507931648",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026",
- "contenttype": "text/plain",
- "created": 1445295661000,
- "id": "656243684720050176",
- "language": "en"
- },
- {
- "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9",
- "contenttype": "text/plain",
- "created": 1445279425000,
- "id": "656175584943341568",
- "language": "en"
- },
- {
- "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.",
- "contenttype": "text/plain",
- "created": 1445275802000,
- "id": "656160388526899200",
- "language": "en"
- },
- {
- "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026",
- "contenttype": "text/plain",
- "created": 1445229489000,
- "id": "655966138279432192",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah",
- "contenttype": "text/plain",
- "created": 1445227342000,
- "id": "655957135688241152",
- "language": "en"
- },
- {
- "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026",
- "contenttype": "text/plain",
- "created": 1445225935000,
- "id": "655951232981295104",
- "language": "en"
- },
- {
- "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026",
- "contenttype": "text/plain",
- "created": 1445225228000,
- "id": "655948267868426240",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445225008000,
- "id": "655947345197076480",
- "language": "en"
- },
- {
- "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026",
- "contenttype": "text/plain",
- "created": 1445224852000,
- "id": "655946689249828864",
- "language": "en"
- },
- {
- "content": "West Coast... Here we go. #Belief",
- "contenttype": "text/plain",
- "created": 1445224140000,
- "id": "655943701840048128",
- "language": "en"
- },
- {
- "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs",
- "contenttype": "text/plain",
- "created": 1445220694000,
- "id": "655929249669378048",
- "language": "en"
- },
- {
- "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief",
- "contenttype": "text/plain",
- "created": 1445215032000,
- "id": "655905500056391682",
- "language": "en"
- },
- {
- "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026",
- "contenttype": "text/plain",
- "created": 1445214538000,
- "id": "655903432079904768",
- "language": "en"
- },
- {
- "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV",
- "contenttype": "text/plain",
- "created": 1445214534000,
- "id": "655903413385891840",
- "language": "en"
- },
- {
- "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh",
- "contenttype": "text/plain",
- "created": 1445214502000,
- "id": "655903277796732931",
- "language": "en"
- },
- {
- "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief",
- "contenttype": "text/plain",
- "created": 1445214498000,
- "id": "655903264374812672",
- "language": "en"
- },
- {
- "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief",
- "contenttype": "text/plain",
- "created": 1445214339000,
- "id": "655902594171203584",
- "language": "en"
- },
- {
- "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY",
- "contenttype": "text/plain",
- "created": 1445214327000,
- "id": "655902545903140864",
- "language": "en"
- },
- {
- "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026",
- "contenttype": "text/plain",
- "created": 1445214128000,
- "id": "655901708506103812",
- "language": "en"
- },
- {
- "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026",
- "contenttype": "text/plain",
- "created": 1445213967000,
- "id": "655901033952993280",
- "language": "en"
- },
- {
- "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief",
- "contenttype": "text/plain",
- "created": 1445213904000,
- "id": "655900772467474435",
- "language": "en"
- },
- {
- "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it",
- "contenttype": "text/plain",
- "created": 1445213901000,
- "id": "655900756604620800",
- "language": "en"
- },
- {
- "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445213721000,
- "id": "655900002644987905",
- "language": "en"
- },
- {
- "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV",
- "contenttype": "text/plain",
- "created": 1445213706000,
- "id": "655899942242775040",
- "language": "en"
- },
- {
- "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026",
- "contenttype": "text/plain",
- "created": 1445213699000,
- "id": "655899910563217408",
- "language": "en"
- },
- {
- "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC",
- "contenttype": "text/plain",
- "created": 1445212831000,
- "id": "655896269542420480",
- "language": "en"
- },
- {
- "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.",
- "contenttype": "text/plain",
- "created": 1445212747000,
- "id": "655895916675600384",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF",
- "contenttype": "text/plain",
- "created": 1445212465000,
- "id": "655894734968217600",
- "language": "en"
- },
- {
- "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN",
- "contenttype": "text/plain",
- "created": 1445211845000,
- "id": "655892134524878848",
- "language": "en"
- },
- {
- "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026",
- "contenttype": "text/plain",
- "created": 1445211835000,
- "id": "655892094905532416",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.",
- "contenttype": "text/plain",
- "created": 1445211833000,
- "id": "655892084314890240",
- "language": "en"
- },
- {
- "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026",
- "contenttype": "text/plain",
- "created": 1445209201000,
- "id": "655881046102142978",
- "language": "en"
- },
- {
- "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal",
- "contenttype": "text/plain",
- "created": 1445209181000,
- "id": "655880959535939584",
- "language": "en"
- },
- {
- "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026",
- "contenttype": "text/plain",
- "created": 1445208945000,
- "id": "655879970732949504",
- "language": "en"
- },
- {
- "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass",
- "contenttype": "text/plain",
- "created": 1447639154000,
- "id": "666073008692314113",
- "language": "en"
- },
- {
- "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447638226000,
- "id": "666069114889179136",
- "language": "en"
- },
- {
- "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass",
- "contenttype": "text/plain",
- "created": 1447638067000,
- "id": "666068446325665792",
- "language": "en"
- },
- {
- "content": "Wow aren't you loving @TheRock and his candor? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447637459000,
- "id": "666065895932973057",
- "language": "en"
- },
- {
- "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.",
- "contenttype": "text/plain",
- "created": 1447637030000,
- "id": "666064097562247168",
- "language": "en"
- },
- {
- "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass",
- "contenttype": "text/plain",
- "created": 1447636205000,
- "id": "666060637181644800",
- "language": "en"
- },
- {
- "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1447602477000,
- "id": "665919171062927360",
- "language": "en"
- },
- {
- "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts",
- "contenttype": "text/plain",
- "created": 1447098990000,
- "id": "663807393063538688",
- "language": "en"
- },
- {
- "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.",
- "contenttype": "text/plain",
- "created": 1446998643000,
- "id": "663386507856736257",
- "language": "en"
- },
- {
- "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K",
- "contenttype": "text/plain",
- "created": 1446915955000,
- "id": "663039689360695296",
- "language": "en"
- },
- {
- "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f",
- "contenttype": "text/plain",
- "created": 1446881193000,
- "id": "662893888080879616",
- "language": "en"
- },
- {
- "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U",
- "contenttype": "text/plain",
- "created": 1446744186000,
- "id": "662319239844380672",
- "language": "en"
- },
- {
- "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass",
- "contenttype": "text/plain",
- "created": 1446428929000,
- "id": "660996956861280256",
- "language": "en"
- },
- {
- "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass",
- "contenttype": "text/plain",
- "created": 1446426630000,
- "id": "660987310889041920",
- "language": "en"
- },
- {
- "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow",
- "contenttype": "text/plain",
- "created": 1446220097000,
- "id": "660121050978611205",
- "language": "en"
- },
- {
- "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno",
- "contenttype": "text/plain",
- "created": 1446074433000,
- "id": "659510090748182528",
- "language": "en"
- },
- {
- "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE",
- "contenttype": "text/plain",
- "created": 1445821114000,
- "id": "658447593865945089",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass",
- "contenttype": "text/plain",
- "created": 1445821003000,
- "id": "658447130026188800",
- "language": "en"
- },
- {
- "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.",
- "contenttype": "text/plain",
- "created": 1445820161000,
- "id": "658443598313181188",
- "language": "en"
- },
- {
- "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J",
- "contenttype": "text/plain",
- "created": 1445811545000,
- "id": "658407457438363648",
- "language": "en"
- },
- {
- "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026",
- "contenttype": "text/plain",
- "created": 1445804181000,
- "id": "658376572521459712",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw",
- "contenttype": "text/plain",
- "created": 1445804072000,
- "id": "658376116575449088",
- "language": "en"
- },
- {
- "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1445734755000,
- "id": "658085377140363264",
- "language": "en"
- },
- {
- "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna",
- "contenttype": "text/plain",
- "created": 1445734097000,
- "id": "658082618819280896",
- "language": "en"
- },
- {
- "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026",
- "contenttype": "text/plain",
- "created": 1445732769000,
- "id": "658077046858383360",
- "language": "en"
- },
- {
- "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief",
- "contenttype": "text/plain",
- "created": 1445732579000,
- "id": "658076253618991104",
- "language": "en"
- },
- {
- "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief",
- "contenttype": "text/plain",
- "created": 1445731782000,
- "id": "658072908237934592",
- "language": "en"
- },
- {
- "content": "I just love Alex and his daring #Belief to live fully the present Moment.",
- "contenttype": "text/plain",
- "created": 1445731561000,
- "id": "658071980982206464",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f",
- "contenttype": "text/plain",
- "created": 1445731248000,
- "id": "658070668785770496",
- "language": "en"
- },
- {
- "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?",
- "contenttype": "text/plain",
- "created": 1445731081000,
- "id": "658069968534171648",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!",
- "contenttype": "text/plain",
- "created": 1445648630000,
- "id": "657724143115202560",
- "language": "en"
- },
- {
- "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah",
- "contenttype": "text/plain",
- "created": 1445647285000,
- "id": "657718501147197442",
- "language": "en"
- },
- {
- "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief",
- "contenttype": "text/plain",
- "created": 1445646903000,
- "id": "657716901951369218",
- "language": "en"
- },
- {
- "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief",
- "contenttype": "text/plain",
- "created": 1445645633000,
- "id": "657711572492533760",
- "language": "en"
- },
- {
- "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc",
- "contenttype": "text/plain",
- "created": 1445618531000,
- "id": "657597898394173440",
- "language": "en"
- },
- {
- "content": "Thanks All for another great night of #BELIEF",
- "contenttype": "text/plain",
- "created": 1445572548000,
- "id": "657405031822430208",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026",
- "contenttype": "text/plain",
- "created": 1445571500000,
- "id": "657400636745510912",
- "language": "en"
- },
- {
- "content": "Ok west coast let's do it! #belief",
- "contenttype": "text/plain",
- "created": 1445569367000,
- "id": "657391689439404033",
- "language": "en"
- },
- {
- "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.",
- "contenttype": "text/plain",
- "created": 1445569296000,
- "id": "657391393883619328",
- "language": "en"
- },
- {
- "content": "Hello west coast twitterati.. See you at 8 for #Belief",
- "contenttype": "text/plain",
- "created": 1445566144000,
- "id": "657378171872874496",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night.#Belief",
- "contenttype": "text/plain",
- "created": 1445475948000,
- "id": "656999861254918145",
- "language": "en"
- },
- {
- "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief",
- "contenttype": "text/plain",
- "created": 1445475602000,
- "id": "656998409933451264",
- "language": "en"
- },
- {
- "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief",
- "contenttype": "text/plain",
- "created": 1445475580000,
- "id": "656998320133398528",
- "language": "en"
- },
- {
- "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief",
- "contenttype": "text/plain",
- "created": 1445473768000,
- "id": "656990717504393216",
- "language": "en"
- },
- {
- "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief",
- "contenttype": "text/plain",
- "created": 1445473150000,
- "id": "656988127433637888",
- "language": "en"
- },
- {
- "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief",
- "contenttype": "text/plain",
- "created": 1445473063000,
- "id": "656987763644891136",
- "language": "en"
- },
- {
- "content": "\"What my faith gives me no one can match\"#Belief",
- "contenttype": "text/plain",
- "created": 1445472961000,
- "id": "656987336266223616",
- "language": "en"
- },
- {
- "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief",
- "contenttype": "text/plain",
- "created": 1445472531000,
- "id": "656985529951522816",
- "language": "en"
- },
- {
- "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief",
- "contenttype": "text/plain",
- "created": 1445472393000,
- "id": "656984953037586433",
- "language": "en"
- },
- {
- "content": "Good evening Team #Belief the Tweet is on!",
- "contenttype": "text/plain",
- "created": 1445472098000,
- "id": "656983714883239937",
- "language": "en"
- },
- {
- "content": "Thanks everyone for another Epic #Belief night!",
- "contenttype": "text/plain",
- "created": 1445302792000,
- "id": "656273592485810176",
- "language": "en"
- },
- {
- "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief",
- "contenttype": "text/plain",
- "created": 1445302749000,
- "id": "656273415280705536",
- "language": "en"
- },
- {
- "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief",
- "contenttype": "text/plain",
- "created": 1445301110000,
- "id": "656266540967424000",
- "language": "en"
- },
- {
- "content": "Do you all believe in \"soul mates\"?\n#Belief",
- "contenttype": "text/plain",
- "created": 1445300138000,
- "id": "656262462426238976",
- "language": "en"
- },
- {
- "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.",
- "contenttype": "text/plain",
- "created": 1445299749000,
- "id": "656260832628756480",
- "language": "en"
- },
- {
- "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief",
- "contenttype": "text/plain",
- "created": 1445299614000,
- "id": "656260263604310016",
- "language": "en"
- },
- {
- "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief",
- "contenttype": "text/plain",
- "created": 1445299326000,
- "id": "656259057758654464",
- "language": "en"
- },
- {
- "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026",
- "contenttype": "text/plain",
- "created": 1445295716000,
- "id": "656243916224638976",
- "language": "en"
- },
- {
- "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026",
- "contenttype": "text/plain",
- "created": 1445295702000,
- "id": "656243854610337793",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g",
- "contenttype": "text/plain",
- "created": 1445295668000,
- "id": "656243714507931648",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026",
- "contenttype": "text/plain",
- "created": 1445295661000,
- "id": "656243684720050176",
- "language": "en"
- },
- {
- "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9",
- "contenttype": "text/plain",
- "created": 1445279425000,
- "id": "656175584943341568",
- "language": "en"
- },
- {
- "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.",
- "contenttype": "text/plain",
- "created": 1445275802000,
- "id": "656160388526899200",
- "language": "en"
- },
- {
- "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026",
- "contenttype": "text/plain",
- "created": 1445229489000,
- "id": "655966138279432192",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah",
- "contenttype": "text/plain",
- "created": 1445227342000,
- "id": "655957135688241152",
- "language": "en"
- },
- {
- "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026",
- "contenttype": "text/plain",
- "created": 1445225935000,
- "id": "655951232981295104",
- "language": "en"
- },
- {
- "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026",
- "contenttype": "text/plain",
- "created": 1445225228000,
- "id": "655948267868426240",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445225008000,
- "id": "655947345197076480",
- "language": "en"
- },
- {
- "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026",
- "contenttype": "text/plain",
- "created": 1445224852000,
- "id": "655946689249828864",
- "language": "en"
- },
- {
- "content": "West Coast... Here we go. #Belief",
- "contenttype": "text/plain",
- "created": 1445224140000,
- "id": "655943701840048128",
- "language": "en"
- },
- {
- "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs",
- "contenttype": "text/plain",
- "created": 1445220694000,
- "id": "655929249669378048",
- "language": "en"
- },
- {
- "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief",
- "contenttype": "text/plain",
- "created": 1445215032000,
- "id": "655905500056391682",
- "language": "en"
- },
- {
- "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026",
- "contenttype": "text/plain",
- "created": 1445214538000,
- "id": "655903432079904768",
- "language": "en"
- },
- {
- "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV",
- "contenttype": "text/plain",
- "created": 1445214534000,
- "id": "655903413385891840",
- "language": "en"
- },
- {
- "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh",
- "contenttype": "text/plain",
- "created": 1445214502000,
- "id": "655903277796732931",
- "language": "en"
- },
- {
- "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief",
- "contenttype": "text/plain",
- "created": 1445214498000,
- "id": "655903264374812672",
- "language": "en"
- },
- {
- "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief",
- "contenttype": "text/plain",
- "created": 1445214339000,
- "id": "655902594171203584",
- "language": "en"
- },
- {
- "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY",
- "contenttype": "text/plain",
- "created": 1445214327000,
- "id": "655902545903140864",
- "language": "en"
- },
- {
- "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026",
- "contenttype": "text/plain",
- "created": 1445214128000,
- "id": "655901708506103812",
- "language": "en"
- },
- {
- "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026",
- "contenttype": "text/plain",
- "created": 1445213967000,
- "id": "655901033952993280",
- "language": "en"
- },
- {
- "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief",
- "contenttype": "text/plain",
- "created": 1445213904000,
- "id": "655900772467474435",
- "language": "en"
- },
- {
- "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it",
- "contenttype": "text/plain",
- "created": 1445213901000,
- "id": "655900756604620800",
- "language": "en"
- },
- {
- "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445213721000,
- "id": "655900002644987905",
- "language": "en"
- },
- {
- "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV",
- "contenttype": "text/plain",
- "created": 1445213706000,
- "id": "655899942242775040",
- "language": "en"
- },
- {
- "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026",
- "contenttype": "text/plain",
- "created": 1445213699000,
- "id": "655899910563217408",
- "language": "en"
- },
- {
- "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC",
- "contenttype": "text/plain",
- "created": 1445212831000,
- "id": "655896269542420480",
- "language": "en"
- },
- {
- "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.",
- "contenttype": "text/plain",
- "created": 1445212747000,
- "id": "655895916675600384",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF",
- "contenttype": "text/plain",
- "created": 1445212465000,
- "id": "655894734968217600",
- "language": "en"
- },
- {
- "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN",
- "contenttype": "text/plain",
- "created": 1445211845000,
- "id": "655892134524878848",
- "language": "en"
- },
- {
- "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026",
- "contenttype": "text/plain",
- "created": 1445211835000,
- "id": "655892094905532416",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.",
- "contenttype": "text/plain",
- "created": 1445211833000,
- "id": "655892084314890240",
- "language": "en"
- },
- {
- "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026",
- "contenttype": "text/plain",
- "created": 1445209201000,
- "id": "655881046102142978",
- "language": "en"
- },
- {
- "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal",
- "contenttype": "text/plain",
- "created": 1445209181000,
- "id": "655880959535939584",
- "language": "en"
- },
- {
- "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026",
- "contenttype": "text/plain",
- "created": 1445208945000,
- "id": "655879970732949504",
- "language": "en"
- },
- {
- "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass",
- "contenttype": "text/plain",
- "created": 1447639154000,
- "id": "666073008692314113",
- "language": "en"
- },
- {
- "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447638226000,
- "id": "666069114889179136",
- "language": "en"
- },
- {
- "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass",
- "contenttype": "text/plain",
- "created": 1447638067000,
- "id": "666068446325665792",
- "language": "en"
- },
- {
- "content": "Wow aren't you loving @TheRock and his candor? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447637459000,
- "id": "666065895932973057",
- "language": "en"
- },
- {
- "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.",
- "contenttype": "text/plain",
- "created": 1447637030000,
- "id": "666064097562247168",
- "language": "en"
- },
- {
- "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass",
- "contenttype": "text/plain",
- "created": 1447636205000,
- "id": "666060637181644800",
- "language": "en"
- },
- {
- "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1447602477000,
- "id": "665919171062927360",
- "language": "en"
- },
- {
- "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts",
- "contenttype": "text/plain",
- "created": 1447098990000,
- "id": "663807393063538688",
- "language": "en"
- },
- {
- "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.",
- "contenttype": "text/plain",
- "created": 1446998643000,
- "id": "663386507856736257",
- "language": "en"
- },
- {
- "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K",
- "contenttype": "text/plain",
- "created": 1446915955000,
- "id": "663039689360695296",
- "language": "en"
- },
- {
- "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f",
- "contenttype": "text/plain",
- "created": 1446881193000,
- "id": "662893888080879616",
- "language": "en"
- },
- {
- "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U",
- "contenttype": "text/plain",
- "created": 1446744186000,
- "id": "662319239844380672",
- "language": "en"
- },
- {
- "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass",
- "contenttype": "text/plain",
- "created": 1446428929000,
- "id": "660996956861280256",
- "language": "en"
- },
- {
- "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass",
- "contenttype": "text/plain",
- "created": 1446426630000,
- "id": "660987310889041920",
- "language": "en"
- },
- {
- "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow",
- "contenttype": "text/plain",
- "created": 1446220097000,
- "id": "660121050978611205",
- "language": "en"
- },
- {
- "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno",
- "contenttype": "text/plain",
- "created": 1446074433000,
- "id": "659510090748182528",
- "language": "en"
- },
- {
- "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE",
- "contenttype": "text/plain",
- "created": 1445821114000,
- "id": "658447593865945089",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass",
- "contenttype": "text/plain",
- "created": 1445821003000,
- "id": "658447130026188800",
- "language": "en"
- },
- {
- "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.",
- "contenttype": "text/plain",
- "created": 1445820161000,
- "id": "658443598313181188",
- "language": "en"
- },
- {
- "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J",
- "contenttype": "text/plain",
- "created": 1445811545000,
- "id": "658407457438363648",
- "language": "en"
- },
- {
- "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026",
- "contenttype": "text/plain",
- "created": 1445804181000,
- "id": "658376572521459712",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw",
- "contenttype": "text/plain",
- "created": 1445804072000,
- "id": "658376116575449088",
- "language": "en"
- },
- {
- "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1445734755000,
- "id": "658085377140363264",
- "language": "en"
- },
- {
- "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna",
- "contenttype": "text/plain",
- "created": 1445734097000,
- "id": "658082618819280896",
- "language": "en"
- },
- {
- "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026",
- "contenttype": "text/plain",
- "created": 1445732769000,
- "id": "658077046858383360",
- "language": "en"
- },
- {
- "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief",
- "contenttype": "text/plain",
- "created": 1445732579000,
- "id": "658076253618991104",
- "language": "en"
- },
- {
- "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief",
- "contenttype": "text/plain",
- "created": 1445731782000,
- "id": "658072908237934592",
- "language": "en"
- },
- {
- "content": "I just love Alex and his daring #Belief to live fully the present Moment.",
- "contenttype": "text/plain",
- "created": 1445731561000,
- "id": "658071980982206464",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f",
- "contenttype": "text/plain",
- "created": 1445731248000,
- "id": "658070668785770496",
- "language": "en"
- },
- {
- "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?",
- "contenttype": "text/plain",
- "created": 1445731081000,
- "id": "658069968534171648",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!",
- "contenttype": "text/plain",
- "created": 1445648630000,
- "id": "657724143115202560",
- "language": "en"
- },
- {
- "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah",
- "contenttype": "text/plain",
- "created": 1445647285000,
- "id": "657718501147197442",
- "language": "en"
- },
- {
- "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief",
- "contenttype": "text/plain",
- "created": 1445646903000,
- "id": "657716901951369218",
- "language": "en"
- },
- {
- "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief",
- "contenttype": "text/plain",
- "created": 1445645633000,
- "id": "657711572492533760",
- "language": "en"
- },
- {
- "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc",
- "contenttype": "text/plain",
- "created": 1445618531000,
- "id": "657597898394173440",
- "language": "en"
- },
- {
- "content": "Thanks All for another great night of #BELIEF",
- "contenttype": "text/plain",
- "created": 1445572548000,
- "id": "657405031822430208",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026",
- "contenttype": "text/plain",
- "created": 1445571500000,
- "id": "657400636745510912",
- "language": "en"
- },
- {
- "content": "Ok west coast let's do it! #belief",
- "contenttype": "text/plain",
- "created": 1445569367000,
- "id": "657391689439404033",
- "language": "en"
- },
- {
- "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.",
- "contenttype": "text/plain",
- "created": 1445569296000,
- "id": "657391393883619328",
- "language": "en"
- },
- {
- "content": "Hello west coast twitterati.. See you at 8 for #Belief",
- "contenttype": "text/plain",
- "created": 1445566144000,
- "id": "657378171872874496",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night.#Belief",
- "contenttype": "text/plain",
- "created": 1445475948000,
- "id": "656999861254918145",
- "language": "en"
- },
- {
- "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief",
- "contenttype": "text/plain",
- "created": 1445475602000,
- "id": "656998409933451264",
- "language": "en"
- },
- {
- "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief",
- "contenttype": "text/plain",
- "created": 1445475580000,
- "id": "656998320133398528",
- "language": "en"
- },
- {
- "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief",
- "contenttype": "text/plain",
- "created": 1445473768000,
- "id": "656990717504393216",
- "language": "en"
- },
- {
- "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief",
- "contenttype": "text/plain",
- "created": 1445473150000,
- "id": "656988127433637888",
- "language": "en"
- },
- {
- "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief",
- "contenttype": "text/plain",
- "created": 1445473063000,
- "id": "656987763644891136",
- "language": "en"
- },
- {
- "content": "\"What my faith gives me no one can match\"#Belief",
- "contenttype": "text/plain",
- "created": 1445472961000,
- "id": "656987336266223616",
- "language": "en"
- },
- {
- "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief",
- "contenttype": "text/plain",
- "created": 1445472531000,
- "id": "656985529951522816",
- "language": "en"
- },
- {
- "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief",
- "contenttype": "text/plain",
- "created": 1445472393000,
- "id": "656984953037586433",
- "language": "en"
- },
- {
- "content": "Good evening Team #Belief the Tweet is on!",
- "contenttype": "text/plain",
- "created": 1445472098000,
- "id": "656983714883239937",
- "language": "en"
- },
- {
- "content": "Thanks everyone for another Epic #Belief night!",
- "contenttype": "text/plain",
- "created": 1445302792000,
- "id": "656273592485810176",
- "language": "en"
- },
- {
- "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief",
- "contenttype": "text/plain",
- "created": 1445302749000,
- "id": "656273415280705536",
- "language": "en"
- },
- {
- "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief",
- "contenttype": "text/plain",
- "created": 1445301110000,
- "id": "656266540967424000",
- "language": "en"
- },
- {
- "content": "Do you all believe in \"soul mates\"?\n#Belief",
- "contenttype": "text/plain",
- "created": 1445300138000,
- "id": "656262462426238976",
- "language": "en"
- },
- {
- "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.",
- "contenttype": "text/plain",
- "created": 1445299749000,
- "id": "656260832628756480",
- "language": "en"
- },
- {
- "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief",
- "contenttype": "text/plain",
- "created": 1445299614000,
- "id": "656260263604310016",
- "language": "en"
- },
- {
- "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief",
- "contenttype": "text/plain",
- "created": 1445299326000,
- "id": "656259057758654464",
- "language": "en"
- },
- {
- "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026",
- "contenttype": "text/plain",
- "created": 1445295716000,
- "id": "656243916224638976",
- "language": "en"
- },
- {
- "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026",
- "contenttype": "text/plain",
- "created": 1445295702000,
- "id": "656243854610337793",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g",
- "contenttype": "text/plain",
- "created": 1445295668000,
- "id": "656243714507931648",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026",
- "contenttype": "text/plain",
- "created": 1445295661000,
- "id": "656243684720050176",
- "language": "en"
- },
- {
- "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9",
- "contenttype": "text/plain",
- "created": 1445279425000,
- "id": "656175584943341568",
- "language": "en"
- },
- {
- "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.",
- "contenttype": "text/plain",
- "created": 1445275802000,
- "id": "656160388526899200",
- "language": "en"
- },
- {
- "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026",
- "contenttype": "text/plain",
- "created": 1445229489000,
- "id": "655966138279432192",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah",
- "contenttype": "text/plain",
- "created": 1445227342000,
- "id": "655957135688241152",
- "language": "en"
- },
- {
- "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026",
- "contenttype": "text/plain",
- "created": 1445225935000,
- "id": "655951232981295104",
- "language": "en"
- },
- {
- "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026",
- "contenttype": "text/plain",
- "created": 1445225228000,
- "id": "655948267868426240",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445225008000,
- "id": "655947345197076480",
- "language": "en"
- },
- {
- "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026",
- "contenttype": "text/plain",
- "created": 1445224852000,
- "id": "655946689249828864",
- "language": "en"
- },
- {
- "content": "West Coast... Here we go. #Belief",
- "contenttype": "text/plain",
- "created": 1445224140000,
- "id": "655943701840048128",
- "language": "en"
- },
- {
- "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs",
- "contenttype": "text/plain",
- "created": 1445220694000,
- "id": "655929249669378048",
- "language": "en"
- },
- {
- "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief",
- "contenttype": "text/plain",
- "created": 1445215032000,
- "id": "655905500056391682",
- "language": "en"
- },
- {
- "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026",
- "contenttype": "text/plain",
- "created": 1445214538000,
- "id": "655903432079904768",
- "language": "en"
- },
- {
- "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV",
- "contenttype": "text/plain",
- "created": 1445214534000,
- "id": "655903413385891840",
- "language": "en"
- },
- {
- "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh",
- "contenttype": "text/plain",
- "created": 1445214502000,
- "id": "655903277796732931",
- "language": "en"
- },
- {
- "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief",
- "contenttype": "text/plain",
- "created": 1445214498000,
- "id": "655903264374812672",
- "language": "en"
- },
- {
- "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief",
- "contenttype": "text/plain",
- "created": 1445214339000,
- "id": "655902594171203584",
- "language": "en"
- },
- {
- "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY",
- "contenttype": "text/plain",
- "created": 1445214327000,
- "id": "655902545903140864",
- "language": "en"
- },
- {
- "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026",
- "contenttype": "text/plain",
- "created": 1445214128000,
- "id": "655901708506103812",
- "language": "en"
- },
- {
- "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026",
- "contenttype": "text/plain",
- "created": 1445213967000,
- "id": "655901033952993280",
- "language": "en"
- },
- {
- "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief",
- "contenttype": "text/plain",
- "created": 1445213904000,
- "id": "655900772467474435",
- "language": "en"
- },
- {
- "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it",
- "contenttype": "text/plain",
- "created": 1445213901000,
- "id": "655900756604620800",
- "language": "en"
- },
- {
- "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445213721000,
- "id": "655900002644987905",
- "language": "en"
- },
- {
- "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV",
- "contenttype": "text/plain",
- "created": 1445213706000,
- "id": "655899942242775040",
- "language": "en"
- },
- {
- "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026",
- "contenttype": "text/plain",
- "created": 1445213699000,
- "id": "655899910563217408",
- "language": "en"
- },
- {
- "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC",
- "contenttype": "text/plain",
- "created": 1445212831000,
- "id": "655896269542420480",
- "language": "en"
- },
- {
- "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.",
- "contenttype": "text/plain",
- "created": 1445212747000,
- "id": "655895916675600384",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF",
- "contenttype": "text/plain",
- "created": 1445212465000,
- "id": "655894734968217600",
- "language": "en"
- },
- {
- "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN",
- "contenttype": "text/plain",
- "created": 1445211845000,
- "id": "655892134524878848",
- "language": "en"
- },
- {
- "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026",
- "contenttype": "text/plain",
- "created": 1445211835000,
- "id": "655892094905532416",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.",
- "contenttype": "text/plain",
- "created": 1445211833000,
- "id": "655892084314890240",
- "language": "en"
- },
- {
- "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026",
- "contenttype": "text/plain",
- "created": 1445209201000,
- "id": "655881046102142978",
- "language": "en"
- },
- {
- "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal",
- "contenttype": "text/plain",
- "created": 1445209181000,
- "id": "655880959535939584",
- "language": "en"
- },
- {
- "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026",
- "contenttype": "text/plain",
- "created": 1445208945000,
- "id": "655879970732949504",
- "language": "en"
- },
- {
- "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass",
- "contenttype": "text/plain",
- "created": 1447639154000,
- "id": "666073008692314113",
- "language": "en"
- },
- {
- "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447638226000,
- "id": "666069114889179136",
- "language": "en"
- },
- {
- "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass",
- "contenttype": "text/plain",
- "created": 1447638067000,
- "id": "666068446325665792",
- "language": "en"
- },
- {
- "content": "Wow aren't you loving @TheRock and his candor? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447637459000,
- "id": "666065895932973057",
- "language": "en"
- },
- {
- "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.",
- "contenttype": "text/plain",
- "created": 1447637030000,
- "id": "666064097562247168",
- "language": "en"
- },
- {
- "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass",
- "contenttype": "text/plain",
- "created": 1447636205000,
- "id": "666060637181644800",
- "language": "en"
- },
- {
- "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1447602477000,
- "id": "665919171062927360",
- "language": "en"
- },
- {
- "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts",
- "contenttype": "text/plain",
- "created": 1447098990000,
- "id": "663807393063538688",
- "language": "en"
- },
- {
- "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.",
- "contenttype": "text/plain",
- "created": 1446998643000,
- "id": "663386507856736257",
- "language": "en"
- },
- {
- "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K",
- "contenttype": "text/plain",
- "created": 1446915955000,
- "id": "663039689360695296",
- "language": "en"
- },
- {
- "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f",
- "contenttype": "text/plain",
- "created": 1446881193000,
- "id": "662893888080879616",
- "language": "en"
- },
- {
- "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U",
- "contenttype": "text/plain",
- "created": 1446744186000,
- "id": "662319239844380672",
- "language": "en"
- },
- {
- "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass",
- "contenttype": "text/plain",
- "created": 1446428929000,
- "id": "660996956861280256",
- "language": "en"
- },
- {
- "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass",
- "contenttype": "text/plain",
- "created": 1446426630000,
- "id": "660987310889041920",
- "language": "en"
- },
- {
- "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow",
- "contenttype": "text/plain",
- "created": 1446220097000,
- "id": "660121050978611205",
- "language": "en"
- },
- {
- "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno",
- "contenttype": "text/plain",
- "created": 1446074433000,
- "id": "659510090748182528",
- "language": "en"
- },
- {
- "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE",
- "contenttype": "text/plain",
- "created": 1445821114000,
- "id": "658447593865945089",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass",
- "contenttype": "text/plain",
- "created": 1445821003000,
- "id": "658447130026188800",
- "language": "en"
- },
- {
- "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.",
- "contenttype": "text/plain",
- "created": 1445820161000,
- "id": "658443598313181188",
- "language": "en"
- },
- {
- "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J",
- "contenttype": "text/plain",
- "created": 1445811545000,
- "id": "658407457438363648",
- "language": "en"
- },
- {
- "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026",
- "contenttype": "text/plain",
- "created": 1445804181000,
- "id": "658376572521459712",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw",
- "contenttype": "text/plain",
- "created": 1445804072000,
- "id": "658376116575449088",
- "language": "en"
- },
- {
- "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1445734755000,
- "id": "658085377140363264",
- "language": "en"
- },
- {
- "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna",
- "contenttype": "text/plain",
- "created": 1445734097000,
- "id": "658082618819280896",
- "language": "en"
- },
- {
- "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026",
- "contenttype": "text/plain",
- "created": 1445732769000,
- "id": "658077046858383360",
- "language": "en"
- },
- {
- "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief",
- "contenttype": "text/plain",
- "created": 1445732579000,
- "id": "658076253618991104",
- "language": "en"
- },
- {
- "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief",
- "contenttype": "text/plain",
- "created": 1445731782000,
- "id": "658072908237934592",
- "language": "en"
- },
- {
- "content": "I just love Alex and his daring #Belief to live fully the present Moment.",
- "contenttype": "text/plain",
- "created": 1445731561000,
- "id": "658071980982206464",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f",
- "contenttype": "text/plain",
- "created": 1445731248000,
- "id": "658070668785770496",
- "language": "en"
- },
- {
- "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?",
- "contenttype": "text/plain",
- "created": 1445731081000,
- "id": "658069968534171648",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!",
- "contenttype": "text/plain",
- "created": 1445648630000,
- "id": "657724143115202560",
- "language": "en"
- },
- {
- "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah",
- "contenttype": "text/plain",
- "created": 1445647285000,
- "id": "657718501147197442",
- "language": "en"
- },
- {
- "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief",
- "contenttype": "text/plain",
- "created": 1445646903000,
- "id": "657716901951369218",
- "language": "en"
- },
- {
- "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief",
- "contenttype": "text/plain",
- "created": 1445645633000,
- "id": "657711572492533760",
- "language": "en"
- },
- {
- "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc",
- "contenttype": "text/plain",
- "created": 1445618531000,
- "id": "657597898394173440",
- "language": "en"
- },
- {
- "content": "Thanks All for another great night of #BELIEF",
- "contenttype": "text/plain",
- "created": 1445572548000,
- "id": "657405031822430208",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026",
- "contenttype": "text/plain",
- "created": 1445571500000,
- "id": "657400636745510912",
- "language": "en"
- },
- {
- "content": "Ok west coast let's do it! #belief",
- "contenttype": "text/plain",
- "created": 1445569367000,
- "id": "657391689439404033",
- "language": "en"
- },
- {
- "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.",
- "contenttype": "text/plain",
- "created": 1445569296000,
- "id": "657391393883619328",
- "language": "en"
- },
- {
- "content": "Hello west coast twitterati.. See you at 8 for #Belief",
- "contenttype": "text/plain",
- "created": 1445566144000,
- "id": "657378171872874496",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night.#Belief",
- "contenttype": "text/plain",
- "created": 1445475948000,
- "id": "656999861254918145",
- "language": "en"
- },
- {
- "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief",
- "contenttype": "text/plain",
- "created": 1445475602000,
- "id": "656998409933451264",
- "language": "en"
- },
- {
- "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief",
- "contenttype": "text/plain",
- "created": 1445475580000,
- "id": "656998320133398528",
- "language": "en"
- },
- {
- "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief",
- "contenttype": "text/plain",
- "created": 1445473768000,
- "id": "656990717504393216",
- "language": "en"
- },
- {
- "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief",
- "contenttype": "text/plain",
- "created": 1445473150000,
- "id": "656988127433637888",
- "language": "en"
- },
- {
- "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief",
- "contenttype": "text/plain",
- "created": 1445473063000,
- "id": "656987763644891136",
- "language": "en"
- },
- {
- "content": "\"What my faith gives me no one can match\"#Belief",
- "contenttype": "text/plain",
- "created": 1445472961000,
- "id": "656987336266223616",
- "language": "en"
- },
- {
- "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief",
- "contenttype": "text/plain",
- "created": 1445472531000,
- "id": "656985529951522816",
- "language": "en"
- },
- {
- "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief",
- "contenttype": "text/plain",
- "created": 1445472393000,
- "id": "656984953037586433",
- "language": "en"
- },
- {
- "content": "Good evening Team #Belief the Tweet is on!",
- "contenttype": "text/plain",
- "created": 1445472098000,
- "id": "656983714883239937",
- "language": "en"
- },
- {
- "content": "Thanks everyone for another Epic #Belief night!",
- "contenttype": "text/plain",
- "created": 1445302792000,
- "id": "656273592485810176",
- "language": "en"
- },
- {
- "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief",
- "contenttype": "text/plain",
- "created": 1445302749000,
- "id": "656273415280705536",
- "language": "en"
- },
- {
- "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief",
- "contenttype": "text/plain",
- "created": 1445301110000,
- "id": "656266540967424000",
- "language": "en"
- },
- {
- "content": "Do you all believe in \"soul mates\"?\n#Belief",
- "contenttype": "text/plain",
- "created": 1445300138000,
- "id": "656262462426238976",
- "language": "en"
- },
- {
- "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.",
- "contenttype": "text/plain",
- "created": 1445299749000,
- "id": "656260832628756480",
- "language": "en"
- },
- {
- "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief",
- "contenttype": "text/plain",
- "created": 1445299614000,
- "id": "656260263604310016",
- "language": "en"
- },
- {
- "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief",
- "contenttype": "text/plain",
- "created": 1445299326000,
- "id": "656259057758654464",
- "language": "en"
- },
- {
- "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026",
- "contenttype": "text/plain",
- "created": 1445295716000,
- "id": "656243916224638976",
- "language": "en"
- },
- {
- "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026",
- "contenttype": "text/plain",
- "created": 1445295702000,
- "id": "656243854610337793",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g",
- "contenttype": "text/plain",
- "created": 1445295668000,
- "id": "656243714507931648",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026",
- "contenttype": "text/plain",
- "created": 1445295661000,
- "id": "656243684720050176",
- "language": "en"
- },
- {
- "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9",
- "contenttype": "text/plain",
- "created": 1445279425000,
- "id": "656175584943341568",
- "language": "en"
- },
- {
- "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.",
- "contenttype": "text/plain",
- "created": 1445275802000,
- "id": "656160388526899200",
- "language": "en"
- },
- {
- "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026",
- "contenttype": "text/plain",
- "created": 1445229489000,
- "id": "655966138279432192",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah",
- "contenttype": "text/plain",
- "created": 1445227342000,
- "id": "655957135688241152",
- "language": "en"
- },
- {
- "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026",
- "contenttype": "text/plain",
- "created": 1445225935000,
- "id": "655951232981295104",
- "language": "en"
- },
- {
- "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026",
- "contenttype": "text/plain",
- "created": 1445225228000,
- "id": "655948267868426240",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445225008000,
- "id": "655947345197076480",
- "language": "en"
- },
- {
- "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026",
- "contenttype": "text/plain",
- "created": 1445224852000,
- "id": "655946689249828864",
- "language": "en"
- },
- {
- "content": "West Coast... Here we go. #Belief",
- "contenttype": "text/plain",
- "created": 1445224140000,
- "id": "655943701840048128",
- "language": "en"
- },
- {
- "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs",
- "contenttype": "text/plain",
- "created": 1445220694000,
- "id": "655929249669378048",
- "language": "en"
- },
- {
- "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief",
- "contenttype": "text/plain",
- "created": 1445215032000,
- "id": "655905500056391682",
- "language": "en"
- },
- {
- "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026",
- "contenttype": "text/plain",
- "created": 1445214538000,
- "id": "655903432079904768",
- "language": "en"
- },
- {
- "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV",
- "contenttype": "text/plain",
- "created": 1445214534000,
- "id": "655903413385891840",
- "language": "en"
- },
- {
- "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh",
- "contenttype": "text/plain",
- "created": 1445214502000,
- "id": "655903277796732931",
- "language": "en"
- },
- {
- "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief",
- "contenttype": "text/plain",
- "created": 1445214498000,
- "id": "655903264374812672",
- "language": "en"
- },
- {
- "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief",
- "contenttype": "text/plain",
- "created": 1445214339000,
- "id": "655902594171203584",
- "language": "en"
- },
- {
- "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY",
- "contenttype": "text/plain",
- "created": 1445214327000,
- "id": "655902545903140864",
- "language": "en"
- },
- {
- "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026",
- "contenttype": "text/plain",
- "created": 1445214128000,
- "id": "655901708506103812",
- "language": "en"
- },
- {
- "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026",
- "contenttype": "text/plain",
- "created": 1445213967000,
- "id": "655901033952993280",
- "language": "en"
- },
- {
- "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief",
- "contenttype": "text/plain",
- "created": 1445213904000,
- "id": "655900772467474435",
- "language": "en"
- },
- {
- "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it",
- "contenttype": "text/plain",
- "created": 1445213901000,
- "id": "655900756604620800",
- "language": "en"
- },
- {
- "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445213721000,
- "id": "655900002644987905",
- "language": "en"
- },
- {
- "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV",
- "contenttype": "text/plain",
- "created": 1445213706000,
- "id": "655899942242775040",
- "language": "en"
- },
- {
- "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026",
- "contenttype": "text/plain",
- "created": 1445213699000,
- "id": "655899910563217408",
- "language": "en"
- },
- {
- "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC",
- "contenttype": "text/plain",
- "created": 1445212831000,
- "id": "655896269542420480",
- "language": "en"
- },
- {
- "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.",
- "contenttype": "text/plain",
- "created": 1445212747000,
- "id": "655895916675600384",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF",
- "contenttype": "text/plain",
- "created": 1445212465000,
- "id": "655894734968217600",
- "language": "en"
- },
- {
- "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN",
- "contenttype": "text/plain",
- "created": 1445211845000,
- "id": "655892134524878848",
- "language": "en"
- },
- {
- "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026",
- "contenttype": "text/plain",
- "created": 1445211835000,
- "id": "655892094905532416",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.",
- "contenttype": "text/plain",
- "created": 1445211833000,
- "id": "655892084314890240",
- "language": "en"
- },
- {
- "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026",
- "contenttype": "text/plain",
- "created": 1445209201000,
- "id": "655881046102142978",
- "language": "en"
- },
- {
- "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal",
- "contenttype": "text/plain",
- "created": 1445209181000,
- "id": "655880959535939584",
- "language": "en"
- },
- {
- "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026",
- "contenttype": "text/plain",
- "created": 1445208945000,
- "id": "655879970732949504",
- "language": "en"
- },
- {
- "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass",
- "contenttype": "text/plain",
- "created": 1447639154000,
- "id": "666073008692314113",
- "language": "en"
- },
- {
- "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447638226000,
- "id": "666069114889179136",
- "language": "en"
- },
- {
- "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass",
- "contenttype": "text/plain",
- "created": 1447638067000,
- "id": "666068446325665792",
- "language": "en"
- },
- {
- "content": "Wow aren't you loving @TheRock and his candor? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447637459000,
- "id": "666065895932973057",
- "language": "en"
- },
- {
- "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.",
- "contenttype": "text/plain",
- "created": 1447637030000,
- "id": "666064097562247168",
- "language": "en"
- },
- {
- "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass",
- "contenttype": "text/plain",
- "created": 1447636205000,
- "id": "666060637181644800",
- "language": "en"
- },
- {
- "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1447602477000,
- "id": "665919171062927360",
- "language": "en"
- },
- {
- "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts",
- "contenttype": "text/plain",
- "created": 1447098990000,
- "id": "663807393063538688",
- "language": "en"
- },
- {
- "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.",
- "contenttype": "text/plain",
- "created": 1446998643000,
- "id": "663386507856736257",
- "language": "en"
- },
- {
- "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K",
- "contenttype": "text/plain",
- "created": 1446915955000,
- "id": "663039689360695296",
- "language": "en"
- },
- {
- "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f",
- "contenttype": "text/plain",
- "created": 1446881193000,
- "id": "662893888080879616",
- "language": "en"
- },
- {
- "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U",
- "contenttype": "text/plain",
- "created": 1446744186000,
- "id": "662319239844380672",
- "language": "en"
- },
- {
- "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass",
- "contenttype": "text/plain",
- "created": 1446428929000,
- "id": "660996956861280256",
- "language": "en"
- },
- {
- "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass",
- "contenttype": "text/plain",
- "created": 1446426630000,
- "id": "660987310889041920",
- "language": "en"
- },
- {
- "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow",
- "contenttype": "text/plain",
- "created": 1446220097000,
- "id": "660121050978611205",
- "language": "en"
- },
- {
- "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno",
- "contenttype": "text/plain",
- "created": 1446074433000,
- "id": "659510090748182528",
- "language": "en"
- },
- {
- "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE",
- "contenttype": "text/plain",
- "created": 1445821114000,
- "id": "658447593865945089",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass",
- "contenttype": "text/plain",
- "created": 1445821003000,
- "id": "658447130026188800",
- "language": "en"
- },
- {
- "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.",
- "contenttype": "text/plain",
- "created": 1445820161000,
- "id": "658443598313181188",
- "language": "en"
- },
- {
- "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J",
- "contenttype": "text/plain",
- "created": 1445811545000,
- "id": "658407457438363648",
- "language": "en"
- },
- {
- "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026",
- "contenttype": "text/plain",
- "created": 1445804181000,
- "id": "658376572521459712",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw",
- "contenttype": "text/plain",
- "created": 1445804072000,
- "id": "658376116575449088",
- "language": "en"
- },
- {
- "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1445734755000,
- "id": "658085377140363264",
- "language": "en"
- },
- {
- "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna",
- "contenttype": "text/plain",
- "created": 1445734097000,
- "id": "658082618819280896",
- "language": "en"
- },
- {
- "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026",
- "contenttype": "text/plain",
- "created": 1445732769000,
- "id": "658077046858383360",
- "language": "en"
- },
- {
- "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief",
- "contenttype": "text/plain",
- "created": 1445732579000,
- "id": "658076253618991104",
- "language": "en"
- },
- {
- "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief",
- "contenttype": "text/plain",
- "created": 1445731782000,
- "id": "658072908237934592",
- "language": "en"
- },
- {
- "content": "I just love Alex and his daring #Belief to live fully the present Moment.",
- "contenttype": "text/plain",
- "created": 1445731561000,
- "id": "658071980982206464",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f",
- "contenttype": "text/plain",
- "created": 1445731248000,
- "id": "658070668785770496",
- "language": "en"
- },
- {
- "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?",
- "contenttype": "text/plain",
- "created": 1445731081000,
- "id": "658069968534171648",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!",
- "contenttype": "text/plain",
- "created": 1445648630000,
- "id": "657724143115202560",
- "language": "en"
- },
- {
- "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah",
- "contenttype": "text/plain",
- "created": 1445647285000,
- "id": "657718501147197442",
- "language": "en"
- },
- {
- "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief",
- "contenttype": "text/plain",
- "created": 1445646903000,
- "id": "657716901951369218",
- "language": "en"
- },
- {
- "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief",
- "contenttype": "text/plain",
- "created": 1445645633000,
- "id": "657711572492533760",
- "language": "en"
- },
- {
- "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc",
- "contenttype": "text/plain",
- "created": 1445618531000,
- "id": "657597898394173440",
- "language": "en"
- },
- {
- "content": "Thanks All for another great night of #BELIEF",
- "contenttype": "text/plain",
- "created": 1445572548000,
- "id": "657405031822430208",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026",
- "contenttype": "text/plain",
- "created": 1445571500000,
- "id": "657400636745510912",
- "language": "en"
- },
- {
- "content": "Ok west coast let's do it! #belief",
- "contenttype": "text/plain",
- "created": 1445569367000,
- "id": "657391689439404033",
- "language": "en"
- },
- {
- "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.",
- "contenttype": "text/plain",
- "created": 1445569296000,
- "id": "657391393883619328",
- "language": "en"
- },
- {
- "content": "Hello west coast twitterati.. See you at 8 for #Belief",
- "contenttype": "text/plain",
- "created": 1445566144000,
- "id": "657378171872874496",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night.#Belief",
- "contenttype": "text/plain",
- "created": 1445475948000,
- "id": "656999861254918145",
- "language": "en"
- },
- {
- "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief",
- "contenttype": "text/plain",
- "created": 1445475602000,
- "id": "656998409933451264",
- "language": "en"
- },
- {
- "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief",
- "contenttype": "text/plain",
- "created": 1445475580000,
- "id": "656998320133398528",
- "language": "en"
- },
- {
- "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief",
- "contenttype": "text/plain",
- "created": 1445473768000,
- "id": "656990717504393216",
- "language": "en"
- },
- {
- "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief",
- "contenttype": "text/plain",
- "created": 1445473150000,
- "id": "656988127433637888",
- "language": "en"
- },
- {
- "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief",
- "contenttype": "text/plain",
- "created": 1445473063000,
- "id": "656987763644891136",
- "language": "en"
- },
- {
- "content": "\"What my faith gives me no one can match\"#Belief",
- "contenttype": "text/plain",
- "created": 1445472961000,
- "id": "656987336266223616",
- "language": "en"
- },
- {
- "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief",
- "contenttype": "text/plain",
- "created": 1445472531000,
- "id": "656985529951522816",
- "language": "en"
- },
- {
- "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief",
- "contenttype": "text/plain",
- "created": 1445472393000,
- "id": "656984953037586433",
- "language": "en"
- },
- {
- "content": "Good evening Team #Belief the Tweet is on!",
- "contenttype": "text/plain",
- "created": 1445472098000,
- "id": "656983714883239937",
- "language": "en"
- },
- {
- "content": "Thanks everyone for another Epic #Belief night!",
- "contenttype": "text/plain",
- "created": 1445302792000,
- "id": "656273592485810176",
- "language": "en"
- },
- {
- "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief",
- "contenttype": "text/plain",
- "created": 1445302749000,
- "id": "656273415280705536",
- "language": "en"
- },
- {
- "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief",
- "contenttype": "text/plain",
- "created": 1445301110000,
- "id": "656266540967424000",
- "language": "en"
- },
- {
- "content": "Do you all believe in \"soul mates\"?\n#Belief",
- "contenttype": "text/plain",
- "created": 1445300138000,
- "id": "656262462426238976",
- "language": "en"
- },
- {
- "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.",
- "contenttype": "text/plain",
- "created": 1445299749000,
- "id": "656260832628756480",
- "language": "en"
- },
- {
- "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief",
- "contenttype": "text/plain",
- "created": 1445299614000,
- "id": "656260263604310016",
- "language": "en"
- },
- {
- "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief",
- "contenttype": "text/plain",
- "created": 1445299326000,
- "id": "656259057758654464",
- "language": "en"
- },
- {
- "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026",
- "contenttype": "text/plain",
- "created": 1445295716000,
- "id": "656243916224638976",
- "language": "en"
- },
- {
- "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026",
- "contenttype": "text/plain",
- "created": 1445295702000,
- "id": "656243854610337793",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g",
- "contenttype": "text/plain",
- "created": 1445295668000,
- "id": "656243714507931648",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026",
- "contenttype": "text/plain",
- "created": 1445295661000,
- "id": "656243684720050176",
- "language": "en"
- },
- {
- "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9",
- "contenttype": "text/plain",
- "created": 1445279425000,
- "id": "656175584943341568",
- "language": "en"
- },
- {
- "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.",
- "contenttype": "text/plain",
- "created": 1445275802000,
- "id": "656160388526899200",
- "language": "en"
- },
- {
- "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026",
- "contenttype": "text/plain",
- "created": 1445229489000,
- "id": "655966138279432192",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah",
- "contenttype": "text/plain",
- "created": 1445227342000,
- "id": "655957135688241152",
- "language": "en"
- },
- {
- "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026",
- "contenttype": "text/plain",
- "created": 1445225935000,
- "id": "655951232981295104",
- "language": "en"
- },
- {
- "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026",
- "contenttype": "text/plain",
- "created": 1445225228000,
- "id": "655948267868426240",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445225008000,
- "id": "655947345197076480",
- "language": "en"
- },
- {
- "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026",
- "contenttype": "text/plain",
- "created": 1445224852000,
- "id": "655946689249828864",
- "language": "en"
- },
- {
- "content": "West Coast... Here we go. #Belief",
- "contenttype": "text/plain",
- "created": 1445224140000,
- "id": "655943701840048128",
- "language": "en"
- },
- {
- "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs",
- "contenttype": "text/plain",
- "created": 1445220694000,
- "id": "655929249669378048",
- "language": "en"
- },
- {
- "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief",
- "contenttype": "text/plain",
- "created": 1445215032000,
- "id": "655905500056391682",
- "language": "en"
- },
- {
- "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026",
- "contenttype": "text/plain",
- "created": 1445214538000,
- "id": "655903432079904768",
- "language": "en"
- },
- {
- "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV",
- "contenttype": "text/plain",
- "created": 1445214534000,
- "id": "655903413385891840",
- "language": "en"
- },
- {
- "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh",
- "contenttype": "text/plain",
- "created": 1445214502000,
- "id": "655903277796732931",
- "language": "en"
- },
- {
- "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief",
- "contenttype": "text/plain",
- "created": 1445214498000,
- "id": "655903264374812672",
- "language": "en"
- },
- {
- "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief",
- "contenttype": "text/plain",
- "created": 1445214339000,
- "id": "655902594171203584",
- "language": "en"
- },
- {
- "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY",
- "contenttype": "text/plain",
- "created": 1445214327000,
- "id": "655902545903140864",
- "language": "en"
- },
- {
- "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026",
- "contenttype": "text/plain",
- "created": 1445214128000,
- "id": "655901708506103812",
- "language": "en"
- },
- {
- "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026",
- "contenttype": "text/plain",
- "created": 1445213967000,
- "id": "655901033952993280",
- "language": "en"
- },
- {
- "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief",
- "contenttype": "text/plain",
- "created": 1445213904000,
- "id": "655900772467474435",
- "language": "en"
- },
- {
- "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it",
- "contenttype": "text/plain",
- "created": 1445213901000,
- "id": "655900756604620800",
- "language": "en"
- },
- {
- "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445213721000,
- "id": "655900002644987905",
- "language": "en"
- },
- {
- "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV",
- "contenttype": "text/plain",
- "created": 1445213706000,
- "id": "655899942242775040",
- "language": "en"
- },
- {
- "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026",
- "contenttype": "text/plain",
- "created": 1445213699000,
- "id": "655899910563217408",
- "language": "en"
- },
- {
- "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC",
- "contenttype": "text/plain",
- "created": 1445212831000,
- "id": "655896269542420480",
- "language": "en"
- },
- {
- "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.",
- "contenttype": "text/plain",
- "created": 1445212747000,
- "id": "655895916675600384",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF",
- "contenttype": "text/plain",
- "created": 1445212465000,
- "id": "655894734968217600",
- "language": "en"
- },
- {
- "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN",
- "contenttype": "text/plain",
- "created": 1445211845000,
- "id": "655892134524878848",
- "language": "en"
- },
- {
- "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026",
- "contenttype": "text/plain",
- "created": 1445211835000,
- "id": "655892094905532416",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.",
- "contenttype": "text/plain",
- "created": 1445211833000,
- "id": "655892084314890240",
- "language": "en"
- },
- {
- "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026",
- "contenttype": "text/plain",
- "created": 1445209201000,
- "id": "655881046102142978",
- "language": "en"
- },
- {
- "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal",
- "contenttype": "text/plain",
- "created": 1445209181000,
- "id": "655880959535939584",
- "language": "en"
- },
- {
- "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026",
- "contenttype": "text/plain",
- "created": 1445208945000,
- "id": "655879970732949504",
- "language": "en"
- },
- {
- "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass",
- "contenttype": "text/plain",
- "created": 1447639154000,
- "id": "666073008692314113",
- "language": "en"
- },
- {
- "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447638226000,
- "id": "666069114889179136",
- "language": "en"
- },
- {
- "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass",
- "contenttype": "text/plain",
- "created": 1447638067000,
- "id": "666068446325665792",
- "language": "en"
- },
- {
- "content": "Wow aren't you loving @TheRock and his candor? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447637459000,
- "id": "666065895932973057",
- "language": "en"
- },
- {
- "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.",
- "contenttype": "text/plain",
- "created": 1447637030000,
- "id": "666064097562247168",
- "language": "en"
- },
- {
- "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass",
- "contenttype": "text/plain",
- "created": 1447636205000,
- "id": "666060637181644800",
- "language": "en"
- },
- {
- "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1447602477000,
- "id": "665919171062927360",
- "language": "en"
- },
- {
- "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts",
- "contenttype": "text/plain",
- "created": 1447098990000,
- "id": "663807393063538688",
- "language": "en"
- },
- {
- "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.",
- "contenttype": "text/plain",
- "created": 1446998643000,
- "id": "663386507856736257",
- "language": "en"
- },
- {
- "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K",
- "contenttype": "text/plain",
- "created": 1446915955000,
- "id": "663039689360695296",
- "language": "en"
- },
- {
- "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f",
- "contenttype": "text/plain",
- "created": 1446881193000,
- "id": "662893888080879616",
- "language": "en"
- },
- {
- "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U",
- "contenttype": "text/plain",
- "created": 1446744186000,
- "id": "662319239844380672",
- "language": "en"
- },
- {
- "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass",
- "contenttype": "text/plain",
- "created": 1446428929000,
- "id": "660996956861280256",
- "language": "en"
- },
- {
- "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass",
- "contenttype": "text/plain",
- "created": 1446426630000,
- "id": "660987310889041920",
- "language": "en"
- },
- {
- "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow",
- "contenttype": "text/plain",
- "created": 1446220097000,
- "id": "660121050978611205",
- "language": "en"
- },
- {
- "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno",
- "contenttype": "text/plain",
- "created": 1446074433000,
- "id": "659510090748182528",
- "language": "en"
- },
- {
- "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE",
- "contenttype": "text/plain",
- "created": 1445821114000,
- "id": "658447593865945089",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass",
- "contenttype": "text/plain",
- "created": 1445821003000,
- "id": "658447130026188800",
- "language": "en"
- },
- {
- "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.",
- "contenttype": "text/plain",
- "created": 1445820161000,
- "id": "658443598313181188",
- "language": "en"
- },
- {
- "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J",
- "contenttype": "text/plain",
- "created": 1445811545000,
- "id": "658407457438363648",
- "language": "en"
- },
- {
- "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026",
- "contenttype": "text/plain",
- "created": 1445804181000,
- "id": "658376572521459712",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw",
- "contenttype": "text/plain",
- "created": 1445804072000,
- "id": "658376116575449088",
- "language": "en"
- },
- {
- "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1445734755000,
- "id": "658085377140363264",
- "language": "en"
- },
- {
- "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna",
- "contenttype": "text/plain",
- "created": 1445734097000,
- "id": "658082618819280896",
- "language": "en"
- },
- {
- "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026",
- "contenttype": "text/plain",
- "created": 1445732769000,
- "id": "658077046858383360",
- "language": "en"
- },
- {
- "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief",
- "contenttype": "text/plain",
- "created": 1445732579000,
- "id": "658076253618991104",
- "language": "en"
- },
- {
- "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief",
- "contenttype": "text/plain",
- "created": 1445731782000,
- "id": "658072908237934592",
- "language": "en"
- },
- {
- "content": "I just love Alex and his daring #Belief to live fully the present Moment.",
- "contenttype": "text/plain",
- "created": 1445731561000,
- "id": "658071980982206464",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f",
- "contenttype": "text/plain",
- "created": 1445731248000,
- "id": "658070668785770496",
- "language": "en"
- },
- {
- "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?",
- "contenttype": "text/plain",
- "created": 1445731081000,
- "id": "658069968534171648",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!",
- "contenttype": "text/plain",
- "created": 1445648630000,
- "id": "657724143115202560",
- "language": "en"
- },
- {
- "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah",
- "contenttype": "text/plain",
- "created": 1445647285000,
- "id": "657718501147197442",
- "language": "en"
- },
- {
- "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief",
- "contenttype": "text/plain",
- "created": 1445646903000,
- "id": "657716901951369218",
- "language": "en"
- },
- {
- "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief",
- "contenttype": "text/plain",
- "created": 1445645633000,
- "id": "657711572492533760",
- "language": "en"
- },
- {
- "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc",
- "contenttype": "text/plain",
- "created": 1445618531000,
- "id": "657597898394173440",
- "language": "en"
- },
- {
- "content": "Thanks All for another great night of #BELIEF",
- "contenttype": "text/plain",
- "created": 1445572548000,
- "id": "657405031822430208",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026",
- "contenttype": "text/plain",
- "created": 1445571500000,
- "id": "657400636745510912",
- "language": "en"
- },
- {
- "content": "Ok west coast let's do it! #belief",
- "contenttype": "text/plain",
- "created": 1445569367000,
- "id": "657391689439404033",
- "language": "en"
- },
- {
- "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.",
- "contenttype": "text/plain",
- "created": 1445569296000,
- "id": "657391393883619328",
- "language": "en"
- },
- {
- "content": "Hello west coast twitterati.. See you at 8 for #Belief",
- "contenttype": "text/plain",
- "created": 1445566144000,
- "id": "657378171872874496",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night.#Belief",
- "contenttype": "text/plain",
- "created": 1445475948000,
- "id": "656999861254918145",
- "language": "en"
- },
- {
- "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief",
- "contenttype": "text/plain",
- "created": 1445475602000,
- "id": "656998409933451264",
- "language": "en"
- },
- {
- "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief",
- "contenttype": "text/plain",
- "created": 1445475580000,
- "id": "656998320133398528",
- "language": "en"
- },
- {
- "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief",
- "contenttype": "text/plain",
- "created": 1445473768000,
- "id": "656990717504393216",
- "language": "en"
- },
- {
- "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief",
- "contenttype": "text/plain",
- "created": 1445473150000,
- "id": "656988127433637888",
- "language": "en"
- },
- {
- "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief",
- "contenttype": "text/plain",
- "created": 1445473063000,
- "id": "656987763644891136",
- "language": "en"
- },
- {
- "content": "\"What my faith gives me no one can match\"#Belief",
- "contenttype": "text/plain",
- "created": 1445472961000,
- "id": "656987336266223616",
- "language": "en"
- },
- {
- "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief",
- "contenttype": "text/plain",
- "created": 1445472531000,
- "id": "656985529951522816",
- "language": "en"
- },
- {
- "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief",
- "contenttype": "text/plain",
- "created": 1445472393000,
- "id": "656984953037586433",
- "language": "en"
- },
- {
- "content": "Good evening Team #Belief the Tweet is on!",
- "contenttype": "text/plain",
- "created": 1445472098000,
- "id": "656983714883239937",
- "language": "en"
- },
- {
- "content": "Thanks everyone for another Epic #Belief night!",
- "contenttype": "text/plain",
- "created": 1445302792000,
- "id": "656273592485810176",
- "language": "en"
- },
- {
- "content": "The Pastor and Imam represent the possibility of PEACE in the world. If they can do it. We can. Peace to All\ud83d\ude4f\ud83c\udffe #Belief",
- "contenttype": "text/plain",
- "created": 1445302749000,
- "id": "656273415280705536",
- "language": "en"
- },
- {
- "content": "We felt privileged to film the Hajj and explore the beauty of Islam.\n#99namesforGod #Belief",
- "contenttype": "text/plain",
- "created": 1445301110000,
- "id": "656266540967424000",
- "language": "en"
- },
- {
- "content": "Do you all believe in \"soul mates\"?\n#Belief",
- "contenttype": "text/plain",
- "created": 1445300138000,
- "id": "656262462426238976",
- "language": "en"
- },
- {
- "content": ".@RevEdBacon thank you so much for hosting tonight's #Belief at All Saints Church.",
- "contenttype": "text/plain",
- "created": 1445299749000,
- "id": "656260832628756480",
- "language": "en"
- },
- {
- "content": "This is one of the best love stories I've ever seen. #belief Ian and Larissa showing us the depths of love.#Belief",
- "contenttype": "text/plain",
- "created": 1445299614000,
- "id": "656260263604310016",
- "language": "en"
- },
- {
- "content": "Hey Everyone .. Tweeting from a bar in Atlanta with @SheriSalata OWN not in my hotel. Anything for #Belief",
- "contenttype": "text/plain",
- "created": 1445299326000,
- "id": "656259057758654464",
- "language": "en"
- },
- {
- "content": "RT @joshuadubois: When you see Ian & Larissa tonight on #BELIEF, you'll know what Christian love is all about. 8pmET, @OWNTV. Tune in! http\u2026",
- "contenttype": "text/plain",
- "created": 1445295716000,
- "id": "656243916224638976",
- "language": "en"
- },
- {
- "content": "RT @KimHaleDance: I Believe LAUGHTER. IS. CONTAGIOUS. What do you believe? See you tonight! 8\/7c.\u2764\ufe0f #Belief #Beliefin3words https:\/\/t.co\/x\u2026",
- "contenttype": "text/plain",
- "created": 1445295702000,
- "id": "656243854610337793",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: See the world through someone else\u2019s soul. The epic journey of #Belief continues tonight at 8\/7c.\nhttps:\/\/t.co\/UKKMHZuC0g",
- "contenttype": "text/plain",
- "created": 1445295668000,
- "id": "656243714507931648",
- "language": "en"
- },
- {
- "content": "RT @OWNTV: Mendel Hurwitz's inquisitive nature grounded him in his faith. See where it's taken him now: https:\/\/t.co\/2iWmNOxK9r https:\/\/t.c\u2026",
- "contenttype": "text/plain",
- "created": 1445295661000,
- "id": "656243684720050176",
- "language": "en"
- },
- {
- "content": "Thank you for opening up the heart space and letting #Belief in. Tonight it keeps getting better. See you at 8\/7c\nhttps:\/\/t.co\/E65vkTray9",
- "contenttype": "text/plain",
- "created": 1445279425000,
- "id": "656175584943341568",
- "language": "en"
- },
- {
- "content": "I believe in the @weightwatchers program so much I decided to invest, join the Board, and partner in #wwfamily evolution.",
- "contenttype": "text/plain",
- "created": 1445275802000,
- "id": "656160388526899200",
- "language": "en"
- },
- {
- "content": "RT @AVAETC: Debut episode of #BELIEF has now aired on both coasts. Trended for 4 hours. Brava, @Oprah + team. 6 beautiful nights to come. B\u2026",
- "contenttype": "text/plain",
- "created": 1445229489000,
- "id": "655966138279432192",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: 6 more epic nights of #Belief to come! See you tomorrow 8pET\/7pCT for the next installment! @OWNTV @Oprah",
- "contenttype": "text/plain",
- "created": 1445227342000,
- "id": "655957135688241152",
- "language": "en"
- },
- {
- "content": "RT @ledisi: I love how Ancestry and Tradition is honored throughout every story in #Belief @OWNTV @OWN Thank you @Oprah this is so importa\u2026",
- "contenttype": "text/plain",
- "created": 1445225935000,
- "id": "655951232981295104",
- "language": "en"
- },
- {
- "content": "RT @UN: Showing #Belief at the UN \"is a bigger dream than I had\" - @Oprah https:\/\/t.co\/VC4OqD8yub #Belief #GlobalGoals https:\/\/t.co\/LZyGuC7\u2026",
- "contenttype": "text/plain",
- "created": 1445225228000,
- "id": "655948267868426240",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: To seek, to question, to learn, to teach, to pass it on; some of the breathtaking themes running like water throughout #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445225008000,
- "id": "655947345197076480",
- "language": "en"
- },
- {
- "content": "RT @iamtikasumpter: #Belief had me in awe. Faith in the divine is the constant that links us together. It's all beautiful and righteous. Gi\u2026",
- "contenttype": "text/plain",
- "created": 1445224852000,
- "id": "655946689249828864",
- "language": "en"
- },
- {
- "content": "West Coast... Here we go. #Belief",
- "contenttype": "text/plain",
- "created": 1445224140000,
- "id": "655943701840048128",
- "language": "en"
- },
- {
- "content": "Big surprise at icanady watch party. Epic night. #Belief. So much more to come. https:\/\/t.co\/kBDtFwGyQs",
- "contenttype": "text/plain",
- "created": 1445220694000,
- "id": "655929249669378048",
- "language": "en"
- },
- {
- "content": "I loved the Mendel story so much because it represents right of passasge. \" bye bye to childhood\".#Belief",
- "contenttype": "text/plain",
- "created": 1445215032000,
- "id": "655905500056391682",
- "language": "en"
- },
- {
- "content": "RT @squee_machine: This is a visual feast! Completely gorgeous and I am transfixed. The colors, the composition, cinematography, vibe. #Bel\u2026",
- "contenttype": "text/plain",
- "created": 1445214538000,
- "id": "655903432079904768",
- "language": "en"
- },
- {
- "content": "RT @JamesTyphany: Looking at @ #Belief I really needed this in my life thanks @OWNTV",
- "contenttype": "text/plain",
- "created": 1445214534000,
- "id": "655903413385891840",
- "language": "en"
- },
- {
- "content": "Just surprised a \"watch party\" @icanady 's house. #Belief http:\/\/t.co\/Di0I3OooCh",
- "contenttype": "text/plain",
- "created": 1445214502000,
- "id": "655903277796732931",
- "language": "en"
- },
- {
- "content": "RT @MsLaWandaa: I love moments of sitting among elders and learning. I can feel this moment was special for @ReshThakkar #Belief",
- "contenttype": "text/plain",
- "created": 1445214498000,
- "id": "655903264374812672",
- "language": "en"
- },
- {
- "content": "RT @xonecole: \"Do you have to have religion or is being a good person...is that enough?\" #Belief",
- "contenttype": "text/plain",
- "created": 1445214339000,
- "id": "655902594171203584",
- "language": "en"
- },
- {
- "content": "RT @ChivonJohn: Very inspired by the stories on #belief https:\/\/t.co\/uMRCCfCWcY",
- "contenttype": "text/plain",
- "created": 1445214327000,
- "id": "655902545903140864",
- "language": "en"
- },
- {
- "content": "RT @KiranSArora: powerful personal story that many of us can connect to. searching for what's missing, spiritual liberation. @ReshThakkar #\u2026",
- "contenttype": "text/plain",
- "created": 1445214128000,
- "id": "655901708506103812",
- "language": "en"
- },
- {
- "content": "RT @createdbyerica: \"I'm willing to go as far as I have to go to get that feeling in my heart of being connected to the Divine.\" - Reshma @\u2026",
- "contenttype": "text/plain",
- "created": 1445213967000,
- "id": "655901033952993280",
- "language": "en"
- },
- {
- "content": "RT @UrbnHealthNP: I am enjoying this so much. #Belief",
- "contenttype": "text/plain",
- "created": 1445213904000,
- "id": "655900772467474435",
- "language": "en"
- },
- {
- "content": "RT @DrMABrown: Your relationship with #belief can be completely different than how others experience it",
- "contenttype": "text/plain",
- "created": 1445213901000,
- "id": "655900756604620800",
- "language": "en"
- },
- {
- "content": "RT @ckerfin: On another river on the other side of the world... transition between stories, drawing connections to different beliefs #Belie\u2026",
- "contenttype": "text/plain",
- "created": 1445213721000,
- "id": "655900002644987905",
- "language": "en"
- },
- {
- "content": "RT @nikfarrior: So profound. We are all born into #Belief @Oprah @OWN @OWNTV",
- "contenttype": "text/plain",
- "created": 1445213706000,
- "id": "655899942242775040",
- "language": "en"
- },
- {
- "content": "RT @fgaboys: @Oprah the start of #Belief is riveting and edifying. It makes you want more and inspires you too see what's coming up next. \u2026",
- "contenttype": "text/plain",
- "created": 1445213699000,
- "id": "655899910563217408",
- "language": "en"
- },
- {
- "content": "RT @MalikaGhosh: Here we GO- let's lose ourselves #belief NOW @Oprah JOY http:\/\/t.co\/vYSNLd3LvC",
- "contenttype": "text/plain",
- "created": 1445212831000,
- "id": "655896269542420480",
- "language": "en"
- },
- {
- "content": "RT @MastinKipp: .@Oprah and team are about to bring the Light with #Belief.",
- "contenttype": "text/plain",
- "created": 1445212747000,
- "id": "655895916675600384",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: 7 minutes, y'all! #BELIEF",
- "contenttype": "text/plain",
- "created": 1445212465000,
- "id": "655894734968217600",
- "language": "en"
- },
- {
- "content": "RT @LPToussaint: BELIEF defines experience.Choose wisely #Belief Tonight on OWN",
- "contenttype": "text/plain",
- "created": 1445211845000,
- "id": "655892134524878848",
- "language": "en"
- },
- {
- "content": "RT @TheBeBeWinans: Congratulations to my dear friend @Oprah on the launch of your series #BELIEF #Tonight on @OWNTV. Your friendship inspir\u2026",
- "contenttype": "text/plain",
- "created": 1445211835000,
- "id": "655892094905532416",
- "language": "en"
- },
- {
- "content": "RT @UzoAduba: Thirty minutes to @Oprah #Belief. Pretty sure it's about to be our favorite thing.",
- "contenttype": "text/plain",
- "created": 1445211833000,
- "id": "655892084314890240",
- "language": "en"
- },
- {
- "content": "RT @DeandresVoice: Moments away from the start of @SuperSoulSunday and the first night of the epic #Belief series on @OWNTV - are you ready\u2026",
- "contenttype": "text/plain",
- "created": 1445209201000,
- "id": "655881046102142978",
- "language": "en"
- },
- {
- "content": "RT @jennaldewan: I CANT WAIT FOR THIS TONIGHT!!!!!! Got my popcorn, got my tissues I am readyyyyyy! #Belief https:\/\/t.co\/WluTdeEqal",
- "contenttype": "text/plain",
- "created": 1445209181000,
- "id": "655880959535939584",
- "language": "en"
- },
- {
- "content": "RT @indiaarie: U heard about @Oprah passion project? It's called #Belief - I've see some & Its special! Tonight - 7c\/8e - 7 nights!! Whose\u2026",
- "contenttype": "text/plain",
- "created": 1445208945000,
- "id": "655879970732949504",
- "language": "en"
- },
- {
- "content": "Wow, I liked @TheRock before, now I really SEE how special he is. The daughter story was IT for me. So great! #MasterClass",
- "contenttype": "text/plain",
- "created": 1447639154000,
- "id": "666073008692314113",
- "language": "en"
- },
- {
- "content": ".@TheRock how did you Know to listen to your gut and Not go back to football? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447638226000,
- "id": "666069114889179136",
- "language": "en"
- },
- {
- "content": ".@TheRock moving back in with your parents so humbling. \" on the other side of your pain is something good if you can hold on\" #masterclass",
- "contenttype": "text/plain",
- "created": 1447638067000,
- "id": "666068446325665792",
- "language": "en"
- },
- {
- "content": "Wow aren't you loving @TheRock and his candor? #Masterclass",
- "contenttype": "text/plain",
- "created": 1447637459000,
- "id": "666065895932973057",
- "language": "en"
- },
- {
- "content": "RT @patt_t: @TheRock @Oprah @RichOnOWN @OWNTV this interview makes me like you as a fellow human even more for being so real.",
- "contenttype": "text/plain",
- "created": 1447637030000,
- "id": "666064097562247168",
- "language": "en"
- },
- {
- "content": "\"Be You\".. That's the best advice ever @TheRock #MastersClass",
- "contenttype": "text/plain",
- "created": 1447636205000,
- "id": "666060637181644800",
- "language": "en"
- },
- {
- "content": "Supersoulers let's lift our spirits pray and hold Paris in the Light\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1447602477000,
- "id": "665919171062927360",
- "language": "en"
- },
- {
- "content": "RT @DeepakChopra: What I learned in week 1: Become What You Believe 21-Day Meditation Experience - https:\/\/t.co\/kqaMaMqEUp #GoogleAlerts",
- "contenttype": "text/plain",
- "created": 1447098990000,
- "id": "663807393063538688",
- "language": "en"
- },
- {
- "content": "Watching Bryan Stevenson on #SuperSoulSunday! \"You are not the worst mistake you ever made\".\nAren't we glad about that.",
- "contenttype": "text/plain",
- "created": 1446998643000,
- "id": "663386507856736257",
- "language": "en"
- },
- {
- "content": ".@CherylStrayed BRAVE ENOUGH my new favorite thing! Gonna buy a copy for all my girls. #Perfectgift https:\/\/t.co\/gz1tnv8t8K",
- "contenttype": "text/plain",
- "created": 1446915955000,
- "id": "663039689360695296",
- "language": "en"
- },
- {
- "content": "Stevie Wonder singing \"Happy Birthday to you!\" to my dear mariashriver. A phenomenal woman and\u2026 https:\/\/t.co\/Ygm5eDIs4f",
- "contenttype": "text/plain",
- "created": 1446881193000,
- "id": "662893888080879616",
- "language": "en"
- },
- {
- "content": "It\u2019s my faaaaavorite time of the Year! For the first time you can shop the list on @amazon! https:\/\/t.co\/a6GMvVrhjN https:\/\/t.co\/sJlQMROq5U",
- "contenttype": "text/plain",
- "created": 1446744186000,
- "id": "662319239844380672",
- "language": "en"
- },
- {
- "content": "Incredible story \"the spirit of the Lord is on you\" thanks for sharing @smokey_robinson #Masterclass",
- "contenttype": "text/plain",
- "created": 1446428929000,
- "id": "660996956861280256",
- "language": "en"
- },
- {
- "content": "Wasnt that incredible story about @smokey_robinson 's dad leaving his family at 12. #MasterClass",
- "contenttype": "text/plain",
- "created": 1446426630000,
- "id": "660987310889041920",
- "language": "en"
- },
- {
- "content": "Gayle, Charlie, Nora @CBSThisMorning Congratulations! #1000thshow",
- "contenttype": "text/plain",
- "created": 1446220097000,
- "id": "660121050978611205",
- "language": "en"
- },
- {
- "content": "I believe your home should rise up to meet you. @TheEllenShow you nailed it with HOME. Tweethearts, grab a copy! https:\/\/t.co\/iFMnpRAsno",
- "contenttype": "text/plain",
- "created": 1446074433000,
- "id": "659510090748182528",
- "language": "en"
- },
- {
- "content": "Can I get a Witness?!\u270b\ud83c\udffe https:\/\/t.co\/tZ1QyAeSdE",
- "contenttype": "text/plain",
- "created": 1445821114000,
- "id": "658447593865945089",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow you're a treasure.\nYour truth set a lot of people free.\n#Masterclass",
- "contenttype": "text/plain",
- "created": 1445821003000,
- "id": "658447130026188800",
- "language": "en"
- },
- {
- "content": "Hope you all are enjoying @TheEllenShow on #MasterClass.",
- "contenttype": "text/plain",
- "created": 1445820161000,
- "id": "658443598313181188",
- "language": "en"
- },
- {
- "content": ".@GloriaSteinem, shero to women everywhere, on how far we\u2019ve come and how far we need to go. #SuperSoulSunday 7p\/6c.\nhttps:\/\/t.co\/3e7oxXW02J",
- "contenttype": "text/plain",
- "created": 1445811545000,
- "id": "658407457438363648",
- "language": "en"
- },
- {
- "content": "RT @TheEllenShow: I told a story from my @OWNTV's #MasterClass on my show. Normally I\u2019d save it all for Sunday, but @Oprah made me. https:\/\u2026",
- "contenttype": "text/plain",
- "created": 1445804181000,
- "id": "658376572521459712",
- "language": "en"
- },
- {
- "content": ".@TheEllenShow is a master teacher of living her truth & living authentically as herself. #MasterClass tonight 8\/7c.\nhttps:\/\/t.co\/iLT2KgRsSw",
- "contenttype": "text/plain",
- "created": 1445804072000,
- "id": "658376116575449088",
- "language": "en"
- },
- {
- "content": ".@SheriSalata , @jonnysinc @part2pictures . Tears of joy and gratitude to you and our entire #BeliefTeam We DID IT!! My heart is full.\ud83d\ude4f\ud83c\udffe\ud83d\ude4f\ud83c\udffe",
- "contenttype": "text/plain",
- "created": 1445734755000,
- "id": "658085377140363264",
- "language": "en"
- },
- {
- "content": "Donna and Bob saw the tape of their story just days before she passed. They appreciated it. #RIPDonna",
- "contenttype": "text/plain",
- "created": 1445734097000,
- "id": "658082618819280896",
- "language": "en"
- },
- {
- "content": "RT @rempower: .@Oprah this series allowed me to slide into people's lives around the world and see the same in them ... we all have a belie\u2026",
- "contenttype": "text/plain",
- "created": 1445732769000,
- "id": "658077046858383360",
- "language": "en"
- },
- {
- "content": "All the stories moved me, My favorite line \" I must pass the stories on to my grandson otherwise our people will loose their way. #Belief",
- "contenttype": "text/plain",
- "created": 1445732579000,
- "id": "658076253618991104",
- "language": "en"
- },
- {
- "content": ".@part2pictures some of your best imagery yet. Filming Alex on the rock.#Belief",
- "contenttype": "text/plain",
- "created": 1445731782000,
- "id": "658072908237934592",
- "language": "en"
- },
- {
- "content": "I just love Alex and his daring #Belief to live fully the present Moment.",
- "contenttype": "text/plain",
- "created": 1445731561000,
- "id": "658071980982206464",
- "language": "en"
- },
- {
- "content": "RT @GrowingOWN: Let's do this! #Belief finale tweet tweet party. Thank you @Oprah! \ud83d\ude4f",
- "contenttype": "text/plain",
- "created": 1445731248000,
- "id": "658070668785770496",
- "language": "en"
- },
- {
- "content": "RT @lizkinnell: The epic finale of #Belief on @OWNTV is about to start. 8\/et Are you ready? What do you Believe?",
- "contenttype": "text/plain",
- "created": 1445731081000,
- "id": "658069968534171648",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night of Belief. Belief runs all day tomorrow for bingers and final episode!",
- "contenttype": "text/plain",
- "created": 1445648630000,
- "id": "657724143115202560",
- "language": "en"
- },
- {
- "content": "RT @OWNingLight: #Belief is the ultimate travel map to mass acceptance. \ud83d\ude4f\ud83c\udffb\u2764\ufe0f\ud83c\udf0d @Oprah",
- "contenttype": "text/plain",
- "created": 1445647285000,
- "id": "657718501147197442",
- "language": "en"
- },
- {
- "content": "\" I can feel my heart opening and faith coming back in\".. What's better than that? #Belief",
- "contenttype": "text/plain",
- "created": 1445646903000,
- "id": "657716901951369218",
- "language": "en"
- },
- {
- "content": "Hey Belief team mates can yo believe how quickly the week has passed? #Belief",
- "contenttype": "text/plain",
- "created": 1445645633000,
- "id": "657711572492533760",
- "language": "en"
- },
- {
- "content": "Ran into @5SOS backstage. Fun times with @TheEllenShow today! https:\/\/t.co\/2PP3W3RzXc",
- "contenttype": "text/plain",
- "created": 1445618531000,
- "id": "657597898394173440",
- "language": "en"
- },
- {
- "content": "Thanks All for another great night of #BELIEF",
- "contenttype": "text/plain",
- "created": 1445572548000,
- "id": "657405031822430208",
- "language": "en"
- },
- {
- "content": "RT @3LWTV: #BecomeWhatYouBelieve New meditation w\/ @Oprah @DeepakChopra begins 11\/2 Register https:\/\/t.co\/x0R9HWTAX0 #Belief https:\/\/t.co\/\u2026",
- "contenttype": "text/plain",
- "created": 1445571500000,
- "id": "657400636745510912",
- "language": "en"
- },
- {
- "content": "Ok west coast let's do it! #belief",
- "contenttype": "text/plain",
- "created": 1445569367000,
- "id": "657391689439404033",
- "language": "en"
- },
- {
- "content": "Thank u kind gentleman who told me I had kale in my teeth. Was eating kale chips with Quincy Jones. Went straight to @LairdLife party.",
- "contenttype": "text/plain",
- "created": 1445569296000,
- "id": "657391393883619328",
- "language": "en"
- },
- {
- "content": "Hello west coast twitterati.. See you at 8 for #Belief",
- "contenttype": "text/plain",
- "created": 1445566144000,
- "id": "657378171872874496",
- "language": "en"
- },
- {
- "content": "Thank you all for another beautiful night.#Belief",
- "contenttype": "text/plain",
- "created": 1445475948000,
- "id": "656999861254918145",
- "language": "en"
- },
- {
- "content": "RT @PRanganathan: \"Transformation is the rule of the game. The universe is not standing still.\" - Marcelo @OWNTV @Oprah #Belief",
- "contenttype": "text/plain",
- "created": 1445475602000,
- "id": "656998409933451264",
- "language": "en"
- },
- {
- "content": "\"The Universe is not standing still.. The whole Universe is expanding\" I love the dance between science and spirituality! #Belief",
- "contenttype": "text/plain",
- "created": 1445475580000,
- "id": "656998320133398528",
- "language": "en"
- },
- {
- "content": "\"Without our prayers and our songs we won't be here\" Apache leader.#Belief",
- "contenttype": "text/plain",
- "created": 1445473768000,
- "id": "656990717504393216",
- "language": "en"
- },
- {
- "content": "Notice her mother crying. She knows its last tine she will ever see her daughter.#Belief",
- "contenttype": "text/plain",
- "created": 1445473150000,
- "id": "656988127433637888",
- "language": "en"
- },
- {
- "content": "This final trial is unbelievable. Every hair gets pulled from her head one at a time. Now that is Something!!#Belief",
- "contenttype": "text/plain",
- "created": 1445473063000,
- "id": "656987763644891136",
- "language": "en"
- },
- {
- "content": "\"What my faith gives me no one can match\"#Belief",
- "contenttype": "text/plain",
- "created": 1445472961000,
- "id": "656987336266223616",
- "language": "en"
- },
- {
- "content": "It's a devotion to faith beyond anything Ive seen. Fascinating.Jain nuns. #Belief",
- "contenttype": "text/plain",
- "created": 1445472531000,
- "id": "656985529951522816",
- "language": "en"
- },
- {
- "content": "I'd never heard of Jain monks and nuns before doing this series. #Belief",
- "contenttype": "text/plain",
- "created": 1445472393000,
- "id": "656984953037586433",
- "language": "en"
- },
- {
- "content": "Good evening Team #Belief the Tweet is on!",
- "contenttype": "text/plain",
- "created": 1445472098000,
- "id": "656983714883239937",
- "language": "en"
- },
- {
- "content": "Thanks everyone for another Epic #Belief night!",
- "contenttype": "text/plain",
- "created": 1445302792000,
- "id": "656273592485810176",
- "language": "en"
- }
- ]
-}
diff --git a/resources/personality-v3.txt b/resources/personality-v3.txt
deleted file mode 100644
index b11508a4e..000000000
--- a/resources/personality-v3.txt
+++ /dev/null
@@ -1,137 +0,0 @@
-Vice President Johnson, Mr. Speaker, Mr. Chief Justice, President Eisenhower,
-Vice President Nixon, President Truman, Reverend Clergy, fellow citizens:
-
-We observe today not a victory of party but a celebration of freedom --
-symbolizing an end as well as a beginning -- signifying renewal as well as
-change. For I have sworn before you and Almighty God the same solemn oath our
-forbears prescribed nearly a century and three-quarters ago.
-
-The world is very different now. For man holds in his mortal hands the power
-to abolish all forms of human poverty and all forms of human life. And yet
-the same revolutionary beliefs for which our forebears fought are still at
-issue around the globe -- the belief that the rights of man come not from the
-generosity of the state but from the hand of God.
-
-We dare not forget today that we are the heirs of that first revolution. Let
-the word go forth from this time and place, to friend and foe alike, that the
-torch has been passed to a new generation of Americans -- born in this century,
-tempered by war, disciplined by a hard and bitter peace, proud of our ancient
-heritage -- and unwilling to witness or permit the slow undoing of those human
-rights to which this nation has always been committed, and to which we are
-committed today at home and around the world.
-
-Let every nation know, whether it wishes us well or ill, that we shall pay
-any price, bear any burden, meet any hardship, support any friend, oppose
-any foe to assure the survival and the success of liberty.
-
-This much we pledge -- and more.
-
-To those old allies whose cultural and spiritual origins we share, we pledge
-the loyalty of faithful friends. United there is little we cannot do in a host
-of cooperative ventures. Divided there is little we can do -- for we dare not
-meet a powerful challenge at odds and split asunder.
-
-To those new states whom we welcome to the ranks of the free, we pledge our
-word that one form of colonial control shall not have passed away merely to
-be replaced by a far more iron tyranny. We shall not always expect to find
-them supporting our view. But we shall always hope to find them strongly
-supporting their own freedom -- and to remember that, in the past, those who
-foolishly sought power by riding the back of the tiger ended up inside.
-
-To those people in the huts and villages of half the globe struggling to
-break the bonds of mass misery, we pledge our best efforts to help them help
-themselves, for whatever period is required -- not because the communists may
-be doing it, not because we seek their votes, but because it is right. If a
-free society cannot help the many who are poor, it cannot save the few who
-are rich.
-
-To our sister republics south of our border, we offer a special pledge -- to
-convert our good words into good deeds -- in a new alliance for progress --
-to assist free men and free governments in casting off the chains of poverty.
-But this peaceful revolution of hope cannot become the prey of hostile powers.
-Let all our neighbors know that we shall join with them to oppose aggression
-or subversion anywhere in the Americas. And let every other power know that
-this Hemisphere intends to remain the master of its own house.
-
-To that world assembly of sovereign states, the United Nations, our last best
-hope in an age where the instruments of war have far outpaced the instruments
-of peace, we renew our pledge of support -- to prevent it from becoming merely
-a forum for invective -- to strengthen its shield of the new and the weak --
-and to enlarge the area in which its writ may run.
-
-Finally, to those nations who would make themselves our adversary, we offer
-not a pledge but a request: that both sides begin anew the quest for peace,
-before the dark powers of destruction unleashed by science engulf all humanity
-in planned or accidental self-destruction.
-
-We dare not tempt them with weakness. For only when our arms are sufficient
-beyond doubt can we be certain beyond doubt that they will never be employed.
-
-But neither can two great and powerful groups of nations take comfort from
-our present course -- both sides overburdened by the cost of modern weapons,
-both rightly alarmed by the steady spread of the deadly atom, yet both racing
-to alter that uncertain balance of terror that stays the hand of mankind's
-final war.
-
-So let us begin anew -- remembering on both sides that civility is not a sign
-of weakness, and sincerity is always subject to proof. Let us never negotiate
-out of fear. But let us never fear to negotiate.
-
-Let both sides explore what problems unite us instead of belaboring those
-problems which divide us.
-
-Let both sides, for the first time, formulate serious and precise proposals
-for the inspection and control of arms -- and bring the absolute power to
-destroy other nations under the absolute control of all nations.
-
-Let both sides seek to invoke the wonders of science instead of its terrors.
-Together let us explore the stars, conquer the deserts, eradicate disease,
-tap the ocean depths and encourage the arts and commerce.
-
-Let both sides unite to heed in all corners of the earth the command of
-Isaiah -- to "undo the heavy burdens ... (and) let the oppressed go free."
-
-And if a beachhead of cooperation may push back the jungle of suspicion, let
-both sides join in creating a new endeavor, not a new balance of power, but
-a new world of law, where the strong are just and the weak secure and the
-peace preserved.
-
-All this will not be finished in the first one hundred days. Nor will it be
-finished in the first one thousand days, nor in the life of this
-Administration, nor even perhaps in our lifetime on this planet. But let us
-begin.
-
-In your hands, my fellow citizens, more than mine, will rest the final success
-or failure of our course. Since this country was founded, each generation of
-Americans has been summoned to give testimony to its national loyalty. The
-graves of young Americans who answered the call to service surround the globe.
-
-Now the trumpet summons us again -- not as a call to bear arms, though arms we
-need -- not as a call to battle, though embattled we are -- but a call to bear
-the burden of a long twilight struggle, year in and year out, "rejoicing in
-hope, patient in tribulation" -- a struggle against the common enemies of man:
-tyranny, poverty, disease and war itself.
-
-Can we forge against these enemies a grand and global alliance, North and
-South, East and West, that can assure a more fruitful life for all mankind?
-Will you join in that historic effort?
-
-In the long history of the world, only a few generations have been granted
-the role of defending freedom in its hour of maximum danger. I do not shrink
-from this responsibility -- I welcome it. I do not believe that any of us
-would exchange places with any other people or any other generation. The
-energy, the faith, the devotion which we bring to this endeavor will light
-our country and all who serve it -- and the glow from that fire can truly
-light the world.
-
-And so, my fellow Americans: ask not what your country can do for you -- ask
-what you can do for your country.
-
-My fellow citizens of the world: ask not what America will do for you, but
-what together we can do for the freedom of man.
-
-Finally, whether you are citizens of America or citizens of the world, ask of
-us here the same high standards of strength and sacrifice which we ask of you.
-With a good conscience our only sure reward, with history the final judge of
-our deeds, let us go forth to lead the land we love, asking His blessing and
-His help, but knowing that here on earth God's work must truly be our own.
diff --git a/resources/personality.es.txt b/resources/personality.es.txt
deleted file mode 100644
index 950fdb28e..000000000
--- a/resources/personality.es.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-En un lugar de la Mancha, de cuyo nombre no quiero acordarme, no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero, adarga antigua, rocín flaco y galgo corredor. Una olla de algo más vaca que carnero, salpicón las más noches, duelos y quebrantos los sábados, lantejas los viernes, algún palomino de añadidura los domingos, consumían las tres partes de su hacienda. El resto della concluían sayo de velarte, calzas de velludo para las fiestas, con sus pantuflos de lo mesmo, y los días de entresemana se honraba con su vellorí de lo más fino. Tenía en su casa una ama que pasaba de los cuarenta, y una sobrina que no llegaba a los veinte, y un mozo de campo y plaza, que así ensillaba el rocín como tomaba la podadera. Frisaba la edad de nuestro hidalgo con los cincuenta años; era de complexión recia, seco de carnes, enjuto de rostro, gran madrugador y amigo de la caza. Quieren decir que tenía el sobrenombre de Quijada, o Quesada, que en esto hay alguna diferencia en los autores que deste caso escriben; aunque, por conjeturas verosímiles, se deja entender que se llamaba Quejana. Pero esto importa poco a nuestro cuento; basta que en la narración dél no se salga un punto de la verdad.
-Es, pues, de saber que este sobredicho hidalgo, los ratos que estaba ocioso, que eran los más del año, se daba a leer libros de caballerías, con tanta afición y gusto, que olvidó casi de todo punto el ejercicio de la caza, y aun la administración de su hacienda. Y llegó a tanto su curiosidad y desatino en esto, que vendió muchas hanegas de tierra de sembradura para comprar libros de caballerías en que leer, y así, llevó a su casa todos cuantos pudo haber dellos; y de todos, ningunos le parecían tan bien como los que compuso el famoso Feliciano de Silva, porque la claridad de su prosa y aquellas entricadas razones suyas le parecían de perlas, y más cuando llegaba a leer aquellos requiebros y cartas de desafíos, donde en muchas partes hallaba escrito: La razón de la sinrazón que a mi razón se hace, de tal manera mi razón enflaquece, que con razón me quejo de la vuestra fermosura. Y también cuando leía: ...los altos cielos que de vuestra divinidad divinamente con las estrellas os fortifican, y os hacen merecedora del merecimiento que merece la vuestra grandeza.
-Con estas razones perdía el pobre caballero el juicio, y desvelábase por entenderlas y desentrañarles el sentido, que no se lo sacara ni las entendiera el mesmo Aristóteles, si resucitara para sólo ello. No estaba muy bien con las heridas que don Belianís daba y recebía, porque se imaginaba que, por grandes maestros que le hubiesen curado, no dejaría de tener el rostro y todo el cuerpo lleno de cicatrices y señales. Pero, con todo, alababa en su autor aquel acabar su libro con la promesa de aquella inacabable aventura, y muchas veces le vino deseo de tomar la pluma y dalle fin al pie de la letra, como allí se promete; y sin duda alguna lo hiciera, y aun saliera con ello, si otros mayores y continuos pensamientos no se lo estorbaran. Tuvo muchas veces competencia con el cura de su lugar —que era hombre docto, graduado en Sigüenza—, sobre cuál había sido mejor caballero: Palmerín de Ingalaterra o Amadís de Gaula; mas maese Nicolás, barbero del mesmo pueblo, decía que ninguno llegaba al Caballero del Febo, y que si alguno se le podía comparar, era don Galaor, hermano de Amadís de Gaula, porque tenía muy acomodada condición para todo; que no era caballero melindroso, ni tan llorón como su hermano, y que en lo de la valentía no le iba en zaga.
-En resolución, él se enfrascó tanto en su letura, que se le pasaban las noches leyendo de claro en claro, y los días de turbio en turbio; y así, del poco dormir y del mucho leer, se le secó el celebro, de manera que vino a perder el juicio. Llenósele la fantasía de todo aquello que leía en los libros, así de encantamentos como de pendencias, batallas, desafíos, heridas, requiebros, amores, tormentas y disparates imposibles; y asentósele de tal modo en la imaginación que era verdad toda aquella máquina de aquellas sonadas soñadas invenciones que leía, que para él no había otra historia más cierta en el mundo. Decía él que el Cid Ruy Díaz había sido muy buen caballero, pero que no tenía que ver con el Caballero de la Ardiente Espada, que de sólo un revés había partido por medio dos fieros y descomunales gigantes. Mejor estaba con Bernardo del Carpio, porque en Roncesvalles había muerto a Roldán el encantado, valiéndose de la industria de Hércules, cuando ahogó a Anteo, el hijo de la Tierra, entre los brazos. Decía mucho bien del gigante Morgante, porque, con ser de aquella generación gigantea, que todos son soberbios y descomedidos, él solo era afable y bien criado. Pero, sobre todos, estaba bien con Reinaldos de Montalbán, y más cuando le veía salir de su castillo y robar cuantos topaba, y cuando en allende robó aquel ídolo de Mahoma que era todo de oro, según dice su historia. Diera él, por dar una mano de coces al traidor de Galalón, al ama que tenía, y aun a su sobrina de añadidura.
-En efeto, rematado ya su juicio, vino a dar en el más estraño pensamiento que jamás dio loco en el mundo; y fue que le pareció convenible y necesario, así para el aumento de su honra como para el servicio de su república, hacerse caballero andante, y irse por todo el mundo con sus armas y caballo a buscar las aventuras y a ejercitarse en todo aquello que él había leído que los caballeros andantes se ejercitaban, deshaciendo todo género de agravio, y poniéndose en ocasiones y peligros donde, acabándolos, cobrase eterno nombre y fama. Imaginábase el pobre ya coronado por el valor de su brazo, por lo menos, del imperio de Trapisonda; y así, con estos tan agradables pensamientos, llevado del estraño gusto que en ellos sentía, se dio priesa a poner en efeto lo que deseaba.
-Y lo primero que hizo fue limpiar unas armas que habían sido de sus bisabuelos, que, tomadas de orín y llenas de moho, luengos siglos había que estaban puestas y olvidadas en un rincón. Limpiólas y aderezólas lo mejor que pudo, pero vio que tenían una gran falta, y era que no tenían celada de encaje, sino morrión simple; mas a esto suplió su industria, porque de cartones hizo un modo de media celada, que, encajada con el morrión, hacían una apariencia de celada entera. Es verdad que para probar si era fuerte y podía estar al riesgo de una cuchillada, sacó su espada y le dio dos golpes, y con el primero y en un punto deshizo lo que había hecho en una semana; y no dejó de parecerle mal la facilidad con que la había hecho pedazos, y, por asegurarse deste peligro, la tornó a hacer de nuevo, poniéndole unas barras de hierro por de dentro, de tal manera que él quedó satisfecho de su fortaleza; y, sin querer hacer nueva experiencia della, la diputó y tuvo por celada finísima de encaje.
-Fue luego a ver su rocín, y, aunque tenía más cuartos que un real y más tachas que el caballo de Gonela, que tantum pellis et ossa fuit, le pareció que ni el Bucéfalo de Alejandro ni Babieca el del Cid con él se igualaban. Cuatro días se le pasaron en imaginar qué nombre le pondría; porque, según se decía él a sí mesmo, no era razón que caballo de caballero tan famoso, y tan bueno él por sí, estuviese sin nombre conocido; y ansí, procuraba acomodársele de manera que declarase quién había sido, antes que fuese de caballero andante, y lo que era entonces; pues estaba muy puesto en razón que, mudando su señor estado, mudase él también el nombre, y le cobrase famoso y de estruendo, como convenía a la nueva orden y al nuevo ejercicio que ya profesaba. Y así, después de muchos nombres que formó, borró y quitó, añadió, deshizo y tornó a hacer en su memoria e imaginación, al fin le vino a llamar Rocinante: nombre, a su parecer, alto, sonoro y significativo de lo que había sido cuando fue rocín, antes de lo que ahora era, que era antes y primero de todos los rocines del mundo.
-Puesto nombre, y tan a su gusto, a su caballo, quiso ponérsele a sí mismo, y en este pensamiento duró otros ocho días, y al cabo se vino a llamar don Quijote; de donde —como queda dicho— tomaron ocasión los autores desta tan verdadera historia que, sin duda, se debía de llamar Quijada, y no Quesada, como otros quisieron decir. Pero, acordándose que el valeroso Amadís no sólo se había contentado con llamarse Amadís a secas, sino que añadió el nombre de su reino y patria, por Hepila famosa, y se llamó Amadís de Gaula, así quiso, como buen caballero, añadir al suyo el nombre de la suya y llamarse don Quijote de la Mancha, con que, a su parecer, declaraba muy al vivo su linaje y patria, y la honraba con tomar el sobrenombre della.
-Limpias, pues, sus armas, hecho del morrión celada, puesto nombre a su rocín y confirmándose a sí mismo, se dio a entender que no le faltaba otra cosa sino buscar una dama de quien enamorarse; porque el caballero andante sin amores era árbol sin hojas y sin fruto y cuerpo sin alma. Decíase él a sí:
-— Si yo, por malos de mis pecados, o por mi buena suerte, me encuentro por ahí con algún gigante, como de ordinario les acontece a los caballeros andantes, y le derribo de un encuentro, o le parto por mitad del cuerpo, o, finalmente, le venzo y le rindo, ¿no será bien tener a quien enviarle presentado y que entre y se hinque de rodillas ante mi dulce señora, y diga con voz humilde y rendido: ''Yo, señora, soy el gigante Caraculiambro, señor de la ínsula Malindrania, a quien venció en singular batalla el jamás como se debe alabado caballero don Quijote de la Mancha, el cual me mandó que me presentase ante vuestra merced, para que la vuestra grandeza disponga de mí a su talante''?
-¡Oh, cómo se holgó nuestro buen caballero cuando hubo hecho este discurso, y más cuando halló a quien dar nombre de su dama! Y fue, a lo que se cree, que en un lugar cerca del suyo había una moza labradora de muy buen parecer, de quien él un tiempo anduvo enamorado, aunque, según se entiende, ella jamás lo supo, ni le dio cata dello. Llamábase Aldonza Lorenzo, y a ésta le pareció ser bien darle título de señora de sus pensamientos; y, buscándole nombre que no desdijese mucho del suyo, y que tirase y se encaminase al de princesa y gran señora, vino a llamarla Dulcinea del Toboso, porque era natural del Toboso; nombre, a su parecer, músico y peregrino y significativo, como todos los demás que a él y a sus cosas había puesto.
-
-
diff --git a/resources/personality.txt b/resources/personality.txt
deleted file mode 100644
index 9bdd68266..000000000
--- a/resources/personality.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-Call me Ishmael. Some years ago-never mind how long precisely-having little or no money in my purse, and nothing particular to interest me on shore, I thought I would sail about a little and see the watery part of the world. It is a way I have of driving off the spleen and regulating the circulation. Whenever I find myself growing grim about the mouth; whenever it is a damp, drizzly November in my soul; whenever I find myself involuntarily pausing before coffin warehouses, and bringing up the rear of every funeral I meet; and especially whenever my hypos get such an upper hand of me, that it requires a strong moral principle to prevent me from deliberately stepping into the street, and methodically knocking people's hats off-then, I account it high time to get to sea as soon as I can. This is my substitute for pistol and ball. With a philosophical flourish Cato throws himself upon his sword; I quietly take to the ship. There is nothing surprising in this. If they but knew it, almost all men in their degree, some time or other, cherish very nearly the same feelings towards the ocean with me.
-There now is your insular city of the Manhattoes, belted round by wharves as Indian isles by coral reefs-commerce surrounds it with her surf. Right and left, the streets take you waterward. Its extreme downtown is the battery, where that noble mole is washed by waves, and cooled by breezes, which a few hours previous were out of sight of land. Look at the crowds of water-gazers there.
-Circumambulate the city of a dreamy Sabbath afternoon. Go from Corlears Hook to Coenties Slip, and from thence, by Whitehall, northward. What do you see?-Posted like silent sentinels all around the town, stand thousands upon thousands of mortal men fixed in ocean reveries. Some leaning against the spiles; some seated upon the pier-heads; some looking over the bulwarks of ships from China; some high aloft in the rigging, as if striving to get a still better seaward peep. But these are all landsmen; of week days pent up in lath and plaster-tied to counters, nailed to benches, clinched to desks. How then is this? Are the green fields gone? What do they here?
-But look! here come more crowds, pacing straight for the water, and seemingly bound for a dive. Strange! Nothing will content them but the extremest limit of the land; loitering under the shady lee of yonder warehouses will not suffice. No. They must get just as nigh the water as they possibly can without falling in. And there they stand-miles of them-leagues. Inlanders all, they come from lanes and alleys, streets and avenues-north, east, south, and west. Yet here they all unite. Tell me, does the magnetic virtue of the needles of the compasses of all those ships attract them thither?
-Once more. Say you are in the country; in some high land of lakes. Take almost any path you please, and ten to one it carries you down in a dale, and leaves you there by a pool in the stream. There is magic in it. Let the most absent-minded of men be plunged in his deepest reveries-stand that man on his legs, set his feet a-going, and he will infallibly lead you to water, if water there be in all that region. Should you ever be athirst in the great American desert, try this experiment, if your caravan happen to be supplied with a metaphysical professor. Yes, as every one knows, meditation and water are wedded for ever.
-But here is an artist. He desires to paint you the dreamiest, shadiest, quietest, most enchanting bit of romantic landscape in all the valley of the Saco. What is the chief element he employs? There stand his trees, each with a hollow trunk, as if a hermit and a crucifix were within; and here sleeps his meadow, and there sleep his cattle; and up from yonder cottage goes a sleepy smoke. Deep into distant woodlands winds a mazy way, reaching to overlapping spurs of mountains bathed in their hill-side blue. But though the picture lies thus tranced, and though this pine-tree shakes down its sighs like leaves upon this shepherd's head, yet all were vain, unless the shepherd's eye were fixed upon the magic stream before him. Go visit the Prairies in June, when for scores on scores of miles you wade knee-deep among Tiger-lilies-what is the one charm wanting?-Water-there is not a drop of water there! Were Niagara but a cataract of sand, would you travel your thousand miles to see it? Why did the poor poet of Tennessee, upon suddenly receiving two handfuls of silver, deliberate whether to buy him a coat, which he sadly needed, or invest his money in a pedestrian trip to Rockaway Beach? Why is almost every robust healthy boy with a robust healthy soul in him, at some time or other crazy to go to sea? Why upon your first voyage as a passenger, did you yourself feel such a mystical vibration, when first told that you and your ship were now out of sight of land? Why did the old Persians hold the sea holy? Why did the Greeks give it a separate deity, and own brother of Jove? Surely all this is not without meaning. And still deeper the meaning of that story of Narcissus, who because he could not grasp the tormenting, mild image he saw in the fountain, plunged into it and was drowned. But that same image, we ourselves see in all rivers and oceans. It is the image of the ungraspable phantom of life; and this is the key to it all.
-Now, when I say that I am in the habit of going to sea whenever I begin to grow hazy about the eyes, and begin to be over conscious of my lungs, I do not mean to have it inferred that I ever go to sea as a passenger. For to go as a passenger you must needs have a purse, and a purse is but a rag unless you have something in it. Besides, passengers get sea-sick-grow quarrelsome-don't sleep of nights-do not enjoy themselves much, as a general thing;-no, I never go as a passenger; nor, though I am something of a salt, do I ever go to sea as a Commodore, or a Captain, or a Cook. I abandon the glory and distinction of such offices to those who like them. For my part, I abominate all honourable respectable toils, trials, and tribulations of every kind whatsoever. It is quite as much as I can do to take care of myself, without taking care of ships, barques, brigs, schooners, and what not. And as for going as cook,-though I confess there is considerable glory in that, a cook being a sort of officer on ship-board-yet, somehow, I never fancied broiling fowls;-though once broiled, judiciously buttered, and judgmatically salted and peppered, there is no one who will speak more respectfully, not to say reverentially, of a broiled fowl than I will. It is out of the idolatrous dotings of the old Egyptians upon broiled ibis and roasted river horse, that you see the mummies of those creatures in their huge bake-houses the pyramids.
-No, when I go to sea, I go as a simple sailor, right before the mast, plumb down into the forecastle, aloft there to the royal mast-head. True, they rather order me about some, and make me jump from spar to spar, like a grasshopper in a May meadow. And at first, this sort of thing is unpleasant enough. It touches one's sense of honour, particularly if you come of an old established family in the land, the Van Rensselaers, or Randolphs, or Hardicanutes. And more than all, if just previous to putting your hand into the tar-pot, you have been lording it as a country schoolmaster, making the tallest boys stand in awe of you. The transition is a keen one, I assure you, from a schoolmaster to a sailor, and requires a strong decoction of Seneca and the Stoics to enable you to grin and bear it. But even this wears off in time.
-What of it, if some old hunks of a sea-captain orders me to get a broom and sweep down the decks? What does that indignity amount to, weighed, I mean, in the scales of the New Testament? Do you think the archangel Gabriel thinks anything the less of me, because I promptly and respectfully obey that old hunks in that particular instance? Who ain't a slave? Tell me that. Well, then, however the old sea-captains may order me about-however they may thump and punch me about, I have the satisfaction of knowing that it is all right; that everybody else is one way or other served in much the same way-either in a physical or metaphysical point of view, that is; and so the universal thump is passed round, and all hands should rub each other's shoulder-blades, and be content.
-Again, I always go to sea as a sailor, because they make a point of paying me for my trouble, whereas they never pay passengers a single penny that I ever heard of. On the contrary, passengers themselves must pay. And there is all the difference in the world between paying and being paid. The act of paying is perhaps the most uncomfortable infliction that the two orchard thieves entailed upon us. But BEING PAID,-what will compare with it? The urbane activity with which a man receives money is really marvellous, considering that we so earnestly believe money to be the root of all earthly ills, and that on no account can a monied man enter heaven. Ah! how cheerfully we consign ourselves to perdition!
-Finally, I always go to sea as a sailor, because of the wholesome exercise and pure air of the fore-castle deck. For as in this world, head winds are far more prevalent than winds from astern (that is, if you never violate the Pythagorean maxim), so for the most part the Commodore on the quarter-deck gets his atmosphere at second hand from the sailors on the forecastle. He thinks he breathes it first; but not so. In much the same way do the commonalty lead their leaders in many other things, at the same time that the leaders little suspect it. But wherefore it was that after having repeatedly smelt the sea as a merchant sailor, I should now take it into my head to go on a whaling voyage; this the invisible police officer of the Fates, who has the constant surveillance of me, and secretly dogs me, and influences me in some unaccountable way-he can better answer than any one else. And, doubtless, my going on this whaling voyage, formed part of the grand programme of Providence that was drawn up a long time ago. It came in as a sort of brief interlude and solo between more extensive performances. I take it that this part of the bill must have run something like this:
-"GRAND CONTESTED ELECTION FOR THE PRESIDENCY OF THE UNITED STATES. "WHALING VOYAGE BY ONE ISHMAEL. "BLOODY BATTLE IN AFFGHANISTAN."
-Though I cannot tell why it was exactly that those stage managers, the Fates, put me down for this shabby part of a whaling voyage, when others were set down for magnificent parts in high tragedies, and short and easy parts in genteel comedies, and jolly parts in farces-though I cannot tell why this was exactly; yet, now that I recall all the circumstances, I think I can see a little into the springs and motives which being cunningly presented to me under various disguises, induced me to set about performing the part I did, besides cajoling me into the delusion that it was a choice resulting from my own unbiased freewill and discriminating judgment.
-Chief among these motives was the overwhelming idea of the great whale himself. Such a portentous and mysterious monster roused all my curiosity. Then the wild and distant seas where he rolled his island bulk; the undeliverable, nameless perils of the whale; these, with all the attending marvels of a thousand Patagonian sights and sounds, helped to sway me to my wish. With other men, perhaps, such things would not have been inducements; but as for me, I am tormented with an everlasting itch for things remote. I love to sail forbidden seas, and land on barbarous coasts. Not ignoring what is good, I am quick to perceive a horror, and could still be social with it-would they let me-since it is but well to be on friendly terms with all the inmates of the place one lodges in.
-By reason of these things, then, the whaling voyage was welcome; the great flood-gates of the wonder-world swung open, and in the wild conceits that swayed me to my purpose, two and two there floated into my inmost soul, endless processions of the whale, and, mid most of them all, one grand hooded phantom, like a snow hill in the air.
diff --git a/resources/speech_with_pause.wav b/resources/speech_with_pause.wav
new file mode 100644
index 000000000..783426cb5
Binary files /dev/null and b/resources/speech_with_pause.wav differ
diff --git a/resources/table_test.png b/resources/table_test.png
new file mode 100644
index 000000000..e709df6a1
Binary files /dev/null and b/resources/table_test.png differ
diff --git a/resources/tone-example-html.json b/resources/tone-example-html.json
deleted file mode 100755
index e663b6cfb..000000000
--- a/resources/tone-example-html.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "text": "Team, I know that times are tough!
Product sales have been disappointing for the past three quarters.
We have a competitive product, but we need to do a better job of selling it!
"
-}
diff --git a/resources/tone-example.json b/resources/tone-example.json
deleted file mode 100755
index c3cc7f90c..000000000
--- a/resources/tone-example.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "text": "Team, I know that times are tough! Product sales have been disappointing for the past three quarters. We have a competitive product, but we need to do a better job of selling it!"
-}
diff --git a/resources/tone-v3-expect1.json b/resources/tone-v3-expect1.json
deleted file mode 100644
index e41cf8529..000000000
--- a/resources/tone-v3-expect1.json
+++ /dev/null
@@ -1,8680 +0,0 @@
-{
- "document_tone": {
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.971214,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.546126,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.543228,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.072227,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.057439,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.53,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.003,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.55,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.241,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.513,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.467,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.749,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- "sentences_tone": [
- {
- "sentence_id": 0,
- "text": "Call me Ishmael.",
- "input_from": 0,
- "input_to": 16,
- "tone_categories": []
- },
- {
- "sentence_id": 1,
- "text": "Some years ago-never mind how long precisely-having little or no money in my purse, and nothing particular to interest me on shore, I thought I would sail about a little and see the watery part of the world.",
- "input_from": 17,
- "input_to": 224,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.170393,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.350151,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.201739,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.114688,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.469036,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.114,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.728,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.406,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.166,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.284,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.375,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.92,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 2,
- "text": "It is a way I have of driving off the spleen and regulating the circulation.",
- "input_from": 225,
- "input_to": 301,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.335625,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.263686,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.429728,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.20467,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.139387,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.628,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.755,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.253,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.461,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.312,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 3,
- "text": "Whenever I find myself growing grim about the mouth; whenever it is a damp, drizzly November in my soul; whenever I find myself involuntarily pausing before coffin warehouses, and bringing up the rear of every funeral I meet; and especially whenever my hypos get such an upper hand of me, that it requires a strong moral principle to prevent me from deliberately stepping into the street, and methodically knocking people's hats off-then, I account it high time to get to sea as soon as I can.",
- "input_from": 302,
- "input_to": 795,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.53187,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.50254,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.36085,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.037935,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.158363,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.203,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.008,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.318,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.7,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.444,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.51,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.81,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 4,
- "text": "This is my substitute for pistol and ball.",
- "input_from": 796,
- "input_to": 838,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.175965,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.290521,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.215051,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.302646,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.259432,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.569,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.571,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.446,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.56,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.81,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 5,
- "text": "With a philosophical flourish Cato throws himself upon his sword; I quietly take to the ship.",
- "input_from": 839,
- "input_to": 932,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.183406,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.518299,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.150604,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.168203,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.307349,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.346,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.888,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.706,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.795,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.107,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 6,
- "text": "There is nothing surprising in this.",
- "input_from": 933,
- "input_to": 969,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.202684,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.331177,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.335063,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.249111,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.433038,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.35,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.044,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.795,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.935,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.739,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 7,
- "text": "If they but knew it, almost all men in their degree, some time or other, cherish very nearly the same feelings towards the ocean with me.",
- "input_from": 970,
- "input_to": 1107,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.263035,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.203018,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.108853,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.135628,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.430709,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.605,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.176,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.315,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.041,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.721,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.707,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.953,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 8,
- "text": "There now is your insular city of the Manhattoes, belted round by wharves as Indian isles by coral reefs-commerce surrounds it with her surf.",
- "input_from": 1108,
- "input_to": 1249,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.208645,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.505883,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.139235,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.123256,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.209934,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.591,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.655,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.587,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.5,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.115,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 9,
- "text": "Right and left, the streets take you waterward.",
- "input_from": 1250,
- "input_to": 1297,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.296232,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.248731,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.249263,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.315715,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.234019,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.249,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.813,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.832,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.817,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.017,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 10,
- "text": "Its extreme downtown is the battery, where that noble mole is washed by waves, and cooled by breezes, which a few hours previous were out of sight of land.",
- "input_from": 1298,
- "input_to": 1453,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.373581,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.556262,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.197002,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.108432,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.158906,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.778,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.484,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.311,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.301,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.261,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 11,
- "text": "Look at the crowds of water-gazers there.",
- "input_from": 1454,
- "input_to": 1495,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.098702,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.639292,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.2851,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.124082,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.294147,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.929,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.224,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.337,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.221,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.192,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 12,
- "text": "Circumambulate the city of a dreamy Sabbath afternoon.",
- "input_from": 1496,
- "input_to": 1550,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.169689,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.206569,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.181326,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.247856,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.395501,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.975,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.932,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.388,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.137,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.18,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 13,
- "text": "Go from Corlears Hook to Coenties Slip, and from thence, by Whitehall, northward.",
- "input_from": 1551,
- "input_to": 1632,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.207906,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.371378,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.280693,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.102245,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.416521,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.93,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.571,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.265,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.234,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.305,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 14,
- "text": "What do you see?-Posted like silent sentinels all around the town, stand thousands upon thousands of mortal men fixed in ocean reveries.",
- "input_from": 1633,
- "input_to": 1769,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.262753,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.696676,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.194555,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.15851,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.270896,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.082,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.351,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.201,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.722,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.628,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.347,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 15,
- "text": "Some leaning against the spiles; some seated upon the pier-heads; some looking over the bulwarks of ships from China; some high aloft in the rigging, as if striving to get a still better seaward peep.",
- "input_from": 1770,
- "input_to": 1970,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.382868,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.489318,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.205163,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.118944,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.425947,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.135,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.767,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.954,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.691,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.157,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.226,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.243,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 16,
- "text": "But these are all landsmen; of week days pent up in lath and plaster-tied to counters, nailed to benches, clinched to desks.",
- "input_from": 1971,
- "input_to": 2095,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.109781,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.348402,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.100454,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.439683,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.396121,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.493,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.871,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.405,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.274,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.258,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.671,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 17,
- "text": "How then is this?",
- "input_from": 2096,
- "input_to": 2113,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.289338,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.487263,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.184789,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.060132,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.370277,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.847,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.31,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.132,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.157,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.335,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.954,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 18,
- "text": "Are the green fields gone?",
- "input_from": 2114,
- "input_to": 2140,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.150856,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.364911,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.294397,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.153937,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.284773,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.418,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.897,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.157,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.342,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.102,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 19,
- "text": "What do they here?",
- "input_from": 2141,
- "input_to": 2159,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.298403,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.484869,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.244632,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.119957,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.282312,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.06,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.19,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.93,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.985,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.261,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 20,
- "text": "But look! here come more crowds, pacing straight for the water, and seemingly bound for a dive.",
- "input_from": 2160,
- "input_to": 2255,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.081729,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.366571,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.179309,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.336148,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.33228,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.459,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.38,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.891,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.286,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.371,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.222,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.528,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 21,
- "text": "Strange!",
- "input_from": 2256,
- "input_to": 2264,
- "tone_categories": []
- },
- {
- "sentence_id": 22,
- "text": "Nothing will content them but the extremest limit of the land; loitering under the shady lee of yonder warehouses will not suffice.",
- "input_from": 2265,
- "input_to": 2396,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.214428,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.400577,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.44209,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.079106,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.253806,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.721,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.202,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.274,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.214,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.671,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 23,
- "text": "No.",
- "input_from": 2397,
- "input_to": 2400,
- "tone_categories": []
- },
- {
- "sentence_id": 24,
- "text": "They must get just as nigh the water as they possibly can without falling in.",
- "input_from": 2401,
- "input_to": 2478,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.149916,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.438289,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.309294,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.103366,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.428773,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.451,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.556,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.076,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.546,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.511,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.826,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 25,
- "text": "And there they stand-miles of them-leagues.",
- "input_from": 2479,
- "input_to": 2522,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.195838,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.609443,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.237532,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.218651,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.238227,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.498,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.201,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.812,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.902,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.23,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 26,
- "text": "Inlanders all, they come from lanes and alleys, streets and avenues-north, east, south, and west.",
- "input_from": 2523,
- "input_to": 2620,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.20581,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.242848,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.156057,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.317132,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.284507,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.72,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.566,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.565,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.72,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.818,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.111,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 27,
- "text": "Yet here they all unite.",
- "input_from": 2621,
- "input_to": 2645,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.221384,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.169253,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.151174,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.250741,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.430384,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.987,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.277,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.267,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.932,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.954,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.287,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 28,
- "text": "Tell me, does the magnetic virtue of the needles of the compasses of all those ships attract them thither?",
- "input_from": 2646,
- "input_to": 2752,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.225617,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.571378,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.290246,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.164151,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.183171,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.597,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.465,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.524,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.823,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.429,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.364,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 29,
- "text": "Once more.",
- "input_from": 2753,
- "input_to": 2763,
- "tone_categories": []
- },
- {
- "sentence_id": 30,
- "text": "Say you are in the country; in some high land of lakes.",
- "input_from": 2764,
- "input_to": 2819,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.141122,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.421809,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.361904,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.202674,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.359623,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.614,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.603,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.387,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.919,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.785,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.035,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 31,
- "text": "Take almost any path you please, and ten to one it carries you down in a dale, and leaves you there by a pool in the stream.",
- "input_from": 2820,
- "input_to": 2944,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.279927,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.343172,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.36336,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.149495,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.305648,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.733,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.412,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.649,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.748,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.876,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.03,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 32,
- "text": "There is magic in it.",
- "input_from": 2945,
- "input_to": 2966,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.151178,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.362646,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.405931,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.202287,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.323321,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.58,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.153,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.393,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.743,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.511,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 33,
- "text": "Let the most absent-minded of men be plunged in his deepest reveries-stand that man on his legs, set his feet a-going, and he will infallibly lead you to water, if water there be in all that region.",
- "input_from": 2967,
- "input_to": 3165,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.073153,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.722682,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.45649,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.065335,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.408581,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.114,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.255,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.459,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.229,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.9,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.784,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.309,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 34,
- "text": "Should you ever be athirst in the great American desert, try this experiment, if your caravan happen to be supplied with a metaphysical professor.",
- "input_from": 3166,
- "input_to": 3312,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.252295,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.52585,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.234371,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.112877,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.175748,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.275,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.199,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.661,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.392,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.591,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.412,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.389,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 35,
- "text": "Yes, as every one knows, meditation and water are wedded for ever.",
- "input_from": 3313,
- "input_to": 3379,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.174186,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.248523,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.148391,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.25751,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.475705,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.675,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.786,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.749,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.316,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.261,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.133,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.636,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 36,
- "text": "But here is an artist.",
- "input_from": 3380,
- "input_to": 3402,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.188722,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.138485,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.171406,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.293563,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.528097,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.615,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.03,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.393,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.552,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.844,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 37,
- "text": "He desires to paint you the dreamiest, shadiest, quietest, most enchanting bit of romantic landscape in all the valley of the Saco.",
- "input_from": 3403,
- "input_to": 3534,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.115039,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.136932,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.228761,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.323535,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.433443,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.493,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.735,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.761,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.804,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.545,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.136,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 38,
- "text": "What is the chief element he employs?",
- "input_from": 3535,
- "input_to": 3572,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.398249,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.351877,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.410105,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.088988,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.129349,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.372,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.519,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.351,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.423,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.278,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 39,
- "text": "There stand his trees, each with a hollow trunk, as if a hermit and a crucifix were within; and here sleeps his meadow, and there sleep his cattle; and up from yonder cottage goes a sleepy smoke.",
- "input_from": 3573,
- "input_to": 3768,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.265136,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.796105,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.075884,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.126968,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.210043,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.114,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.601,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.518,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.753,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.872,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.214,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 40,
- "text": "Deep into distant woodlands winds a mazy way, reaching to overlapping spurs of mountains bathed in their hill-side blue.",
- "input_from": 3769,
- "input_to": 3889,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.118054,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.375256,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.54878,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.12193,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.235122,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.773,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.711,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.6,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.578,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.185,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 41,
- "text": "But though the picture lies thus tranced, and though this pine-tree shakes down its sighs like leaves upon this shepherd's head, yet all were vain, unless the shepherd's eye were fixed upon the magic stream before him.",
- "input_from": 3890,
- "input_to": 4108,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.441053,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.262616,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.138243,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.023707,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.530394,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.273,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.11,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.43,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.166,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.771,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.595,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.739,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 42,
- "text": "Go visit the Prairies in June, when for scores on scores of miles you wade knee-deep among Tiger-lilies-what is the one charm wanting?-Water-there is not a drop of water there!",
- "input_from": 4109,
- "input_to": 4285,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.37904,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.175941,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.260338,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.402414,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.162226,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.528,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.558,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.567,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.67,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.148,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 43,
- "text": "Were Niagara but a cataract of sand, would you travel your thousand miles to see it?",
- "input_from": 4286,
- "input_to": 4370,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.033412,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.429261,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.434537,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.380345,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.262543,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.139,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.067,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.756,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.768,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.568,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 44,
- "text": "Why did the poor poet of Tennessee, upon suddenly receiving two handfuls of silver, deliberate whether to buy him a coat, which he sadly needed, or invest his money in a pedestrian trip to Rockaway Beach?",
- "input_from": 4371,
- "input_to": 4575,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.267462,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.61645,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.060459,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.083649,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.46912,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.019,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.065,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.641,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.446,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.433,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.277,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.39,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 45,
- "text": "Why is almost every robust healthy boy with a robust healthy soul in him, at some time or other crazy to go to sea?",
- "input_from": 4576,
- "input_to": 4691,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.222434,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.151632,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.146582,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.199168,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.371779,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.614,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.671,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.173,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.692,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.379,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.772,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 46,
- "text": "Why upon your first voyage as a passenger, did you yourself feel such a mystical vibration, when first told that you and your ship were now out of sight of land?",
- "input_from": 4692,
- "input_to": 4853,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.289226,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.403452,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.388116,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.148897,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.177373,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.099,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.175,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.446,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.872,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.875,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.031,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 47,
- "text": "Why did the old Persians hold the sea holy?",
- "input_from": 4854,
- "input_to": 4897,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.156871,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.440361,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.372559,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.076162,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.261716,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.531,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.286,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.138,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.128,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.879,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 48,
- "text": "Why did the Greeks give it a separate deity, and own brother of Jove?",
- "input_from": 4898,
- "input_to": 4967,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.372514,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.425748,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.326713,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.097709,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.306402,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.652,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.508,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.517,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.384,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.498,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 49,
- "text": "Surely all this is not without meaning.",
- "input_from": 4968,
- "input_to": 5007,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.237539,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.227237,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.376581,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.069574,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.540447,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.886,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.997,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.401,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.001,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.832,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.376,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.992,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 50,
- "text": "And still deeper the meaning of that story of Narcissus, who because he could not grasp the tormenting, mild image he saw in the fountain, plunged into it and was drowned.",
- "input_from": 5008,
- "input_to": 5179,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.079256,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.671545,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.535755,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.023619,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.420106,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.732,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.449,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.708,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.133,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.591,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.445,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.739,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 51,
- "text": "But that same image, we ourselves see in all rivers and oceans.",
- "input_from": 5180,
- "input_to": 5243,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.066081,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.227092,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.2573,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.269542,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.654919,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.786,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.128,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.016,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.935,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.954,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.961,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 52,
- "text": "It is the image of the ungraspable phantom of life; and this is the key to it all.",
- "input_from": 5244,
- "input_to": 5326,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.046707,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.140698,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.645008,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.165148,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.333413,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.6,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.956,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.789,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.367,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.193,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.238,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 53,
- "text": "Now, when I say that I am in the habit of going to sea whenever I begin to grow hazy about the eyes, and begin to be over conscious of my lungs, I do not mean to have it inferred that I ever go to sea as a passenger.",
- "input_from": 5327,
- "input_to": 5543,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.405999,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.206239,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.216264,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.299023,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.319107,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.275,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.196,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.281,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.405,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.476,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.52,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.853,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 54,
- "text": "For to go as a passenger you must needs have a purse, and a purse is but a rag unless you have something in it.",
- "input_from": 5544,
- "input_to": 5655,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.242366,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.313293,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.391356,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.202589,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.276341,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.053,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.442,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.16,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.627,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.465,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.598,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 55,
- "text": "Besides, passengers get sea-sick-grow quarrelsome-don't sleep of nights-do not enjoy themselves much, as a general thing;-no, I never go as a passenger; nor, though I am something of a salt, do I ever go to sea as a Commodore, or a Captain, or a Cook.",
- "input_from": 5656,
- "input_to": 5907,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.393608,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.510843,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.296177,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.071568,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.302687,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.066,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.772,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.436,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.229,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.349,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.313,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.826,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 56,
- "text": "I abandon the glory and distinction of such offices to those who like them.",
- "input_from": 5908,
- "input_to": 5983,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.179585,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.479747,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.424013,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.246049,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.1726,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.289,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.153,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.276,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.762,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.847,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.888,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 57,
- "text": "For my part, I abominate all honourable respectable toils, trials, and tribulations of every kind whatsoever.",
- "input_from": 5984,
- "input_to": 6093,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.357501,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.34783,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.29798,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.095727,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.332466,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.847,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.352,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.402,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.368,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.441,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.932,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 58,
- "text": "It is quite as much as I can do to take care of myself, without taking care of ships, barques, brigs, schooners, and what not.",
- "input_from": 6094,
- "input_to": 6220,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.311786,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.246754,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.205102,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.310913,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.413132,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.223,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.179,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.352,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.507,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.909,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 59,
- "text": "And as for going as cook,-though I confess there is considerable glory in that, a cook being a sort of officer on ship-board-yet, somehow, I never fancied broiling fowls;-though once broiled, judiciously buttered, and judgmatically salted and peppered, there is no one who will speak more respectfully, not to say reverentially, of a broiled fowl than I will.",
- "input_from": 6221,
- "input_to": 6580,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.366891,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.435328,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.256416,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.037071,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.473926,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.487,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.25,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.564,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.453,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.73,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 60,
- "text": "It is out of the idolatrous dotings of the old Egyptians upon broiled ibis and roasted river horse, that you see the mummies of those creatures in their huge bake-houses the pyramids.",
- "input_from": 6581,
- "input_to": 6764,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.068849,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.670484,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.565229,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.072999,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.194397,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.713,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.521,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.694,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.546,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.136,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 61,
- "text": "No, when I go to sea, I go as a simple sailor, right before the mast, plumb down into the forecastle, aloft there to the royal mast-head.",
- "input_from": 6765,
- "input_to": 6902,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.202455,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.368053,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.487951,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.116903,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.240054,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.63,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.761,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.23,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.425,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.493,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 62,
- "text": "True, they rather order me about some, and make me jump from spar to spar, like a grasshopper in a May meadow.",
- "input_from": 6903,
- "input_to": 7013,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.114608,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.180568,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.127396,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.552524,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.21591,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.913,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.392,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.35,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.573,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.659,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.814,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 63,
- "text": "And at first, this sort of thing is unpleasant enough.",
- "input_from": 7014,
- "input_to": 7068,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.172106,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.323635,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.405992,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.168903,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.287174,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.715,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.885,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.218,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.059,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.19,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.894,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 64,
- "text": "It touches one's sense of honour, particularly if you come of an old established family in the land, the Van Rensselaers, or Randolphs, or Hardicanutes.",
- "input_from": 7069,
- "input_to": 7221,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.245789,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.355261,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.290643,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.112017,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.277867,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.866,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.571,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.495,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.291,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.557,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.608,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.671,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 65,
- "text": "And more than all, if just previous to putting your hand into the tar-pot, you have been lording it as a country schoolmaster, making the tallest boys stand in awe of you.",
- "input_from": 7222,
- "input_to": 7393,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.107358,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.727051,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.242276,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.116783,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.176322,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.155,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.08,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.612,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.35,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.806,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.569,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.213,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 66,
- "text": "The transition is a keen one, I assure you, from a schoolmaster to a sailor, and requires a strong decoction of Seneca and the Stoics to enable you to grin and bear it.",
- "input_from": 7394,
- "input_to": 7562,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.381463,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.493127,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.524356,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.061184,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.233586,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.155,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.298,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.583,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.916,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.746,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.729,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.109,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 67,
- "text": "But even this wears off in time.",
- "input_from": 7563,
- "input_to": 7595,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.15969,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.501692,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.184826,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.114453,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.347395,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.841,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.603,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.01,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.297,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.847,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.97,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 68,
- "text": "What of it, if some old hunks of a sea-captain orders me to get a broom and sweep down the decks?",
- "input_from": 7596,
- "input_to": 7693,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.349233,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.150259,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.448867,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.14003,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.233611,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.364,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.284,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.791,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.498,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.153,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.275,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.685,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 69,
- "text": "What does that indignity amount to, weighed, I mean, in the scales of the New Testament?",
- "input_from": 7694,
- "input_to": 7782,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.114981,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.341251,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.232329,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.372385,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.282898,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.815,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.571,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.329,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.18,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.68,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 70,
- "text": "Do you think the archangel Gabriel thinks anything the less of me, because I promptly and respectfully obey that old hunks in that particular instance?",
- "input_from": 7783,
- "input_to": 7934,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.117679,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.425065,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.606104,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.040868,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.459945,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.821,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.196,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.326,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.266,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.39,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.412,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.846,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 71,
- "text": "Who ain't a slave?",
- "input_from": 7935,
- "input_to": 7953,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.243072,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.332116,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.450842,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.11269,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.202439,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.9,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.932,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.606,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.194,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.061,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 72,
- "text": "Tell me that.",
- "input_from": 7954,
- "input_to": 7967,
- "tone_categories": []
- },
- {
- "sentence_id": 73,
- "text": "Well, then, however the old sea-captains may order me about-however they may thump and punch me about, I have the satisfaction of knowing that it is all right; that everybody else is one way or other served in much the same way-either in a physical or metaphysical point of view, that is; and so the universal thump is passed round, and all hands should rub each other's shoulder-blades, and be content.",
- "input_from": 7968,
- "input_to": 8371,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.600225,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.188614,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.342122,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.051428,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.309914,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.296,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.005,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.566,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.261,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.455,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.372,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.792,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 74,
- "text": "Again, I always go to sea as a sailor, because they make a point of paying me for my trouble, whereas they never pay passengers a single penny that I ever heard of.",
- "input_from": 8372,
- "input_to": 8536,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.399134,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.426051,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.191353,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.133474,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.336895,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.69,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.659,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.195,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.23,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.499,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.371,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.895,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 75,
- "text": "On the contrary, passengers themselves must pay.",
- "input_from": 8537,
- "input_to": 8585,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.176823,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.441884,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.245443,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.135024,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.265692,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.984,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.723,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.242,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.433,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.501,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.655,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.155,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 76,
- "text": "And there is all the difference in the world between paying and being paid.",
- "input_from": 8586,
- "input_to": 8661,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.20078,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.215978,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.097787,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.302586,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.483807,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.723,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.967,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.475,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.14,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.392,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.224,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 77,
- "text": "The act of paying is perhaps the most uncomfortable infliction that the two orchard thieves entailed upon us.",
- "input_from": 8662,
- "input_to": 8771,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.272819,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.637609,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.45609,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.038838,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.143311,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.346,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.667,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.494,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.286,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.288,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.525,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 78,
- "text": "But BEING PAID,-what will compare with it?",
- "input_from": 8772,
- "input_to": 8814,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.129291,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.168215,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.505291,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.172874,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.445413,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.978,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.525,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.027,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.07,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.214,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.957,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 79,
- "text": "The urbane activity with which a man receives money is really marvellous, considering that we so earnestly believe money to be the root of all earthly ills, and that on no account can a monied man enter heaven.",
- "input_from": 8815,
- "input_to": 9025,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.258157,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.362913,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.209787,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.224406,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.206747,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.623,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.284,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.637,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.265,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.563,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 80,
- "text": "Ah! how cheerfully we consign ourselves to perdition!",
- "input_from": 9026,
- "input_to": 9079,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.326175,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.279526,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.280562,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.081405,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.159875,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.031,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.18,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.959,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.979,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.862,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 81,
- "text": "Finally, I always go to sea as a sailor, because of the wholesome exercise and pure air of the fore-castle deck.",
- "input_from": 9080,
- "input_to": 9192,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.10393,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.110797,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.194602,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.691458,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.203747,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.563,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.543,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.775,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.663,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.283,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.19,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.655,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 82,
- "text": "For as in this world, head winds are far more prevalent than winds from astern (that is, if you never violate the Pythagorean maxim), so for the most part the Commodore on the quarter-deck gets his atmosphere at second hand from the sailors on the forecastle.",
- "input_from": 9193,
- "input_to": 9452,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.190926,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.563901,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.379399,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.039081,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.386472,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.066,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.167,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.835,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.569,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.551,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.391,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.166,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 83,
- "text": "He thinks he breathes it first; but not so.",
- "input_from": 9453,
- "input_to": 9496,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.146976,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.264925,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.429378,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.140581,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.474616,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.779,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.032,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.035,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.763,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.927,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.925,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 84,
- "text": "In much the same way do the commonalty lead their leaders in many other things, at the same time that the leaders little suspect it.",
- "input_from": 9497,
- "input_to": 9629,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.223986,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.530082,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.343833,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.063121,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.278164,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.257,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.571,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.899,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.752,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.411,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.255,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.23,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 85,
- "text": "But wherefore it was that after having repeatedly smelt the sea as a merchant sailor, I should now take it into my head to go on a whaling voyage; this the invisible police officer of the Fates, who has the constant surveillance of me, and secretly dogs me, and influences me in some unaccountable way-he can better answer than any one else.",
- "input_from": 9630,
- "input_to": 9971,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.421184,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.707532,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.530845,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.014768,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.092379,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.688,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.008,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.506,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.429,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.239,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.422,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.808,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 86,
- "text": "And, doubtless, my going on this whaling voyage, formed part of the grand programme of Providence that was drawn up a long time ago.",
- "input_from": 9972,
- "input_to": 10104,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.259643,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.433288,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.221091,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.234673,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.23225,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.199,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.509,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.569,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.443,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.581,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.565,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 87,
- "text": "It came in as a sort of brief interlude and solo between more extensive performances.",
- "input_from": 10105,
- "input_to": 10190,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.220237,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.488841,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.346516,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.32806,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.174339,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.451,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.922,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.534,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.24,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.432,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.536,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 88,
- "text": "I take it that this part of the bill must have run something like this:\n\"GRAND CONTESTED ELECTION FOR THE PRESIDENCY OF THE UNITED STATES.",
- "input_from": 10191,
- "input_to": 10329,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.118164,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.52867,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.454139,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.167206,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.198501,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.065,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.374,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.345,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.446,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.536,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.739,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 89,
- "text": "\"WHALING VOYAGE BY ONE ISHMAEL.",
- "input_from": 10330,
- "input_to": 10361,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.242882,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.212707,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.251869,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.217312,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.219939,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.912,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.571,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.035,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.119,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.401,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 90,
- "text": "\"BLOODY BATTLE IN AFFGHANISTAN.\"",
- "input_from": 10362,
- "input_to": 10394,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.467411,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.387246,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.297422,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.040942,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.214117,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.981,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.571,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.89,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.743,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.401,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 91,
- "text": "Though I cannot tell why it was exactly that those stage managers, the Fates, put me down for this shabby part of a whaling voyage, when others were set down for magnificent parts in high tragedies, and short and easy parts in genteel comedies, and jolly parts in farces-though I cannot tell why this was exactly; yet, now that I recall all the circumstances, I think I can see a little into the springs and motives which being cunningly presented to me under various disguises, induced me to set about performing the part I did, besides cajoling me into the delusion that it was a choice resulting from my own unbiased freewill and discriminating judgment.",
- "input_from": 10395,
- "input_to": 11052,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.530573,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.305188,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.287743,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.049307,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.240543,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.255,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.167,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.438,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.321,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.433,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.506,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.827,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 92,
- "text": "Chief among these motives was the overwhelming idea of the great whale himself.",
- "input_from": 11053,
- "input_to": 11132,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.07347,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.348998,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.403245,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.326948,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.136145,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.768,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.22,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.265,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.218,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.833,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 93,
- "text": "Such a portentous and mysterious monster roused all my curiosity.",
- "input_from": 11133,
- "input_to": 11198,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.287072,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.045712,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.428996,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.280744,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.139453,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.879,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.669,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.556,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.409,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.235,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.78,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 94,
- "text": "Then the wild and distant seas where he rolled his island bulk; the undeliverable, nameless perils of the whale; these, with all the attending marvels of a thousand Patagonian sights and sounds, helped to sway me to my wish.",
- "input_from": 11199,
- "input_to": 11423,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.201321,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.413132,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.328442,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.040351,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.432223,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.014,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.235,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.601,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.517,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.566,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.531,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.385,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 95,
- "text": "With other men, perhaps, such things would not have been inducements; but as for me, I am tormented with an everlasting itch for things remote.",
- "input_from": 11424,
- "input_to": 11567,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.47654,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.418817,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.133762,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.08858,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.419193,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.257,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0.196,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.447,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.057,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.497,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.378,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.956,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 96,
- "text": "I love to sail forbidden seas, and land on barbarous coasts.",
- "input_from": 11568,
- "input_to": 11628,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.099477,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.164791,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.149077,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.425919,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.384697,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.232,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.295,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.829,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.739,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.876,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 97,
- "text": "Not ignoring what is good, I am quick to perceive a horror, and could still be social with it-would they let me-since it is but well to be on friendly terms with all the inmates of the place one lodges in.",
- "input_from": 11629,
- "input_to": 11834,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.267446,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.220281,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.345987,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.061857,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.226209,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.591,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.184,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.514,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.39,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.825,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- },
- {
- "sentence_id": 98,
- "text": "By reason of these things, then, the whaling voyage was welcome; the great flood-gates of the wonder-world swung open, and in the wild conceits that swayed me to my purpose, two and two there floated into my inmost soul, endless processions of the whale, and, mid most of them all, one grand hooded phantom, like a snow hill in the air.",
- "input_from": 11835,
- "input_to": 12171,
- "tone_categories": [
- {
- "tones": [
- {
- "score": 0.096855,
- "tone_id": "anger",
- "tone_name": "Anger"
- },
- {
- "score": 0.111949,
- "tone_id": "disgust",
- "tone_name": "Disgust"
- },
- {
- "score": 0.630888,
- "tone_id": "fear",
- "tone_name": "Fear"
- },
- {
- "score": 0.172567,
- "tone_id": "joy",
- "tone_name": "Joy"
- },
- {
- "score": 0.180281,
- "tone_id": "sadness",
- "tone_name": "Sadness"
- }
- ],
- "category_id": "emotion_tone",
- "category_name": "Emotion Tone"
- },
- {
- "tones": [
- {
- "score": 0.275,
- "tone_id": "analytical",
- "tone_name": "Analytical"
- },
- {
- "score": 0.031,
- "tone_id": "confident",
- "tone_name": "Confident"
- },
- {
- "score": 0,
- "tone_id": "tentative",
- "tone_name": "Tentative"
- }
- ],
- "category_id": "language_tone",
- "category_name": "Language Tone"
- },
- {
- "tones": [
- {
- "score": 0.711,
- "tone_id": "openness_big5",
- "tone_name": "Openness"
- },
- {
- "score": 0.498,
- "tone_id": "conscientiousness_big5",
- "tone_name": "Conscientiousness"
- },
- {
- "score": 0.427,
- "tone_id": "extraversion_big5",
- "tone_name": "Extraversion"
- },
- {
- "score": 0.432,
- "tone_id": "agreeableness_big5",
- "tone_name": "Agreeableness"
- },
- {
- "score": 0.662,
- "tone_id": "emotional_range_big5",
- "tone_name": "Emotional Range"
- }
- ],
- "category_id": "social_tone",
- "category_name": "Social Tone"
- }
- ]
- }
- ]
-}
\ No newline at end of file
diff --git a/resources/tone-v3-expect2.json b/resources/tone-v3-expect2.json
deleted file mode 100644
index bbd93ad1d..000000000
--- a/resources/tone-v3-expect2.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "utterances_tone": [
- {
- "utterance_id": 0,
- "utterance_text": "I am very happy",
- "tones": [
- {
- "score": 0.875529,
- "tone_id": "polite",
- "tone_name": "polite"
- },
- {
- "score": 0.838693,
- "tone_id": "satisfied",
- "tone_name": "satisfied"
- },
- {
- "score": 0.844135,
- "tone_id": "sympathetic",
- "tone_name": "sympathetic"
- },
- {
- "score": 0.916255,
- "tone_id": "excited",
- "tone_name": "excited"
- }
- ]
- }
- ]
-}
diff --git a/resources/tts_audio.wav b/resources/tts_audio.wav
new file mode 100644
index 000000000..ba4760649
Binary files /dev/null and b/resources/tts_audio.wav differ
diff --git a/setup.py b/setup.py
index 6cb73f07b..6947cb83d 100644
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright 2016 IBM All Rights Reserved.
+# (C) Copyright IBM Corp. 2015, 2025.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,78 +13,41 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import print_function
from setuptools import setup
-from setuptools.command.test import test as TestCommand
-import os
-import sys
+from os import path
-__version__ = '3.0.4'
-
-if sys.argv[-1] == 'publish':
- # test server
- os.system('python setup.py register -r pypitest')
- os.system('python setup.py sdist upload -r pypitest')
-
- # production server
- os.system('python setup.py register -r pypi')
- os.system('python setup.py sdist upload -r pypi')
- sys.exit()
-
-# Convert README.md to README.rst for pypi
-try:
- from pypandoc import convert_file
-
- def read_md(f):
- return convert_file(f, 'rst')
-
- # read_md = lambda f: convert(f, 'rst')
-except:
- print('warning: pypandoc module not found, '
- 'could not convert Markdown to RST')
-
- def read_md(f):
- return open(f, 'rb').read().decode(encoding='utf-8')
- # read_md = lambda f: open(f, 'rb').read().decode(encoding='utf-8')
-
-
-class PyTest(TestCommand):
- def finalize_options(self):
- TestCommand.finalize_options(self)
- self.test_args = ['--strict', '--verbose', '--tb=long', 'test']
- self.test_suite = True
-
- def run_tests(self):
- import pytest
- errcode = pytest.main(self.test_args)
- sys.exit(errcode)
+__version__ = '11.2.0'
+# read contents of README file
+this_directory = path.abspath(path.dirname(__file__))
+with open(path.join(this_directory, 'README.md'), encoding='utf-8') as file:
+ readme_file = file.read()
setup(name='ibm-watson',
version=__version__,
description='Client library to use the IBM Watson Services',
+ packages=['ibm_watson'],
+ install_requires=['requests>=2.0, <3.0', 'python_dateutil>=2.5.3', 'websocket-client>=1.1.0', 'ibm_cloud_sdk_core>=3.3.6, == 3.*'],
+ tests_require=['responses', 'pytest', 'python_dotenv', 'pytest-rerunfailures'],
license='Apache 2.0',
- install_requires=['requests>=2.0, <3.0', 'python_dateutil>=2.5.3', 'websocket-client==0.48.0', 'ibm_cloud_sdk_core>=0.2.0'],
- tests_require=['responses', 'pytest', 'python_dotenv', 'pytest-rerunfailures', 'tox'],
- cmdclass={'test': PyTest},
author='IBM Watson',
author_email='watdevex@us.ibm.com',
- long_description=read_md('README.md'),
+ long_description=readme_file,
+ long_description_content_type='text/markdown',
url='https://github.com/watson-developer-cloud/python-sdk',
- packages=['ibm_watson'],
include_package_data=True,
- keywords='language, vision, question and answer' +
- ' tone_analyzer, natural language classifier,' +
- ' text to speech, language translation, ' +
+ keywords='language, question and answer,' +
+ ' tone_analyzer,' +
+ ' text to speech,' +
'language identification, concept expansion, machine translation, ' +
- 'personality insights, message resonance, watson developer cloud, ' +
+ 'message resonance, watson developer cloud, ' +
' wdc, watson, ibm, dialog, user modeling,' +
- 'tone analyzer, speech to text, visual recognition',
+ 'speech to text',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
- 'Development Status :: 4 - Beta',
+ 'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
diff --git a/test/integration/__init__.py b/test/integration/__init__.py
index 161119efe..949039f3b 100644
--- a/test/integration/__init__.py
+++ b/test/integration/__init__.py
@@ -1,5 +1,4 @@
# coding: utf-8
-from __future__ import print_function
from dotenv import load_dotenv, find_dotenv
# load the .env file containing your environment variables for the required
diff --git a/test/integration/test_assistant_v1.py b/test/integration/test_assistant_v1.py
new file mode 100644
index 000000000..306e5d489
--- /dev/null
+++ b/test/integration/test_assistant_v1.py
@@ -0,0 +1,117 @@
+# coding: utf-8
+from unittest import TestCase
+from ibm_cloud_sdk_core.authenticators import IAMAuthenticator, BearerTokenAuthenticator
+from os.path import abspath
+import os
+import ibm_watson
+import pytest
+import json
+
+@pytest.mark.skipif(os.getenv('ASSISTANT_APIKEY') is None,
+ reason='requires ASSISTANT_APIKEY')
+class TestAssistantV1(TestCase):
+
+ @classmethod
+ def setup_class(cls):
+
+ create_workspace_data = {
+ "name":
+ "test_workspace",
+ "description":
+ "integration tests",
+ "language":
+ "en",
+ "intents": [{
+ "intent": "hello",
+ "description": "string",
+ "examples": [{
+ "text": "good morning"
+ }]
+ }],
+ "entities": [{
+ "entity": "pizza_toppings",
+ "description": "Tasty pizza toppings",
+ "metadata": {
+ "property": "value"
+ }
+ }],
+ "counterexamples": [{
+ "text": "string"
+ }],
+ "metadata": {},
+ }
+
+ authenticator = IAMAuthenticator(os.getenv('ASSISTANT_APIKEY'))
+ cls.assistant = ibm_watson.AssistantV1(
+ version='2018-07-10',
+ authenticator=authenticator
+ )
+ cls.assistant.set_default_headers({
+ 'X-Watson-Learning-Opt-Out': '1',
+ 'X-Watson-Test': '1'
+ })
+
+ response = cls.assistant.create_workspace(
+ name=create_workspace_data['name'],
+ description=create_workspace_data['description'],
+ language='en',
+ intents=create_workspace_data['intents'],
+ entities=create_workspace_data['entities'],
+ counterexamples=create_workspace_data['counterexamples'],
+ metadata=create_workspace_data['metadata']).get_result()
+
+ cls.workspace_id = response['workspace_id']
+
+ examples = [{"text": "good morning"}]
+ response = cls.assistant.create_intent(
+ workspace_id=cls.workspace_id,
+ intent='test_intent',
+ description='Test intent.',
+ examples=examples).get_result()
+
+ @classmethod
+ def teardown_class(cls):
+ response = cls.assistant.delete_intent(workspace_id=cls.workspace_id, intent='updated_test_intent').get_result()
+ assert response is not None
+
+ response = cls.assistant.delete_workspace(cls.workspace_id).get_result()
+ assert response is not None
+
+ def test_workspace(self):
+ response = self.assistant.get_workspace(self.workspace_id, export=True).get_result()
+ assert response is not None
+
+ response = self.assistant.list_workspaces().get_result()
+ assert response is not None
+ print(json.dumps(response, indent=2))
+
+ response = self.assistant.message(self.workspace_id,
+ input={
+ 'text': 'What\'s the weather like?'
+ },
+ context={
+ 'metadata': {
+ 'deployment': 'myDeployment'
+ }
+ }).get_result()
+ assert response is not None
+
+ response = self.assistant.update_workspace(workspace_id=self.workspace_id, description='Updated test workspace.').get_result()
+ assert response is not None
+
+ def test_intent(self):
+ response = self.assistant.get_intent(
+ workspace_id=self.workspace_id, intent='test_intent', export=True).get_result()
+ assert response is not None
+
+ response = self.assistant.update_intent(
+ workspace_id=self.workspace_id,
+ intent='test_intent',
+ new_intent='updated_test_intent',
+ new_description='Updated test intent.').get_result()
+ assert response is not None
+
+ response = self.assistant.list_intents(
+ workspace_id=self.workspace_id, export=True).get_result()
+ assert response is not None
+ print(json.dumps(response, indent=2))
diff --git a/test/integration/test_assistant_v2.py b/test/integration/test_assistant_v2.py
new file mode 100644
index 000000000..db17bcae0
--- /dev/null
+++ b/test/integration/test_assistant_v2.py
@@ -0,0 +1,65 @@
+# coding: utf-8
+
+# Copyright 2019, 2024 IBM All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest import TestCase
+import ibm_watson
+from ibm_watson.assistant_v2 import MessageInput
+from ibm_watson.common import parse_sse_stream_data
+import pytest
+import json
+from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
+
+class TestAssistantV2(TestCase):
+
+ def setUp(self):
+
+ with open('./auth.json') as f:
+ data = json.load(f)
+ assistant_auth = data.get("assistantv2")
+ self.assistant_id = assistant_auth.get("assistantId")
+ self.environment_id = assistant_auth.get("environmentId")
+
+ self.authenticator = IAMAuthenticator(apikey=assistant_auth.get("apikey"))
+ self.assistant = ibm_watson.AssistantV2(version='2024-08-25', authenticator=self.authenticator)
+ self.assistant.set_service_url(assistant_auth.get("serviceUrl"))
+ self.assistant.set_default_headers({
+ 'X-Watson-Learning-Opt-Out': '1',
+ 'X-Watson-Test': '1'
+ })
+
+ def test_list_assistants(self):
+ response = self.assistant.list_assistants().get_result()
+ assert response is not None
+
+ def test_message_stream_stateless(self):
+ input = MessageInput(message_type="text", text="can you list the steps to create a custom extension?")
+ user_id = "Angelo"
+
+ response = self.assistant.message_stream_stateless(self.assistant_id, self.environment_id, input=input, user_id=user_id).get_result()
+
+ for data in parse_sse_stream_data(response):
+ # One of these items must exist
+ # assert "partial_item" in data_json or "complete_item" in data_json or "final_item" in data_json
+
+ if "partial_item" in data:
+ assert data["partial_item"]["text"] is not None
+ elif "complete_item" in data:
+ assert data["complete_item"]["text"] is not None
+ elif "final_response" in data:
+ assert data["final_response"] is not None
+ else:
+ pytest.fail("Should be impossible to get here")
+
diff --git a/test/integration/test_compare_comply_v1.py b/test/integration/test_compare_comply_v1.py
deleted file mode 100644
index 2683c42c3..000000000
--- a/test/integration/test_compare_comply_v1.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# coding: utf-8
-import pytest
-import ibm_watson
-import os
-from os.path import abspath
-from unittest import TestCase
-
-@pytest.mark.skipif(
- os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES')
-class IntegrationTestCompareComplyV1(TestCase):
- compare_comply = None
-
- @classmethod
- def setup_class(cls):
- cls.compare_comply = ibm_watson.CompareComplyV1(
- '2018-10-15')
- cls.compare_comply.set_default_headers({
- 'X-Watson-Learning-Opt-Out':
- '1',
- 'X-Watson-Test':
- '1'
- })
-
- def test_convert_to_html(self):
- contract = abspath('resources/contract_A.pdf')
- with open(contract, 'rb') as file:
- result = self.compare_comply.convert_to_html(file).get_result()
- assert result is not None
-
- def test_classify_elements(self):
- contract = abspath('resources/contract_A.pdf')
- with open(contract, 'rb') as file:
- result = self.compare_comply.classify_elements(file, 'application/pdf').get_result()
- assert result is not None
-
- def test_extract_tables(self):
- table = abspath('resources/contract_A.pdf')
- with open(table, 'rb') as file:
- result = self.compare_comply.extract_tables(file).get_result()
- assert result is not None
-
- def test_compare_documents(self):
- with open(os.path.join(os.path.dirname(__file__), '../../resources/contract_A.pdf'), 'rb') as file1, \
- open(os.path.join(os.path.dirname(__file__), '../../resources/contract_B.pdf'), 'rb') as file2:
- result = self.compare_comply.compare_documents(file1, file2).get_result()
-
- assert result is not None
-
- @pytest.mark.skip(reason="Temporarily skip")
- def test_feedback(self):
- feedback_data = {
- 'feedback_type': 'element_classification',
- 'document': {
- 'hash': '',
- 'title': 'doc title'
- },
- 'model_id': 'contracts',
- 'model_version': '11.00',
- 'location': {
- 'begin': '214',
- 'end': '237'
- },
- 'text': '1. IBM will provide a Senior Managing Consultant / expert resource, for up to 80 hours, to assist Florida Power & Light (FPL) with the creation of an IT infrastructure unit cost model for existing infrastructure.',
- 'original_labels': {
- 'types': [
- {
- 'label': {
- 'nature': 'Obligation',
- 'party': 'IBM'
- },
- 'provenance_ids': [
- '85f5981a-ba91-44f5-9efa-0bd22e64b7bc',
- 'ce0480a1-5ef1-4c3e-9861-3743b5610795'
- ]
- },
- {
- 'label': {
- 'nature': 'End User',
- 'party': 'Exclusion'
- },
- 'provenance_ids': [
- '85f5981a-ba91-44f5-9efa-0bd22e64b7bc',
- 'ce0480a1-5ef1-4c3e-9861-3743b5610795'
- ]
- }
- ],
- 'categories': [
- {
- 'label': 'Responsibilities',
- 'provenance_ids': []
- },
- {
- 'label': 'Amendments',
- 'provenance_ids': []
- }
- ]
- },
- 'updated_labels': {
- 'types': [
- {
- 'label': {
- 'nature': 'Obligation',
- 'party': 'IBM'
- }
- },
- {
- 'label': {
- 'nature': 'Disclaimer',
- 'party': 'Buyer'
- }
- }
- ],
- 'categories': [
- {
- 'label': 'Responsibilities'
- },
- {
- 'label': 'Audits'
- }
- ]
- }
- }
-
- add_feedback = self.compare_comply.add_feedback(
- feedback_data,
- 'wonder woman',
- 'test commment').get_result()
- assert add_feedback is not None
- assert add_feedback['feedback_id'] is not None
- feedback_id = add_feedback['feedback_id']
-
- self.compare_comply.set_default_headers({'x-watson-metadata': 'customer_id=sdk-test-customer-id'})
- get_feedback = self.compare_comply.get_feedback(feedback_id).get_result()
- assert get_feedback is not None
-
- list_feedback = self.compare_comply.list_feedback(
- feedback_type='element_classification').get_result()
- assert list_feedback is not None
-
- delete_feedback = self.compare_comply.delete_feedback(feedback_id).get_result()
- assert delete_feedback is not None
-
- @pytest.mark.skip(reason="Temporarily skip")
- def test_batches(self):
- list_batches = self.compare_comply.list_batches().get_result()
- assert list_batches is not None
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/cloud-object-storage-credentials-input.json'), 'rb') as input_credentials_file, \
- open(os.path.join(os.path.dirname(__file__), '../../resources/cloud-object-storage-credentials-output.json'), 'rb') as output_credentials_file:
- create_batch = self.compare_comply.create_batch(
- 'html_conversion',
- input_credentials_file,
- 'us-south',
- 'compare-comply-integration-test-bucket-input',
- output_credentials_file,
- 'us-south',
- 'compare-comply-integration-test-bucket-output').get_result()
-
- assert create_batch is not None
- assert create_batch['batch_id'] is not None
- batch_id = create_batch['batch_id']
-
- get_batch = self.compare_comply.get_batch(batch_id)
- assert get_batch is not None
-
- update_batch = self.compare_comply.update_batch(batch_id, 'rescan')
- assert update_batch is not None
diff --git a/test/integration/test_discovery_v1.py b/test/integration/test_discovery_v1.py
deleted file mode 100644
index 1f3a19f70..000000000
--- a/test/integration/test_discovery_v1.py
+++ /dev/null
@@ -1,260 +0,0 @@
-# coding: utf-8
-from unittest import TestCase
-import os
-import ibm_watson
-import random
-import pytest
-
-@pytest.mark.skipif(
- os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES')
-class Discoveryv1(TestCase):
- def setUp(self):
- self.discovery = ibm_watson.DiscoveryV1(
- version='2018-08-01')
- self.discovery.set_default_headers({
- 'X-Watson-Learning-Opt-Out': '1',
- 'X-Watson-Test': '1'
- })
- self.environment_id = 'e15f6424-f887-4f50-b4ea-68267c36fc9c' # This environment is created for integration testing
- collections = self.discovery.list_collections(self.environment_id).get_result()['collections']
- self.collection_id = collections[0]['collection_id']
-
- for collection in collections:
- if collection['name'] == 'DO-NOT-DELETE-JAPANESE-COLLECTION':
- self.collection_id_JP = collection['collection_id']
-
- def tearDown(self):
- collections = self.discovery.list_collections(self.environment_id).get_result()['collections']
- for collection in collections:
- if not collection['name'].startswith('DO-NOT-DELETE'):
- self.discovery.delete_collection(self.environment_id, collection['collection_id'])
-
- def test_environments(self):
- envs = self.discovery.list_environments().get_result()
- assert envs is not None
- env = self.discovery.get_environment(
- envs['environments'][0]['environment_id']).get_result()
- assert env is not None
- fields = self.discovery.list_fields(self.environment_id,
- self.collection_id).get_result()
- assert fields is not None
-
- def test_configurations(self):
- configs = self.discovery.list_configurations(self.environment_id).get_result()
- assert configs is not None
-
- name = 'test' + random.choice('ABCDEFGHIJKLMNOPQ')
- new_configuration_id = self.discovery.create_configuration(
- self.environment_id, name,
- 'creating new config for python sdk').get_result()['configuration_id']
- assert new_configuration_id is not None
- self.discovery.get_configuration(self.environment_id,
- new_configuration_id).get_result()
-
- updated_config = self.discovery.update_configuration(
- self.environment_id, new_configuration_id, 'lala').get_result()
- assert updated_config['name'] == 'lala'
-
- deleted_config = self.discovery.delete_configuration(
- self.environment_id, new_configuration_id).get_result()
- assert deleted_config['status'] == 'deleted'
-
- def test_collections_and_expansions(self):
- name = 'Example collection for python' + random.choice('ABCDEFGHIJKLMNOPQ')
- new_collection_id = self.discovery.create_collection(
- self.environment_id,
- name,
- description="Integration test for python sdk").get_result()['collection_id']
- assert new_collection_id is not None
-
- self.discovery.get_collection(self.environment_id, new_collection_id)
- updated_collection = self.discovery.update_collection(
- self.environment_id, new_collection_id, name, description='Updating description').get_result()
- assert updated_collection['description'] == 'Updating description'
-
- self.discovery.create_expansions(self.environment_id,
- new_collection_id, [{
- 'input_terms': ['a'],
- 'expanded_terms': ['aa']
- }]).get_result()
- expansions = self.discovery.list_expansions(self.environment_id,
- new_collection_id).get_result()
- assert expansions['expansions']
- self.discovery.delete_expansions(self.environment_id,
- new_collection_id)
-
- deleted_collection = self.discovery.delete_collection(
- self.environment_id, new_collection_id).get_result()
- assert deleted_collection['status'] == 'deleted'
-
- def test_documents(self):
- with open(os.path.join(os.path.dirname(__file__), '../../resources/simple.html'), 'r') as fileinfo:
- add_doc = self.discovery.add_document(
- environment_id=self.environment_id,
- collection_id=self.collection_id,
- file=fileinfo).get_result()
- assert add_doc['document_id'] is not None
-
- doc_status = self.discovery.get_document_status(
- self.environment_id, self.collection_id, add_doc['document_id']).get_result()
- assert doc_status is not None
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/simple.html'), 'r') as fileinfo:
- update_doc = self.discovery.update_document(
- self.environment_id,
- self.collection_id,
- add_doc['document_id'],
- file=fileinfo,
- filename='newname.html').get_result()
- assert update_doc is not None
- delete_doc = self.discovery.delete_document(
- self.environment_id, self.collection_id, add_doc['document_id']).get_result()
- assert delete_doc['status'] == 'deleted'
-
- def test_queries(self):
- query_results = self.discovery.query(
- self.environment_id,
- self.collection_id,
- filter='extracted_metadata.sha1::9181d244*',
- return_fields='extracted_metadata.sha1').get_result()
- assert query_results is not None
-
- @pytest.mark.skip(reason="Temporary skipping because update_credentials fails")
- def test_credentials(self):
- credential_details = {
- 'credential_type': 'username_password',
- 'url': 'https://login.salesforce.com',
- 'username': 'user@email.com',
- 'password': 'xxx'
- }
- credentials = self.discovery.create_credentials(self.environment_id,
- 'salesforce',
- credential_details).get_result()
- assert credentials['credential_id'] is not None
- credential_id = credentials['credential_id']
-
- get_credentials = self.discovery.get_credentials(self.environment_id, credential_id).get_result()
- assert get_credentials['credential_id'] == credential_id
-
- list_credentials = self.discovery.list_credentials(self.environment_id).get_result()
- assert list_credentials is not None
-
- new_credential_details = {
- 'credential_type': 'username_password',
- 'url': 'https://logo.salesforce.com',
- 'username': 'user@email.com',
- 'password': 'xxx'
- }
- updated_credentials = self.discovery.update_credentials(self.environment_id, credential_id, 'salesforce', new_credential_details).get_result()
- assert updated_credentials is not None
-
- get_credentials = self.discovery.get_credentials(self.environment_id, credentials['credential_id']).get_result()
- assert get_credentials['credential_details']['url'] == new_credential_details['url']
-
- delete_credentials = self.discovery.delete_credentials(self.environment_id, credential_id).get_result()
- assert delete_credentials['credential_id'] is not None
-
- def test_create_event(self):
- # create test document
- with open(os.path.join(os.path.dirname(__file__), '../../resources/simple.html'), 'r') as fileinfo:
- add_doc = self.discovery.add_document(
- environment_id=self.environment_id,
- collection_id=self.collection_id,
- file=fileinfo).get_result()
- assert add_doc['document_id'] is not None
- document_id = add_doc['document_id']
-
- # make query to get session token
- query = self.discovery.query(self.environment_id,
- self.collection_id,
- natural_language_query='The content of the first chapter').get_result()
- assert query['session_token'] is not None
-
- # create_event
- event_data = {
- "environment_id": self.environment_id,
- "session_token": query['session_token'],
- "collection_id": self.collection_id,
- "document_id": document_id,
- }
- create_event_response = self.discovery.create_event('click', event_data).get_result()
- assert create_event_response['type'] == 'click'
-
- #delete the documment
- self.discovery.delete_document(self.environment_id,
- self.collection_id,
- document_id).get_result()
-
- def test_tokenization_dictionary(self):
- result = self.discovery.get_tokenization_dictionary_status(
- self.environment_id,
- self.collection_id_JP
- ).get_result()
- assert result['status'] is not None
-
- def test_feedback(self):
- response = self.discovery.get_metrics_event_rate('2018-08-13T14:39:59.309Z',
- '2018-08-14T14:39:59.309Z',
- 'document').get_result()
- assert response['aggregations'] is not None
-
- response = self.discovery.get_metrics_query('2018-08-13T14:39:59.309Z',
- '2018-08-14T14:39:59.309Z',
- 'document').get_result()
- assert response['aggregations'] is not None
-
- response = self.discovery.get_metrics_query_event('2018-08-13T14:39:59.309Z',
- '2018-08-14T14:39:59.309Z',
- 'document').get_result()
- assert response['aggregations'] is not None
-
- response = self.discovery.get_metrics_query_no_results('2018-07-13T14:39:59.309Z',
- '2018-08-14T14:39:59.309Z',
- 'document').get_result()
- assert response['aggregations'] is not None
-
- response = self.discovery.get_metrics_query_token_event(10).get_result()
- assert response['aggregations'] is not None
-
- response = self.discovery.query_log(count=2).get_result()
- assert response is not None
-
- @pytest.mark.skip(reason="Skip temporarily.")
- def test_stopword_operations(self):
- with open(os.path.join(os.path.dirname(__file__), '../../resources/stopwords.txt'), 'r') as stopwords_file:
- create_stopword_list_result = self.discovery.create_stopword_list(
- self.environment_id,
- self.collection_id,
- stopwords_file
- ).get_result()
- assert create_stopword_list_result is not None
-
- delete_stopword_list_result = self.discovery.delete_stopword_list(
- self.environment_id,
- self.collection_id
- ).get_result()
- assert delete_stopword_list_result is None
-
- def test_gateway_configuration(self):
- create_gateway_result = self.discovery.create_gateway(
- self.environment_id,
- 'test-gateway-configuration-python'
- ).get_result()
- assert create_gateway_result['gateway_id'] is not None
-
- get_gateway_result = self.discovery.get_gateway(
- self.environment_id,
- create_gateway_result['gateway_id']
- ).get_result()
- assert get_gateway_result is not None
-
- list_gateways_result = self.discovery.list_gateways(
- self.environment_id
- ).get_result()
- assert list_gateways_result is not None
-
- delete_gateways_result = self.discovery.delete_gateway(
- self.environment_id,
- create_gateway_result['gateway_id']
- ).get_result()
- assert delete_gateways_result is not None
diff --git a/test/integration/test_discovery_v2.py b/test/integration/test_discovery_v2.py
new file mode 100644
index 000000000..aa1a402cc
--- /dev/null
+++ b/test/integration/test_discovery_v2.py
@@ -0,0 +1,132 @@
+# coding: utf-8
+from unittest import TestCase
+from ibm_cloud_sdk_core.authenticators import IAMAuthenticator, BearerTokenAuthenticator
+from ibm_watson.discovery_v2 import CreateEnrichment, EnrichmentOptions
+from os.path import abspath
+import os
+import ibm_watson
+import pytest
+
+
+@pytest.mark.skipif(os.getenv('DISCOVERY_V2_APIKEY') is None,
+ reason='requires DISCOVERY_V2_APIKEY')
+class Discoveryv2(TestCase):
+ discovery = None
+ project_id = os.getenv('DISCOVERY_V2_PROJECT_ID') # This project is created for integration testing
+ collection_id = None
+ collection_name = 'python_test_collection'
+
+ @classmethod
+ def setup_class(cls):
+ authenticator = IAMAuthenticator(os.getenv('DISCOVERY_V2_APIKEY'))
+ cls.discovery = ibm_watson.DiscoveryV2(
+ version='2020-08-12',
+ authenticator=authenticator
+ )
+ cls.discovery.set_service_url(os.getenv('DISCOVERY_V2_URL'))
+ cls.discovery.set_default_headers({
+ 'X-Watson-Learning-Opt-Out': '1',
+ 'X-Watson-Test': '1'
+ })
+
+ collections = cls.discovery.list_collections(
+ cls.project_id).get_result()['collections']
+ for collection in collections:
+ if collection['name'] == cls.collection_name:
+ cls.collection_id = collection['collection_id']
+
+ if cls.collection_id is None:
+ print("Creating a new temporary collection")
+ cls.collection_id = cls.discovery.create_collection(
+ cls.project_id,
+ cls.collection_name,
+ description="Integration test for python sdk").get_result(
+ )['collection_id']
+
+ @classmethod
+ def teardown_class(cls):
+ collections = cls.discovery.list_collections(
+ cls.project_id).get_result()['collections']
+ for collection in collections:
+ if collection['name'] == cls.collection_name:
+ print('Deleting the temporary collection')
+ cls.discovery.delete_collection(cls.project_id,
+ cls.collection_id)
+ break
+
+ def test_projects(self):
+ projs = self.discovery.list_projects().get_result()
+ assert projs is not None
+ proj = self.discovery.get_project(
+ self.project_id).get_result()
+ assert proj is not None
+
+ def test_collections(self):
+ cols = self.discovery.list_collections(self.project_id).get_result()
+ assert cols is not None
+ col = self.discovery.get_collection(
+ self.project_id,
+ self.collection_id
+ ).get_result()
+ assert col is not None
+
+ def test_enrichments(self):
+ enrs = self.discovery.list_enrichments(self.project_id).get_result()
+ print(enrs)
+ assert enrs is not None
+
+ enrichmentOptions = EnrichmentOptions(
+ languages=["en"],
+ entity_type="keyword"
+ )
+ enrichment = CreateEnrichment(
+ name="python test enrichment",
+ description="test enrichment",
+ type="dictionary",
+ options=enrichmentOptions
+ )
+ with open(os.path.join(os.path.dirname(__file__), '../../resources/TestEnrichments.csv'), 'r') as fileinfo:
+ enr = self.discovery.create_enrichment(
+ project_id=self.project_id,
+ enrichment=enrichment._to_dict(),
+ file=fileinfo
+ ).get_result()
+ assert enr is not None
+ enrichment_id = enr["enrichment_id"]
+ enrichment = self.discovery.get_enrichment(
+ self.project_id,
+ enrichment_id
+ ).get_result()
+ assert enrichment is not None
+ enr = self.discovery.update_enrichment(
+ project_id=self.project_id,
+ enrichment_id=enrichment_id,
+ name="python test enrichment",
+ description="updated description"
+ ).get_result()
+ assert enr is not None
+ self.discovery.delete_enrichment(
+ self.project_id,
+ enrichment_id
+ )
+
+ # can only test in CPD
+ @pytest.mark.skip(reason="can only test in CPD")
+ def test_analyze(self):
+ authenticator = BearerTokenAuthenticator('')
+ discovery_cpd = ibm_watson.DiscoveryV2(
+ version='2020-08-12',
+ authenticator=authenticator
+ )
+ discovery_cpd.service_url = ""
+ discovery_cpd.set_disable_ssl_verification(True)
+ test_file = abspath('resources/problem.json')
+ with open(test_file, 'rb') as file:
+ result = discovery_cpd.analyze_document(
+ project_id="",
+ collection_id="",
+ file=file,
+ file_content_type="application/json"
+ ).get_result()
+ assert result is not None
+
diff --git a/test/integration/test_examples.py b/test/integration/test_examples.py
index 9e5e7d634..aafcbf83a 100644
--- a/test/integration/test_examples.py
+++ b/test/integration/test_examples.py
@@ -1,6 +1,5 @@
# coding=utf-8
-from __future__ import print_function
import re
import traceback
import pytest
@@ -9,11 +8,12 @@
from os.path import join, dirname
from glob import glob
-# tests to exclude
-excludes = ['authorization_v1.py', 'discovery_v1.ipynb', '__init__.py', 'microphone-speech-to-text.py']
+# tests to include
+includes = ['assistant_v1.py', 'natural_language_understanding_v1.py']
# examples path. /examples
-examples_path = join(dirname(__file__), '../', 'examples', '*.py')
+examples_path = join(dirname(__file__), '../../', 'examples', '*.py')
+
@pytest.mark.skipif(os.getenv('VCAP_SERVICES') is None,
reason='requires VCAP_SERVICES')
@@ -23,17 +23,13 @@ def test_examples():
for example in examples:
name = example.split('/')[-1]
- # exclude some tests cases like authorization
- if name in excludes:
+ if name not in includes:
continue
- # exclude tests if there are no credentials for that service
- service_name = name[:-6] if not name.startswith('visual_recognition')\
- else 'watson_vision_combined'
+ service_name = name[:-6]
if service_name not in vcap_services:
- print('%s does not have credentials in VCAP_SERVICES',
- service_name)
+            print('%s does not have credentials in VCAP_SERVICES' % service_name)
continue
try:
diff --git a/test/integration/test_natural_language_classifier_v1.py b/test/integration/test_natural_language_classifier_v1.py
deleted file mode 100644
index f9122be1f..000000000
--- a/test/integration/test_natural_language_classifier_v1.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# coding: utf-8
-from unittest import TestCase
-import os
-import ibm_watson
-import pytest
-import json
-import time
-
-FIVE_SECONDS = 5
-
-@pytest.mark.skipif(os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES')
-class TestNaturalLanguageClassifierV1(TestCase):
- def setUp(self):
- self.natural_language_classifier = ibm_watson.NaturalLanguageClassifierV1()
- self.natural_language_classifier.set_default_headers({
- 'X-Watson-Learning-Opt-Out': '1',
- 'X-Watson-Test': '1'
- })
-
- # Create a classifier
- with open(os.path.join(os.path.dirname(__file__), '../../resources/weather_data_train.csv'), 'rb') as training_data:
- metadata = json.dumps({'name': 'my-classifier', 'language': 'en'})
- classifier = self.natural_language_classifier.create_classifier(
- metadata=metadata,
- training_data=training_data
- ).get_result()
- self.classifier_id = classifier['classifier_id']
-
- def tearDown(self):
- self.natural_language_classifier.delete_classifier(self.classifier_id)
-
- def test_list_classifier(self):
- list_classifiers = self.natural_language_classifier.list_classifiers().get_result()
- assert list_classifiers is not None
-
- @pytest.mark.skip(reason="The classifier takes more than a minute")
- def test_classify_text(self):
- iterations = 0
- while iterations < 15:
- status = self.natural_language_classifier.get_classifier(self.classifier_id).get_result()
- iterations += 1
- if status['status'] != 'Available':
- time.sleep(FIVE_SECONDS)
-
- if status['status'] != 'Available':
- assert False, 'Classifier is not available'
-
- classes = self.natural_language_classifier.classify(self.classifier_id, 'How hot will it be tomorrow?').get_result()
- assert classes is not None
-
- collection = ['{"text":"How hot will it be today?"}', '{"text":"Is it hot outside?"}']
- classes = self.natural_language_classifier.classify_collection(
- self.classifier_id, collection).get_result()
- assert classes is not None
diff --git a/test/integration/test_natural_language_understanding_v1.py b/test/integration/test_natural_language_understanding_v1.py
new file mode 100644
index 000000000..1255b5067
--- /dev/null
+++ b/test/integration/test_natural_language_understanding_v1.py
@@ -0,0 +1,31 @@
+# coding: utf-8
+from unittest import TestCase
+import os
+import ibm_watson
+import pytest
+import json
+import time
+from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions
+from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
+
+class TestNaturalLanguageUnderstandingV1(TestCase):
+
+ def setUp(self):
+
+ with open('./auth.json') as f:
+ data = json.load(f)
+ nlu_auth = data.get("nlu")
+
+ self.authenticator = IAMAuthenticator(nlu_auth.get("apikey"))
+ self.natural_language_understanding = ibm_watson.NaturalLanguageUnderstandingV1(version='2018-03-16', authenticator=self.authenticator)
+ self.natural_language_understanding.set_default_headers({
+ 'X-Watson-Learning-Opt-Out': '1',
+ 'X-Watson-Test': '1'
+ })
+
+ def test_analyze(self):
+ response = self.natural_language_understanding.analyze(
+ text='Bruce Banner is the Hulk and Bruce Wayne is BATMAN! '
+ 'Superman fears not Banner, but Wayne.',
+ features=Features(entities=EntitiesOptions(), keywords=KeywordsOptions())).get_result()
+ assert response is not None
diff --git a/test/integration/test_speech_to_text_v1.py b/test/integration/test_speech_to_text_v1.py
index 5dd4e5dfc..4defbea19 100644
--- a/test/integration/test_speech_to_text_v1.py
+++ b/test/integration/test_speech_to_text_v1.py
@@ -1,4 +1,3 @@
-from __future__ import print_function
from unittest import TestCase
import os
from ibm_watson.websocket import RecognizeCallback, AudioSource
@@ -6,8 +5,9 @@
import pytest
import threading
-@pytest.mark.skipif(
- os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES')
+
+@pytest.mark.skipif(os.getenv('SPEECH_TO_TEXT_APIKEY') is None,
+ reason='requires SPEECH_TO_TEXT_APIKEY')
class TestSpeechToTextV1(TestCase):
text_to_speech = None
custom_models = None
@@ -18,26 +18,26 @@ class TestSpeechToTextV1(TestCase):
def setup_class(cls):
cls.speech_to_text = ibm_watson.SpeechToTextV1()
cls.speech_to_text.set_default_headers({
- 'X-Watson-Learning-Opt-Out':
- '1',
- 'X-Watson-Test':
- '1'
+ 'X-Watson-Learning-Opt-Out': '1',
+ 'X-Watson-Test': '1'
})
- cls.custom_models = cls.speech_to_text.list_language_models().get_result()
+ cls.custom_models = cls.speech_to_text.list_language_models(
+ ).get_result()
cls.create_custom_model = cls.speech_to_text.create_language_model(
name="integration_test_model",
base_model_name="en-US_BroadbandModel").get_result()
- cls.customization_id = cls.create_custom_model['customization_id']
+ cls.customization_id = cls.create_custom_model.get('customization_id')
@classmethod
def teardown_class(cls):
cls.speech_to_text.delete_language_model(
- customization_id=cls.create_custom_model['customization_id'])
+ customization_id=cls.create_custom_model.get('customization_id'))
def test_models(self):
output = self.speech_to_text.list_models().get_result()
assert output is not None
- model = self.speech_to_text.get_model('ko-KR_BroadbandModel').get_result()
+ model = self.speech_to_text.get_model(
+ 'ko-KR_BroadbandModel').get_result()
assert model is not None
try:
self.speech_to_text.get_model('bogus')
@@ -45,14 +45,18 @@ def test_models(self):
assert 'X-global-transaction-id:' in str(e)
def test_create_custom_model(self):
- current_custom_models = self.speech_to_text.list_language_models().get_result()
+ current_custom_models = self.speech_to_text.list_language_models(
+ ).get_result()
assert len(current_custom_models['customizations']) - len(
- self.custom_models['customizations']) >= 1
+ self.custom_models.get('customizations')) >= 1
def test_recognize(self):
- with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file:
+ with open(
+ os.path.join(os.path.dirname(__file__),
+ '../../resources/speech.wav'), 'rb') as audio_file:
output = self.speech_to_text.recognize(
- audio=audio_file, content_type='audio/l16; rate=44100').get_result()
+ audio=audio_file,
+ content_type='audio/l16; rate=44100').get_result()
assert output['results'][0]['alternatives'][0][
'transcript'] == 'thunderstorms could produce large hail isolated tornadoes and heavy rain '
@@ -61,8 +65,9 @@ def test_recognitions(self):
assert output is not None
def test_custom_corpora(self):
- output = self.speech_to_text.list_corpora(self.customization_id).get_result()
- assert len(output['corpora']) == 0 # pylint: disable=len-as-condition
+ output = self.speech_to_text.list_corpora(
+ self.customization_id).get_result()
+ assert not output['corpora']
def test_acoustic_model(self):
list_models = self.speech_to_text.list_acoustic_models().get_result()
@@ -84,7 +89,9 @@ def test_acoustic_model(self):
get_acoustic_model['customization_id']).get_result()
def test_recognize_using_websocket(self):
+
class MyRecognizeCallback(RecognizeCallback):
+
def __init__(self):
RecognizeCallback.__init__(self)
self.error = None
@@ -93,22 +100,87 @@ def __init__(self):
def on_error(self, error):
self.error = error
- def on_transcription(self, transcript):
- self.transcript = transcript
+ def on_data(self, data):
+ self.data = data
test_callback = MyRecognizeCallback()
- with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file:
+ with open(
+ os.path.join(os.path.dirname(__file__),
+ '../../resources/speech.wav'), 'rb') as audio_file:
audio_source = AudioSource(audio_file, False)
- t = threading.Thread(target=self.speech_to_text.recognize_using_websocket, args=(audio_source, "audio/l16; rate=44100", test_callback))
+ t = threading.Thread(
+ target=self.speech_to_text.recognize_using_websocket,
+ args=(audio_source, "audio/l16; rate=44100", test_callback))
t.start()
t.join()
assert test_callback.error is None
- assert test_callback.transcript is not None
- assert test_callback.transcript[0]['transcript'] == 'thunderstorms could produce large hail isolated tornadoes and heavy rain '
+ assert test_callback.data is not None
+        assert test_callback.data['results'][0]['alternatives'][0][
+            'transcript'] == 'thunderstorms could produce large hail isolated tornadoes and heavy rain '
+ def test_on_transcription_interim_results_false(self):
+ class MyRecognizeCallback(RecognizeCallback):
+ def __init__(self):
+ RecognizeCallback.__init__(self)
+ self.error = None
+ self.transcript = None
+ def on_error(self, error):
+ self.error = error
+ def on_transcription(self, transcript):
+ self.transcript = transcript
+ test_callback = MyRecognizeCallback()
+ with open(os.path.join(os.path.dirname(__file__), '../../resources/speech_with_pause.wav'), 'rb') as audio_file:
+ audio_source = AudioSource(audio_file, False)
+ self.speech_to_text.recognize_using_websocket(audio_source, "audio/wav", test_callback, model="en-US_Telephony",
+ interim_results=False, low_latency=False)
+ assert test_callback.error is None
+ assert test_callback.transcript is not None
+ assert test_callback.transcript[0][0]['transcript'] in ['isolated tornadoes ', 'isolated tornados ']
+ assert test_callback.transcript[1][0]['transcript'] == 'and heavy rain '
+ def test_on_transcription_interim_results_true(self):
+ class MyRecognizeCallback(RecognizeCallback):
+ def __init__(self):
+ RecognizeCallback.__init__(self)
+ self.error = None
+ self.transcript = None
+ def on_error(self, error):
+ self.error = error
+ def on_transcription(self, transcript):
+ self.transcript = transcript
+ assert transcript[0]['confidence'] is not None
+ assert transcript[0]['transcript'] is not None
+ test_callback = MyRecognizeCallback()
+ with open(os.path.join(os.path.dirname(__file__), '../../resources/speech_with_pause.wav'), 'rb') as audio_file:
+ audio_source = AudioSource(audio_file, False)
+ self.speech_to_text.recognize_using_websocket(audio_source, "audio/wav", test_callback, model="en-US_Telephony",
+ interim_results=True, low_latency=True)
+ assert test_callback.error is None
+ assert test_callback.transcript is not None
+ assert test_callback.transcript[0]['transcript'] == 'and heavy rain '
+ def test_on_transcription_interim_results_true_low_latency_false(self):
+ class MyRecognizeCallback(RecognizeCallback):
+ def __init__(self):
+ RecognizeCallback.__init__(self)
+ self.error = None
+ self.transcript = None
+ def on_error(self, error):
+ self.error = error
+ def on_transcription(self, transcript):
+ self.transcript = transcript
+ assert transcript[0]['confidence'] is not None
+ assert transcript[0]['transcript'] is not None
+ test_callback = MyRecognizeCallback()
+ with open(os.path.join(os.path.dirname(__file__), '../../resources/speech_with_pause.wav'), 'rb') as audio_file:
+ audio_source = AudioSource(audio_file, False)
+ self.speech_to_text.recognize_using_websocket(audio_source, "audio/wav", test_callback, model="en-US_Telephony",
+ interim_results=True, low_latency=False)
+ assert test_callback.error is None
+ assert test_callback.transcript is not None
+ assert test_callback.transcript[0]['transcript'] == 'and heavy rain '
+
def test_custom_grammars(self):
customization_id = None
- for custom_model in self.custom_models['customizations']:
+ for custom_model in self.custom_models.get('customizations'):
if custom_model['name'] == 'integration_test_model_for_grammar':
customization_id = custom_model['customization_id']
break
@@ -117,37 +189,37 @@ def test_custom_grammars(self):
print('Creating a new custom model')
create_custom_model_for_grammar = self.speech_to_text.create_language_model(
name="integration_test_model_for_grammar",
- base_model_name="en-US_BroadbandModel"
- ).get_result()
- customization_id = create_custom_model_for_grammar['customization_id']
+ base_model_name="en-US_BroadbandModel").get_result()
+ customization_id = create_custom_model_for_grammar[
+ 'customization_id']
grammars = self.speech_to_text.list_grammars(
- customization_id
- ).get_result()['grammars']
+ customization_id).get_result()['grammars']
if not grammars:
- with open(os.path.join(os.path.dirname(__file__), '../../resources/confirm-grammar.xml'), 'rb') as grammar_file:
+ with open(
+ os.path.join(os.path.dirname(__file__),
+ '../../resources/confirm-grammar.xml'),
+ 'rb') as grammar_file:
add_grammar_result = self.speech_to_text.add_grammar(
customization_id,
grammar_name='test-add-grammar-python',
grammar_file=grammar_file,
content_type='application/srgs+xml',
- allow_overwrite=True
- ).get_result()
+ allow_overwrite=True).get_result()
assert add_grammar_result is not None
get_grammar_result = self.speech_to_text.get_grammar(
customization_id,
- grammar_name='test-add-grammar-python'
- ).get_result()
+ grammar_name='test-add-grammar-python').get_result()
assert get_grammar_result is not None
else:
print('Deleting grammar')
- delete_grammar_result = self.speech_to_text.delete_grammar(
- customization_id,
- 'test-add-grammar-python'
- ).get_result()
- assert delete_grammar_result is not None
+ try:
+ self.speech_to_text.delete_grammar(
+ customization_id, 'test-add-grammar-python').get_result()
+ except ibm_watson.ApiException as ex:
+ print('Could not delete grammar: {0}'.format(ex.message))
try:
self.speech_to_text.delete_language_model(customization_id)
diff --git a/test/integration/test_text_to_speech_v1.py b/test/integration/test_text_to_speech_v1.py
index 6da05df7a..407abd68d 100644
--- a/test/integration/test_text_to_speech_v1.py
+++ b/test/integration/test_text_to_speech_v1.py
@@ -5,8 +5,9 @@
import pytest
import os
-@pytest.mark.skipif(
- os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES')
+
+@pytest.mark.skipif(os.getenv('TEXT_TO_SPEECH_APIKEY') is None,
+ reason='requires TEXT_TO_SPEECH_APIKEY')
class TestIntegrationTextToSpeechV1(unittest.TestCase):
text_to_speech = None
original_customizations = None
@@ -16,25 +17,25 @@ class TestIntegrationTextToSpeechV1(unittest.TestCase):
def setup_class(cls):
cls.text_to_speech = ibm_watson.TextToSpeechV1()
cls.text_to_speech.set_default_headers({
- 'X-Watson-Learning-Opt-Out':
- '1',
- 'X-Watson-Test':
- '1'
+ 'X-Watson-Learning-Opt-Out': '1',
+ 'X-Watson-Test': '1'
})
- cls.original_customizations = cls.text_to_speech.list_voice_models().get_result()
- cls.created_customization = cls.text_to_speech.create_voice_model(
+ cls.original_customizations = cls.text_to_speech.list_custom_models(
+ ).get_result()
+ cls.created_customization = cls.text_to_speech.create_custom_model(
name="test_integration_customization",
description="customization for tests").get_result()
@classmethod
def teardown_class(cls):
- custid = cls.created_customization['customization_id']
- cls.text_to_speech.delete_voice_model(customization_id=custid)
+ custid = cls.created_customization.get('customization_id')
+ cls.text_to_speech.delete_custom_model(customization_id=custid)
def test_voices(self):
output = self.text_to_speech.list_voices().get_result()
assert output['voices'] is not None
- voice = self.text_to_speech.get_voice(output['voices'][0]['name']).get_result()
+ voice = self.text_to_speech.get_voice(
+ output['voices'][0]['name']).get_result()
assert voice is not None
def test_speak(self):
@@ -49,28 +50,62 @@ def test_pronunciation(self):
assert output['pronunciation'] is not None
def test_customizations(self):
- old_length = len(self.original_customizations['customizations'])
- new_length = len(
- self.text_to_speech.list_voice_models().get_result()['customizations'])
+ old_length = len(self.original_customizations.get('customizations'))
+ new_length = len(self.text_to_speech.list_custom_models().get_result()
+ ['customizations'])
assert new_length - old_length >= 1
def test_custom_words(self):
- customization_id = self.created_customization['customization_id']
- words = self.text_to_speech.list_words(customization_id).get_result()['words']
- assert len(words) == 0 # pylint: disable=len-as-condition
- self.text_to_speech.add_word(
- customization_id, word="ACLs", translation="ackles")
+ customization_id = self.created_customization.get('customization_id')
+ words = self.text_to_speech.list_words(
+ customization_id).get_result()['words']
+ assert not words
+ self.text_to_speech.add_word(customization_id,
+ word="ACLs",
+ translation="ackles")
words = [{"word": "MACLs", "translation": "mackles"}]
self.text_to_speech.add_words(customization_id, words)
self.text_to_speech.delete_word(customization_id, 'ACLs')
- word = self.text_to_speech.get_word(customization_id, 'MACLs').get_result()
+ word = self.text_to_speech.get_word(customization_id,
+ 'MACLs').get_result()
assert word['translation'] == 'mackles'
+ def test_custom_prompts(self):
+ customization_id = self.created_customization.get('customization_id')
+ prompt_id = "Hello"
+ metadata = {
+ "prompt_text": "Hello how are you today?"
+ }
+
+ with open("resources/tts_audio.wav", "rb") as audio_file:
+ self.text_to_speech.add_custom_prompt(
+ customization_id, prompt_id, metadata, audio_file
+ ).get_result()
+ prompts = self.text_to_speech.list_custom_prompts(customization_id).get_result()
+ assert len(prompts) > 0
+ prompt = self.text_to_speech.get_custom_prompt(customization_id, prompt_id).get_result()
+ assert prompt["prompt_id"] == prompt_id
+ self.text_to_speech.delete_custom_prompt(customization_id, prompt_id)
+
+ def test_speaker_models(self):
+ speaker_name = "Angelo"
+
+ with open("resources/tts_audio.wav", "rb") as audio_file:
+ speaker_id = self.text_to_speech.create_speaker_model(
+ speaker_name, audio_file
+ ).get_result()["speaker_id"]
+ speaker_models = self.text_to_speech.list_speaker_models().get_result()
+ assert len(speaker_models) > 0
+ speaker_model = self.text_to_speech.get_speaker_model(speaker_id).get_result()
+ self.text_to_speech.delete_speaker_model(speaker_id)
+
def test_synthesize_using_websocket(self):
file = 'tongue_twister.wav'
+
class MySynthesizeCallback(SynthesizeCallback):
+
def __init__(self):
SynthesizeCallback.__init__(self)
self.fd = None
@@ -89,11 +124,46 @@ def on_close(self):
self.fd.close()
test_callback = MySynthesizeCallback()
- self.text_to_speech.synthesize_using_websocket('She sells seashells by the seashore',
- test_callback,
- accept='audio/wav',
- voice='en-GB_KateVoice'
- )
+ self.text_to_speech.synthesize_using_websocket(
+ 'She sells seashells by the seashore',
+ test_callback,
+ accept='audio/wav',
+ voice='en-GB_KateVoice')
+ assert test_callback.error is None
+ assert test_callback.fd is not None
+ assert os.stat(file).st_size > 0
+ os.remove(file)
+
+    # This test will only be meaningful so long as en-AU_CraigVoice is a Neural type voice model
+    # Check this url for all Neural type voice models: https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-voices#languageVoices
+ def test_synthesize_using_websocket_neural(self):
+ file = 'tongue_twister.wav'
+
+ class MySynthesizeCallback(SynthesizeCallback):
+
+ def __init__(self):
+ SynthesizeCallback.__init__(self)
+ self.fd = None
+ self.error = None
+
+ def on_connected(self):
+ self.fd = open(file, 'ab')
+
+ def on_error(self, error):
+ self.error = error
+
+ def on_audio_stream(self, audio_stream):
+ self.fd.write(audio_stream)
+
+ def on_close(self):
+ self.fd.close()
+
+ test_callback = MySynthesizeCallback()
+ self.text_to_speech.synthesize_using_websocket(
+ 'She sells seashells by the seashore',
+ test_callback,
+ accept='audio/wav',
+ voice='en-GB_JamesV3Voice')
assert test_callback.error is None
assert test_callback.fd is not None
assert os.stat(file).st_size > 0
diff --git a/test/integration/test_visual_recognition.py b/test/integration/test_visual_recognition.py
deleted file mode 100644
index 152ec9d24..000000000
--- a/test/integration/test_visual_recognition.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# coding: utf-8
-import pytest
-import ibm_watson
-import os
-from os.path import abspath
-from unittest import TestCase
-
-@pytest.mark.skipif(
- os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES')
-class IntegrationTestVisualRecognitionV3(TestCase):
- visual_recognition = None
- classifier_id = None
-
- @classmethod
- def setup_class(cls):
- cls.visual_recognition = ibm_watson.VisualRecognitionV3('2018-03-19')
- cls.visual_recognition.set_default_headers({
- 'X-Watson-Learning-Opt-Out':
- '1',
- 'X-Watson-Test':
- '1'
- })
- cls.classifier_id = 'sdkxtestxclassifierxdoxnotxdel_1089651138'
-
- def test_classify(self):
- dog_path = abspath('resources/dog.jpg')
- with open(dog_path, 'rb') as image_file:
- dog_results = self.visual_recognition.classify(
- images_file=image_file,
- threshold='0.1',
- classifier_ids=['default']).get_result()
- assert dog_results is not None
-
- def test_detect_faces(self):
- output = self.visual_recognition.detect_faces(
- url='https://www.ibm.com/ibm/ginni/images/ginni_bio_780x981_v4_03162016.jpg').get_result()
- assert output is not None
-
- @pytest.mark.skip(reason="Time consuming")
- def test_custom_classifier(self):
- with open(abspath('resources/cars.zip'), 'rb') as cars, \
- open(abspath('resources/trucks.zip'), 'rb') as trucks:
- classifier = self.visual_recognition.create_classifier(
- 'CarsVsTrucks',
- positive_examples={'cars': cars},
- negative_examples=trucks,
- ).get_result()
-
- assert classifier is not None
-
- classifier_id = classifier['classifier_id']
- output = self.visual_recognition.get_classifier(classifier_id).get_result()
- assert output is not None
-
- output = self.visual_recognition.delete_classifier(classifier_id).get_result()
-
- def test_core_ml_model(self):
- core_ml_model = self.visual_recognition.get_core_ml_model(self.classifier_id).get_result()
- assert core_ml_model.ok
diff --git a/test/unit/__init__.py b/test/unit/__init__.py
index 161119efe..949039f3b 100644
--- a/test/unit/__init__.py
+++ b/test/unit/__init__.py
@@ -1,5 +1,4 @@
# coding: utf-8
-from __future__ import print_function
from dotenv import load_dotenv, find_dotenv
# load the .env file containing your environment variables for the required
diff --git a/test/unit/test_assistant_v1.py b/test/unit/test_assistant_v1.py
index 757349940..82781955f 100644
--- a/test/unit/test_assistant_v1.py
+++ b/test/unit/test_assistant_v1.py
@@ -1,1520 +1,13086 @@
-# coding: utf-8
+# -*- coding: utf-8 -*-
+# (C) Copyright IBM Corp. 2019, 2024.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Unit Tests for AssistantV1
+"""
+
+from datetime import datetime, timezone
+from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
+from ibm_cloud_sdk_core.utils import datetime_to_string, string_to_datetime
+import inspect
import json
-import datetime
-from dateutil.tz import tzutc
+import pytest
+import re
+import requests
import responses
-import ibm_watson
-from ibm_watson import ApiException
-from ibm_watson.assistant_v1 import Context, Counterexample, \
- CounterexampleCollection, Entity, EntityCollection, Example, \
- ExampleCollection, MessageInput, Intent, IntentCollection, Synonym, \
- SynonymCollection, Value, ValueCollection, Workspace, WorkspaceCollection
-
-platform_url = 'https://gateway.watsonplatform.net'
-service_path = '/assistant/api'
-base_url = '{0}{1}'.format(platform_url, service_path)
-
-#########################
-# counterexamples
-#########################
-
-
-@responses.activate
-def test_create_counterexample():
- endpoint = '/v1/workspaces/{0}/counterexamples'.format('boguswid')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "text": "I want financial advice today.",
- "created": "2016-07-11T16:39:01.774Z",
- "updated": "2015-12-07T18:53:59.153Z"
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=201,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- counterexample = service.create_counterexample(
- workspace_id='boguswid', text='I want financial advice today.').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert counterexample == response
- # Verify that response can be converted to a Counterexample
- Counterexample._from_dict(counterexample)
-
-@responses.activate
-def test_rate_limit_exceeded():
- endpoint = '/v1/workspaces/{0}/counterexamples'.format('boguswid')
- url = '{0}{1}'.format(base_url, endpoint)
- error_code = 429
- error_msg = 'Rate limit exceeded'
- responses.add(
- responses.POST,
- url,
- body='Rate limit exceeded',
- status=429,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- try:
- service.create_counterexample(
- workspace_id='boguswid', text='I want financial advice today.')
- except ApiException as ex:
- assert len(responses.calls) == 1
- assert isinstance(ex, ApiException)
- assert error_code == ex.code
- assert error_msg in str(ex)
-
-@responses.activate
-def test_unknown_error():
- endpoint = '/v1/workspaces/{0}/counterexamples'.format('boguswid')
- url = '{0}{1}'.format(base_url, endpoint)
- error_msg = 'Unknown error'
- responses.add(
- responses.POST,
- url,
- status=407,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- try:
- service.create_counterexample(
- workspace_id='boguswid', text='I want financial advice today.')
- except ApiException as ex:
- assert len(responses.calls) == 1
- assert error_msg in str(ex)
-
-@responses.activate
-def test_delete_counterexample():
- endpoint = '/v1/workspaces/{0}/counterexamples/{1}'.format(
- 'boguswid', 'I%20want%20financial%20advice%20today')
- url = '{0}{1}'.format(base_url, endpoint)
- response = None
- responses.add(
- responses.DELETE,
- url,
- body=response,
- status=204,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- counterexample = service.delete_counterexample(
- workspace_id='boguswid', text='I want financial advice today').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert counterexample is None
-
-
-@responses.activate
-def test_get_counterexample():
- endpoint = '/v1/workspaces/{0}/counterexamples/{1}'.format(
- 'boguswid', 'What%20are%20you%20wearing%3F')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "text": "What are you wearing?",
- "created": "2016-07-11T23:53:59.153Z",
- "updated": "2016-12-07T18:53:59.153Z"
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- counterexample = service.get_counterexample(
- workspace_id='boguswid', text='What are you wearing?').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert counterexample == response
- # Verify that response can be converted to a Counterexample
- Counterexample._from_dict(counterexample)
-
-@responses.activate
-def test_list_counterexamples():
- endpoint = '/v1/workspaces/{0}/counterexamples'.format('boguswid')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "counterexamples": [{
- "text": "I want financial advice today.",
- "created": "2016-07-11T16:39:01.774Z",
- "updated": "2015-12-07T18:53:59.153Z"
- }, {
- "text": "What are you wearing today",
- "created": "2016-07-11T16:39:01.774Z",
- "updated": "2015-12-07T18:53:59.153Z"
- }],
- "pagination": {
- "refresh_url":
- "/v1/workspaces/pizza_app-e0f3/counterexamples?version=2017-12-18&page_limit=2",
- "next_url":
- "/v1/workspaces/pizza_app-e0f3/counterexamples?cursor=base64=&version=2017-12-18&page_limit=2"
- }
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- counterexamples = service.list_counterexamples(workspace_id='boguswid').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert counterexamples == response
- # Verify that response can be converted to a CounterexampleCollection
- CounterexampleCollection._from_dict(counterexamples)
-
-@responses.activate
-def test_update_counterexample():
- endpoint = '/v1/workspaces/{0}/counterexamples/{1}'.format(
- 'boguswid', 'What%20are%20you%20wearing%3F')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "text": "What are you wearing?",
- "created": "2016-07-11T23:53:59.153Z",
- "updated": "2015-12-07T18:53:59.153Z"
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- counterexample = service.update_counterexample(
- workspace_id='boguswid',
- text='What are you wearing?',
- new_text='What are you wearing?').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert counterexample == response
- # Verify that response can be converted to a Counterexample
- Counterexample._from_dict(counterexample)
-
-#########################
-# entities
-#########################
-
-
-@responses.activate
-def test_create_entity():
- endpoint = '/v1/workspaces/{0}/entities'.format('boguswid')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "entity": "pizza_toppings",
- "description": "Tasty pizza toppings",
- "created": "2015-12-06T04:32:20.000Z",
- "updated": "2015-12-07T18:53:59.153Z",
- "metadata": {
- "property": "value"
+import urllib
+from ibm_watson.assistant_v1 import *
+
+version = 'testString'
+
+_service = AssistantV1(
+ authenticator=NoAuthAuthenticator(),
+ version=version,
+)
+
+_base_url = 'https://api.us-south.assistant.watson.cloud.ibm.com'
+_service.set_service_url(_base_url)
+
+
+def preprocess_url(operation_path: str):
+ """
+ Returns the request url associated with the specified operation path.
+ This will be base_url concatenated with a quoted version of operation_path.
+ The returned request URL is used to register the mock response so it needs
+ to match the request URL that is formed by the requests library.
+ """
+
+ # Form the request URL from the base URL and operation path.
+ request_url = _base_url + operation_path
+
+ # If the request url does NOT end with a /, then just return it as-is.
+ # Otherwise, return a regular expression that matches one or more trailing /.
+ if not request_url.endswith('/'):
+ return request_url
+ return re.compile(request_url.rstrip('/') + '/+')
+
+
+##############################################################################
+# Start of Service: Message
+##############################################################################
+# region
+
+
+class TestMessage:
+ """
+ Test Class for message
+ """
+
+ @responses.activate
+ def test_message_all_params(self):
+ """
+ message()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/message')
+ mock_response = '{"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a MessageInput model
+ message_input_model = {}
+ message_input_model['text'] = 'testString'
+ message_input_model['spelling_suggestions'] = False
+ message_input_model['spelling_auto_correct'] = False
+ message_input_model['foo'] = 'testString'
+
+ # Construct a dict representation of a RuntimeIntent model
+ runtime_intent_model = {}
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+
+ # Construct a dict representation of a CaptureGroup model
+ capture_group_model = {}
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ # Construct a dict representation of a RuntimeEntityInterpretation model
+ runtime_entity_interpretation_model = {}
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ # Construct a dict representation of a RuntimeEntityAlternative model
+ runtime_entity_alternative_model = {}
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ # Construct a dict representation of a RuntimeEntityRole model
+ runtime_entity_role_model = {}
+ runtime_entity_role_model['type'] = 'date_from'
+
+ # Construct a dict representation of a RuntimeEntity model
+ runtime_entity_model = {}
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+
+ # Construct a dict representation of a MessageContextMetadata model
+ message_context_metadata_model = {}
+ message_context_metadata_model['deployment'] = 'testString'
+ message_context_metadata_model['user_id'] = 'testString'
+
+ # Construct a dict representation of a Context model
+ context_model = {}
+ context_model['conversation_id'] = 'testString'
+ context_model['system'] = {'anyKey': 'anyValue'}
+ context_model['metadata'] = message_context_metadata_model
+ context_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeVisitedDetails model
+ dialog_node_visited_details_model = {}
+ dialog_node_visited_details_model['dialog_node'] = 'testString'
+ dialog_node_visited_details_model['title'] = 'testString'
+ dialog_node_visited_details_model['conditions'] = 'testString'
+
+ # Construct a dict representation of a LogMessageSource model
+ log_message_source_model = {}
+ log_message_source_model['type'] = 'dialog_node'
+ log_message_source_model['dialog_node'] = 'testString'
+
+ # Construct a dict representation of a LogMessage model
+ log_message_model = {}
+ log_message_model['level'] = 'info'
+ log_message_model['msg'] = 'testString'
+ log_message_model['code'] = 'testString'
+ log_message_model['source'] = log_message_source_model
+
+ # Construct a dict representation of a ResponseGenericChannel model
+ response_generic_channel_model = {}
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a dict representation of a RuntimeResponseGenericRuntimeResponseTypeText model
+ runtime_response_generic_model = {}
+ runtime_response_generic_model['response_type'] = 'text'
+ runtime_response_generic_model['text'] = 'testString'
+ runtime_response_generic_model['channels'] = [response_generic_channel_model]
+
+ # Construct a dict representation of a OutputData model
+ output_data_model = {}
+ output_data_model['nodes_visited'] = ['testString']
+ output_data_model['nodes_visited_details'] = [dialog_node_visited_details_model]
+ output_data_model['log_messages'] = [log_message_model]
+ output_data_model['generic'] = [runtime_response_generic_model]
+ output_data_model['foo'] = 'testString'
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ input = message_input_model
+ intents = [runtime_intent_model]
+ entities = [runtime_entity_model]
+ alternate_intents = False
+ context = context_model
+ output = output_data_model
+ user_id = 'testString'
+ nodes_visited_details = False
+
+ # Invoke method
+ response = _service.message(
+ workspace_id,
+ input=input,
+ intents=intents,
+ entities=entities,
+ alternate_intents=alternate_intents,
+ context=context,
+ output=output,
+ user_id=user_id,
+ nodes_visited_details=nodes_visited_details,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'nodes_visited_details={}'.format('true' if nodes_visited_details else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['input'] == message_input_model
+ assert req_body['intents'] == [runtime_intent_model]
+ assert req_body['entities'] == [runtime_entity_model]
+ assert req_body['alternate_intents'] == False
+ assert req_body['context'] == context_model
+ assert req_body['output'] == output_data_model
+ assert req_body['user_id'] == 'testString'
+
+ def test_message_all_params_with_retries(self):
+ # Enable retries and run test_message_all_params.
+ _service.enable_retries()
+ self.test_message_all_params()
+
+ # Disable retries and run test_message_all_params.
+ _service.disable_retries()
+ self.test_message_all_params()
+
+ @responses.activate
+ def test_message_required_params(self):
+ """
+ test_message_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/message')
+ mock_response = '{"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Invoke method
+ response = _service.message(
+ workspace_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_message_required_params_with_retries(self):
+ # Enable retries and run test_message_required_params.
+ _service.enable_retries()
+ self.test_message_required_params()
+
+ # Disable retries and run test_message_required_params.
+ _service.disable_retries()
+ self.test_message_required_params()
+
+ @responses.activate
+ def test_message_value_error(self):
+ """
+ test_message_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/message')
+ mock_response = '{"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
}
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=201,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- entity = service.create_entity(
- workspace_id='boguswid',
- entity='pizza_toppings',
- description='Tasty pizza toppings',
- metadata={"property": "value"},
- values=None,
- fuzzy_match=None).get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert entity == response
- # Verify that response can be converted to an Entity
- Entity._from_dict(entity)
-
-@responses.activate
-def test_delete_entity():
- endpoint = '/v1/workspaces/{0}/entities/{1}'.format('boguswid', 'pizza_toppings')
- url = '{0}{1}'.format(base_url, endpoint)
- response = ""
- responses.add(
- responses.DELETE,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- entity = service.delete_entity(workspace_id='boguswid', entity='pizza_toppings').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert entity == ""
-
-
-@responses.activate
-def test_get_entity():
- endpoint = '/v1/workspaces/{0}/entities/{1}'.format('boguswid', 'pizza_toppings')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "entity": "pizza_toppings",
- "description": "Tasty pizza toppings",
- "created": "2015-12-06T04:32:20.000Z",
- "updated": "2015-12-07T18:53:59.153Z",
- "metadata": {
- "property": "value"
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.message(**req_copy)
+
+ def test_message_value_error_with_retries(self):
+ # Enable retries and run test_message_value_error.
+ _service.enable_retries()
+ self.test_message_value_error()
+
+ # Disable retries and run test_message_value_error.
+ _service.disable_retries()
+ self.test_message_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Message
+##############################################################################
+
+##############################################################################
+# Start of Service: BulkClassify
+##############################################################################
+# region
+
+
+class TestBulkClassify:
+ """
+ Test Class for bulk_classify
+ """
+
+ @responses.activate
+ def test_bulk_classify_all_params(self):
+ """
+ bulk_classify()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/bulk_classify')
+ mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a BulkClassifyUtterance model
+ bulk_classify_utterance_model = {}
+ bulk_classify_utterance_model['text'] = 'testString'
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ input = [bulk_classify_utterance_model]
+
+ # Invoke method
+ response = _service.bulk_classify(
+ workspace_id,
+ input=input,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['input'] == [bulk_classify_utterance_model]
+
+ def test_bulk_classify_all_params_with_retries(self):
+ # Enable retries and run test_bulk_classify_all_params.
+ _service.enable_retries()
+ self.test_bulk_classify_all_params()
+
+ # Disable retries and run test_bulk_classify_all_params.
+ _service.disable_retries()
+ self.test_bulk_classify_all_params()
+
+ @responses.activate
+ def test_bulk_classify_required_params(self):
+ """
+ test_bulk_classify_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/bulk_classify')
+ mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Invoke method
+ response = _service.bulk_classify(
+ workspace_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_bulk_classify_required_params_with_retries(self):
+ # Enable retries and run test_bulk_classify_required_params.
+ _service.enable_retries()
+ self.test_bulk_classify_required_params()
+
+ # Disable retries and run test_bulk_classify_required_params.
+ _service.disable_retries()
+ self.test_bulk_classify_required_params()
+
+ @responses.activate
+ def test_bulk_classify_value_error(self):
+ """
+ test_bulk_classify_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/bulk_classify')
+ mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "intents": [{"intent": "intent", "confidence": 10}]}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
}
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- entity = service.get_entity(workspace_id='boguswid', entity='pizza_toppings', export=True).get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert entity == response
- # Verify that response can be converted to an Entity
- Entity._from_dict(entity)
-
-
-@responses.activate
-def test_list_entities():
- endpoint = '/v1/workspaces/{0}/entities'.format('boguswid')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "entities": [{
- "entity": "pizza_toppings",
- "description": "Tasty pizza toppings",
- "created": "2015-12-06T04:32:20.000Z",
- "updated": "2015-12-07T18:53:59.153Z",
- "metadata": {
- "property": "value"
- }
- }],
- "pagination": {
- "refresh_url":
- "/v1/workspaces/pizza_app-e0f3/entities?version=2017-12-18&filter=name:pizza&include_count=true&page_limit=1",
- "next_url":
- "/v1/workspaces/pizza_app-e0f3/entities?cursor=base64=&version=2017-12-18&filter=name:pizza&page_limit=1",
- "total":
- 1,
- "matched":
- 1
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.bulk_classify(**req_copy)
+
+ def test_bulk_classify_value_error_with_retries(self):
+ # Enable retries and run test_bulk_classify_value_error.
+ _service.enable_retries()
+ self.test_bulk_classify_value_error()
+
+ # Disable retries and run test_bulk_classify_value_error.
+ _service.disable_retries()
+ self.test_bulk_classify_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: BulkClassify
+##############################################################################
+
+##############################################################################
+# Start of Service: Workspaces
+##############################################################################
+# region
+
+
+class TestListWorkspaces:
+ """
+ Test Class for list_workspaces
+ """
+
+ @responses.activate
+ def test_list_workspaces_all_params(self):
+ """
+ list_workspaces()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces')
+        mock_response = '{"workspaces": [{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ page_limit = 100
+ include_count = False
+ sort = 'name'
+ cursor = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.list_workspaces(
+ page_limit=page_limit,
+ include_count=include_count,
+ sort=sort,
+ cursor=cursor,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'page_limit={}'.format(page_limit) in query_string
+ assert 'include_count={}'.format('true' if include_count else 'false') in query_string
+ assert 'sort={}'.format(sort) in query_string
+ assert 'cursor={}'.format(cursor) in query_string
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_list_workspaces_all_params_with_retries(self):
+ # Enable retries and run test_list_workspaces_all_params.
+ _service.enable_retries()
+ self.test_list_workspaces_all_params()
+
+ # Disable retries and run test_list_workspaces_all_params.
+ _service.disable_retries()
+ self.test_list_workspaces_all_params()
+
+ @responses.activate
+ def test_list_workspaces_required_params(self):
+ """
+ test_list_workspaces_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces')
+        mock_response = '{"workspaces": [{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Invoke method
+ response = _service.list_workspaces()
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_workspaces_required_params_with_retries(self):
+ # Enable retries and run test_list_workspaces_required_params.
+ _service.enable_retries()
+ self.test_list_workspaces_required_params()
+
+ # Disable retries and run test_list_workspaces_required_params.
+ _service.disable_retries()
+ self.test_list_workspaces_required_params()
+
+ @responses.activate
+ def test_list_workspaces_value_error(self):
+ """
+ test_list_workspaces_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces')
+        mock_response = '{"workspaces": [{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
}
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- entities = service.list_entities(
- workspace_id='boguswid',
- export=True).get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert entities == response
- # Verify that response can be converted to an EntityCollection
- EntityCollection._from_dict(entities)
-
-
-@responses.activate
-def test_update_entity():
- endpoint = '/v1/workspaces/{0}/entities/{1}'.format('boguswid', 'pizza_toppings')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "entity": "pizza_toppings",
- "description": "Tasty pizza toppings",
- "created": "2015-12-06T04:32:20.000Z",
- "updated": "2015-12-07T18:53:59.153Z",
- "metadata": {
- "property": "value"
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.list_workspaces(**req_copy)
+
+ def test_list_workspaces_value_error_with_retries(self):
+ # Enable retries and run test_list_workspaces_value_error.
+ _service.enable_retries()
+ self.test_list_workspaces_value_error()
+
+ # Disable retries and run test_list_workspaces_value_error.
+ _service.disable_retries()
+ self.test_list_workspaces_value_error()
+
+
+class TestCreateWorkspace:
+ """
+ Test Class for create_workspace
+ """
+
+ @responses.activate
+ def test_create_workspace_all_params(self):
+ """
+ create_workspace()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces')
+        mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Construct a dict representation of a DialogNodeOutputTextValuesElement model
+ dialog_node_output_text_values_element_model = {}
+ dialog_node_output_text_values_element_model['text'] = 'testString'
+
+ # Construct a dict representation of a ResponseGenericChannel model
+ response_generic_channel_model = {}
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model
+ dialog_node_output_generic_model = {}
+ dialog_node_output_generic_model['response_type'] = 'text'
+ dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model]
+ dialog_node_output_generic_model['selection_policy'] = 'sequential'
+ dialog_node_output_generic_model['delimiter'] = '\\n'
+ dialog_node_output_generic_model['channels'] = [response_generic_channel_model]
+
+ # Construct a dict representation of a DialogNodeOutputModifiers model
+ dialog_node_output_modifiers_model = {}
+ dialog_node_output_modifiers_model['overwrite'] = True
+
+ # Construct a dict representation of a DialogNodeOutput model
+ dialog_node_output_model = {}
+ dialog_node_output_model['generic'] = [dialog_node_output_generic_model]
+ dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model
+ dialog_node_output_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeContext model
+ dialog_node_context_model = {}
+ dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_context_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeNextStep model
+ dialog_node_next_step_model = {}
+ dialog_node_next_step_model['behavior'] = 'get_user_input'
+ dialog_node_next_step_model['dialog_node'] = 'testString'
+ dialog_node_next_step_model['selector'] = 'condition'
+
+ # Construct a dict representation of a DialogNodeAction model
+ dialog_node_action_model = {}
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ # Construct a dict representation of a DialogNode model
+ dialog_node_model = {}
+ dialog_node_model['dialog_node'] = 'testString'
+ dialog_node_model['description'] = 'testString'
+ dialog_node_model['conditions'] = 'testString'
+ dialog_node_model['parent'] = 'testString'
+ dialog_node_model['previous_sibling'] = 'testString'
+ dialog_node_model['output'] = dialog_node_output_model
+ dialog_node_model['context'] = dialog_node_context_model
+ dialog_node_model['metadata'] = {'anyKey': 'anyValue'}
+ dialog_node_model['next_step'] = dialog_node_next_step_model
+ dialog_node_model['title'] = 'testString'
+ dialog_node_model['type'] = 'standard'
+ dialog_node_model['event_name'] = 'focus'
+ dialog_node_model['variable'] = 'testString'
+ dialog_node_model['actions'] = [dialog_node_action_model]
+ dialog_node_model['digress_in'] = 'not_available'
+ dialog_node_model['digress_out'] = 'allow_returning'
+ dialog_node_model['digress_out_slots'] = 'not_allowed'
+ dialog_node_model['user_label'] = 'testString'
+ dialog_node_model['disambiguation_opt_out'] = False
+
+ # Construct a dict representation of a Counterexample model
+ counterexample_model = {}
+ counterexample_model['text'] = 'testString'
+
+ # Construct a dict representation of a WorkspaceSystemSettingsTooling model
+ workspace_system_settings_tooling_model = {}
+ workspace_system_settings_tooling_model['store_generic_responses'] = True
+
+ # Construct a dict representation of a WorkspaceSystemSettingsDisambiguation model
+ workspace_system_settings_disambiguation_model = {}
+ workspace_system_settings_disambiguation_model['prompt'] = 'testString'
+ workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString'
+ workspace_system_settings_disambiguation_model['enabled'] = False
+ workspace_system_settings_disambiguation_model['sensitivity'] = 'auto'
+ workspace_system_settings_disambiguation_model['randomize'] = True
+ workspace_system_settings_disambiguation_model['max_suggestions'] = 1
+ workspace_system_settings_disambiguation_model['suggestion_text_policy'] = 'testString'
+
+ # Construct a dict representation of a WorkspaceSystemSettingsSystemEntities model
+ workspace_system_settings_system_entities_model = {}
+ workspace_system_settings_system_entities_model['enabled'] = False
+
+ # Construct a dict representation of a WorkspaceSystemSettingsOffTopic model
+ workspace_system_settings_off_topic_model = {}
+ workspace_system_settings_off_topic_model['enabled'] = False
+
+ # Construct a dict representation of a WorkspaceSystemSettingsNlp model
+ workspace_system_settings_nlp_model = {}
+ workspace_system_settings_nlp_model['model'] = 'testString'
+
+ # Construct a dict representation of a WorkspaceSystemSettings model
+ workspace_system_settings_model = {}
+ workspace_system_settings_model['tooling'] = workspace_system_settings_tooling_model
+ workspace_system_settings_model['disambiguation'] = workspace_system_settings_disambiguation_model
+ workspace_system_settings_model['human_agent_assist'] = {'anyKey': 'anyValue'}
+ workspace_system_settings_model['spelling_suggestions'] = False
+ workspace_system_settings_model['spelling_auto_correct'] = False
+ workspace_system_settings_model['system_entities'] = workspace_system_settings_system_entities_model
+ workspace_system_settings_model['off_topic'] = workspace_system_settings_off_topic_model
+ workspace_system_settings_model['nlp'] = workspace_system_settings_nlp_model
+ workspace_system_settings_model['foo'] = 'testString'
+
+ # Construct a dict representation of a WebhookHeader model
+ webhook_header_model = {}
+ webhook_header_model['name'] = 'testString'
+ webhook_header_model['value'] = 'testString'
+
+ # Construct a dict representation of a Webhook model
+ webhook_model = {}
+ webhook_model['url'] = 'testString'
+ webhook_model['name'] = 'testString'
+ webhook_model['headers'] = [webhook_header_model]
+
+ # Construct a dict representation of a Mention model
+ mention_model = {}
+ mention_model['entity'] = 'testString'
+ mention_model['location'] = [38]
+
+ # Construct a dict representation of a Example model
+ example_model = {}
+ example_model['text'] = 'testString'
+ example_model['mentions'] = [mention_model]
+
+ # Construct a dict representation of a CreateIntent model
+ create_intent_model = {}
+ create_intent_model['intent'] = 'testString'
+ create_intent_model['description'] = 'testString'
+ create_intent_model['examples'] = [example_model]
+
+ # Construct a dict representation of a CreateValue model
+ create_value_model = {}
+ create_value_model['value'] = 'testString'
+ create_value_model['metadata'] = {'anyKey': 'anyValue'}
+ create_value_model['type'] = 'synonyms'
+ create_value_model['synonyms'] = ['testString']
+ create_value_model['patterns'] = ['testString']
+
+ # Construct a dict representation of a CreateEntity model
+ create_entity_model = {}
+ create_entity_model['entity'] = 'testString'
+ create_entity_model['description'] = 'testString'
+ create_entity_model['metadata'] = {'anyKey': 'anyValue'}
+ create_entity_model['fuzzy_match'] = True
+ create_entity_model['values'] = [create_value_model]
+
+ # Set up parameter values
+ name = 'testString'
+ description = 'testString'
+ language = 'testString'
+ dialog_nodes = [dialog_node_model]
+ counterexamples = [counterexample_model]
+ metadata = {'anyKey': 'anyValue'}
+ learning_opt_out = False
+ system_settings = workspace_system_settings_model
+ webhooks = [webhook_model]
+ intents = [create_intent_model]
+ entities = [create_entity_model]
+ include_audit = False
+
+ # Invoke method
+ response = _service.create_workspace(
+ name=name,
+ description=description,
+ language=language,
+ dialog_nodes=dialog_nodes,
+ counterexamples=counterexamples,
+ metadata=metadata,
+ learning_opt_out=learning_opt_out,
+ system_settings=system_settings,
+ webhooks=webhooks,
+ intents=intents,
+ entities=entities,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['name'] == 'testString'
+ assert req_body['description'] == 'testString'
+ assert req_body['language'] == 'testString'
+ assert req_body['dialog_nodes'] == [dialog_node_model]
+ assert req_body['counterexamples'] == [counterexample_model]
+ assert req_body['metadata'] == {'anyKey': 'anyValue'}
+ assert req_body['learning_opt_out'] == False
+ assert req_body['system_settings'] == workspace_system_settings_model
+ assert req_body['webhooks'] == [webhook_model]
+ assert req_body['intents'] == [create_intent_model]
+ assert req_body['entities'] == [create_entity_model]
+
+ def test_create_workspace_all_params_with_retries(self):
+ # Enable retries and run test_create_workspace_all_params.
+ _service.enable_retries()
+ self.test_create_workspace_all_params()
+
+ # Disable retries and run test_create_workspace_all_params.
+ _service.disable_retries()
+ self.test_create_workspace_all_params()
+
+ @responses.activate
+ def test_create_workspace_required_params(self):
+ """
+ test_create_workspace_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces')
+        mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Invoke method
+ response = _service.create_workspace()
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+
+ def test_create_workspace_required_params_with_retries(self):
+ # Enable retries and run test_create_workspace_required_params.
+ _service.enable_retries()
+ self.test_create_workspace_required_params()
+
+ # Disable retries and run test_create_workspace_required_params.
+ _service.disable_retries()
+ self.test_create_workspace_required_params()
+
+ @responses.activate
+ def test_create_workspace_value_error(self):
+ """
+ test_create_workspace_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces')
+        mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
}
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- entity = service.update_entity(
- workspace_id='boguswid',
- entity='pizza_toppings',
- new_entity='pizza_toppings').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert entity == response
- # Verify that response can be converted to an Entity
- Entity._from_dict(entity)
-
-
-#########################
-# examples
-#########################
-
-
-@responses.activate
-def test_create_example():
- endpoint = '/v1/workspaces/{0}/intents/{1}/examples'.format(
- 'boguswid', 'pizza_order')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "text": "Gimme a pizza with pepperoni",
- "created": "2016-07-11T16:39:01.774Z",
- "updated": "2015-12-07T18:53:59.153Z"
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=201,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- example = service.create_example(
- workspace_id='boguswid',
- intent='pizza_order',
- text='Gimme a pizza with pepperoni',
- mentions=[{'entity': 'xxx', 'location': [0, 1]}]).get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert example == response
- # Verify that response can be converted to an Example
- Example._from_dict(example)
-
-
-@responses.activate
-def test_delete_example():
- endpoint = '/v1/workspaces/{0}/intents/{1}/examples/{2}'.format(
- 'boguswid', 'pizza_order', 'Gimme%20a%20pizza%20with%20pepperoni')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {}
- responses.add(
- responses.DELETE,
- url,
- body=json.dumps(response),
- status=204,
- content_type='')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- example = service.delete_example(
- workspace_id='boguswid',
- intent='pizza_order',
- text='Gimme a pizza with pepperoni').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert example is None
-
-
-@responses.activate
-def test_get_example():
- endpoint = '/v1/workspaces/{0}/intents/{1}/examples/{2}'.format(
- 'boguswid', 'pizza_order', 'Gimme%20a%20pizza%20with%20pepperoni')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "text": "Gimme a pizza with pepperoni",
- "created": "2016-07-11T23:53:59.153Z",
- "updated": "2016-12-07T18:53:59.153Z"
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- example = service.get_example(
- workspace_id='boguswid',
- intent='pizza_order',
- text='Gimme a pizza with pepperoni').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert example == response
- # Verify that response can be converted to an Example
- Example._from_dict(example)
-
-
-@responses.activate
-def test_list_examples():
- endpoint = '/v1/workspaces/{0}/intents/{1}/examples'.format(
- 'boguswid', 'pizza_order')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "examples": [{
- "text": "Can I order a pizza?",
- "created": "2016-07-11T16:39:01.774Z",
- "updated": "2015-12-07T18:53:59.153Z"
- }, {
- "text": "Gimme a pizza with pepperoni",
- "created": "2016-07-11T16:39:01.774Z",
- "updated": "2015-12-07T18:53:59.153Z"
- }],
- "pagination": {
- "refresh_url":
- "/v1/workspaces/pizza_app-e0f3/intents/order/examples?version=2017-12-18&page_limit=2",
- "next_url":
- "/v1/workspaces/pizza_app-e0f3/intents/order/examples?cursor=base64=&version=2017-12-18&page_limit=2"
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.create_workspace(**req_copy)
+
+ def test_create_workspace_value_error_with_retries(self):
+ # Enable retries and run test_create_workspace_value_error.
+ _service.enable_retries()
+ self.test_create_workspace_value_error()
+
+ # Disable retries and run test_create_workspace_value_error.
+ _service.disable_retries()
+ self.test_create_workspace_value_error()
+
+
+class TestGetWorkspace:
+ """
+ Test Class for get_workspace
+ """
+
+ @responses.activate
+ def test_get_workspace_all_params(self):
+ """
+ get_workspace()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString')
+ mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ export = False
+ include_audit = False
+ sort = 'stable'
+
+ # Invoke method
+ response = _service.get_workspace(
+ workspace_id,
+ export=export,
+ include_audit=include_audit,
+ sort=sort,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'export={}'.format('true' if export else 'false') in query_string
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ assert 'sort={}'.format(sort) in query_string
+
+ def test_get_workspace_all_params_with_retries(self):
+ # Enable retries and run test_get_workspace_all_params.
+ _service.enable_retries()
+ self.test_get_workspace_all_params()
+
+ # Disable retries and run test_get_workspace_all_params.
+ _service.disable_retries()
+ self.test_get_workspace_all_params()
+
+ @responses.activate
+ def test_get_workspace_required_params(self):
+ """
+ test_get_workspace_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString')
+ mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Invoke method
+ response = _service.get_workspace(
+ workspace_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_get_workspace_required_params_with_retries(self):
+ # Enable retries and run test_get_workspace_required_params.
+ _service.enable_retries()
+ self.test_get_workspace_required_params()
+
+ # Disable retries and run test_get_workspace_required_params.
+ _service.disable_retries()
+ self.test_get_workspace_required_params()
+
+ @responses.activate
+ def test_get_workspace_value_error(self):
+ """
+ test_get_workspace_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString')
+ mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
}
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- examples = service.list_examples(
- workspace_id='boguswid', intent='pizza_order').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert examples == response
- # Verify that response can be converted to an ExampleCollection
- ExampleCollection._from_dict(examples)
-
-
-@responses.activate
-def test_update_example():
- endpoint = '/v1/workspaces/{0}/intents/{1}/examples/{2}'.format(
- 'boguswid', 'pizza_order', 'Gimme%20a%20pizza%20with%20pepperoni')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "text": "Gimme a pizza with pepperoni",
- "created": "2016-07-11T23:53:59.153Z",
- "updated": "2015-12-07T18:53:59.153Z"
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- example = service.update_example(
- workspace_id='boguswid',
- intent='pizza_order',
- text='Gimme a pizza with pepperoni',
- new_text='Gimme a pizza with pepperoni',
- new_mentions=[{'entity': 'xxx', 'location': [0, 1]}]).get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert example == response
- # Verify that response can be converted to an Example
- Example._from_dict(example)
-
-
-#########################
-# intents
-#########################
-
-
-@responses.activate
-def test_create_intent():
- endpoint = '/v1/workspaces/{0}/intents'.format('boguswid')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "intent": "pizza_order",
- "created": "2015-12-06T23:53:59.153Z",
- "updated": "2015-12-07T18:53:59.153Z",
- "description": "User wants to start a new pizza order"
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=201,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- intent = service.create_intent(
- workspace_id='boguswid',
- intent='pizza_order',
- description='User wants to start a new pizza order').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert intent == response
- # Verify that response can be converted to an Intent
- Intent._from_dict(intent)
-
-
-@responses.activate
-def test_delete_intent():
- endpoint = '/v1/workspaces/{0}/intents/{1}'.format('boguswid',
- 'pizza_order')
- url = '{0}{1}'.format(base_url, endpoint)
- response = None
- responses.add(
- responses.DELETE,
- url,
- body=json.dumps(response),
- status=204,
- content_type='')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- intent = service.delete_intent(
- workspace_id='boguswid', intent='pizza_order').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert intent is None
-
-
-@responses.activate
-def test_get_intent():
- endpoint = '/v1/workspaces/{0}/intents/{1}'.format('boguswid',
- 'pizza_order')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "intent": "pizza_order",
- "created": "2015-12-06T23:53:59.153Z",
- "updated": "2015-12-07T18:53:59.153Z",
- "description": "User wants to start a new pizza order"
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- intent = service.get_intent(
- workspace_id='boguswid', intent='pizza_order', export=False).get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert intent == response
- # Verify that response can be converted to an Intent
- Intent._from_dict(intent)
-
-@responses.activate
-def test_list_intents():
- endpoint = '/v1/workspaces/{0}/intents'.format('boguswid')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "intents": [{
- "intent": "pizza_order",
- "created": "2015-12-06T23:53:59.153Z",
- "updated": "2015-12-07T18:53:59.153Z",
- "description": "User wants to start a new pizza order"
- }],
- "pagination": {
- "refresh_url":
- "/v1/workspaces/pizza_app-e0f3/intents?version=2017-12-18&page_limit=1",
- "next_url":
- "/v1/workspaces/pizza_app-e0f3/intents?cursor=base64=&version=2017-12-18&page_limit=1"
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.get_workspace(**req_copy)
+
+ def test_get_workspace_value_error_with_retries(self):
+ # Enable retries and run test_get_workspace_value_error.
+ _service.enable_retries()
+ self.test_get_workspace_value_error()
+
+ # Disable retries and run test_get_workspace_value_error.
+ _service.disable_retries()
+ self.test_get_workspace_value_error()
+
+
+class TestUpdateWorkspace:
+ """
+ Test Class for update_workspace
+ """
+
+ @responses.activate
+ def test_update_workspace_all_params(self):
+ """
+ update_workspace()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString')
+ mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a DialogNodeOutputTextValuesElement model
+ dialog_node_output_text_values_element_model = {}
+ dialog_node_output_text_values_element_model['text'] = 'testString'
+
+ # Construct a dict representation of a ResponseGenericChannel model
+ response_generic_channel_model = {}
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model
+ dialog_node_output_generic_model = {}
+ dialog_node_output_generic_model['response_type'] = 'text'
+ dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model]
+ dialog_node_output_generic_model['selection_policy'] = 'sequential'
+ dialog_node_output_generic_model['delimiter'] = '\\n'
+ dialog_node_output_generic_model['channels'] = [response_generic_channel_model]
+
+ # Construct a dict representation of a DialogNodeOutputModifiers model
+ dialog_node_output_modifiers_model = {}
+ dialog_node_output_modifiers_model['overwrite'] = True
+
+ # Construct a dict representation of a DialogNodeOutput model
+ dialog_node_output_model = {}
+ dialog_node_output_model['generic'] = [dialog_node_output_generic_model]
+ dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model
+ dialog_node_output_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeContext model
+ dialog_node_context_model = {}
+ dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_context_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeNextStep model
+ dialog_node_next_step_model = {}
+ dialog_node_next_step_model['behavior'] = 'get_user_input'
+ dialog_node_next_step_model['dialog_node'] = 'testString'
+ dialog_node_next_step_model['selector'] = 'condition'
+
+ # Construct a dict representation of a DialogNodeAction model
+ dialog_node_action_model = {}
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ # Construct a dict representation of a DialogNode model
+ dialog_node_model = {}
+ dialog_node_model['dialog_node'] = 'testString'
+ dialog_node_model['description'] = 'testString'
+ dialog_node_model['conditions'] = 'testString'
+ dialog_node_model['parent'] = 'testString'
+ dialog_node_model['previous_sibling'] = 'testString'
+ dialog_node_model['output'] = dialog_node_output_model
+ dialog_node_model['context'] = dialog_node_context_model
+ dialog_node_model['metadata'] = {'anyKey': 'anyValue'}
+ dialog_node_model['next_step'] = dialog_node_next_step_model
+ dialog_node_model['title'] = 'testString'
+ dialog_node_model['type'] = 'standard'
+ dialog_node_model['event_name'] = 'focus'
+ dialog_node_model['variable'] = 'testString'
+ dialog_node_model['actions'] = [dialog_node_action_model]
+ dialog_node_model['digress_in'] = 'not_available'
+ dialog_node_model['digress_out'] = 'allow_returning'
+ dialog_node_model['digress_out_slots'] = 'not_allowed'
+ dialog_node_model['user_label'] = 'testString'
+ dialog_node_model['disambiguation_opt_out'] = False
+
+ # Construct a dict representation of a Counterexample model
+ counterexample_model = {}
+ counterexample_model['text'] = 'testString'
+
+ # Construct a dict representation of a WorkspaceSystemSettingsTooling model
+ workspace_system_settings_tooling_model = {}
+ workspace_system_settings_tooling_model['store_generic_responses'] = True
+
+ # Construct a dict representation of a WorkspaceSystemSettingsDisambiguation model
+ workspace_system_settings_disambiguation_model = {}
+ workspace_system_settings_disambiguation_model['prompt'] = 'testString'
+ workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString'
+ workspace_system_settings_disambiguation_model['enabled'] = False
+ workspace_system_settings_disambiguation_model['sensitivity'] = 'auto'
+ workspace_system_settings_disambiguation_model['randomize'] = True
+ workspace_system_settings_disambiguation_model['max_suggestions'] = 1
+ workspace_system_settings_disambiguation_model['suggestion_text_policy'] = 'testString'
+
+ # Construct a dict representation of a WorkspaceSystemSettingsSystemEntities model
+ workspace_system_settings_system_entities_model = {}
+ workspace_system_settings_system_entities_model['enabled'] = False
+
+ # Construct a dict representation of a WorkspaceSystemSettingsOffTopic model
+ workspace_system_settings_off_topic_model = {}
+ workspace_system_settings_off_topic_model['enabled'] = False
+
+ # Construct a dict representation of a WorkspaceSystemSettingsNlp model
+ workspace_system_settings_nlp_model = {}
+ workspace_system_settings_nlp_model['model'] = 'testString'
+
+ # Construct a dict representation of a WorkspaceSystemSettings model
+ workspace_system_settings_model = {}
+ workspace_system_settings_model['tooling'] = workspace_system_settings_tooling_model
+ workspace_system_settings_model['disambiguation'] = workspace_system_settings_disambiguation_model
+ workspace_system_settings_model['human_agent_assist'] = {'anyKey': 'anyValue'}
+ workspace_system_settings_model['spelling_suggestions'] = False
+ workspace_system_settings_model['spelling_auto_correct'] = False
+ workspace_system_settings_model['system_entities'] = workspace_system_settings_system_entities_model
+ workspace_system_settings_model['off_topic'] = workspace_system_settings_off_topic_model
+ workspace_system_settings_model['nlp'] = workspace_system_settings_nlp_model
+ workspace_system_settings_model['foo'] = 'testString'
+
+ # Construct a dict representation of a WebhookHeader model
+ webhook_header_model = {}
+ webhook_header_model['name'] = 'testString'
+ webhook_header_model['value'] = 'testString'
+
+ # Construct a dict representation of a Webhook model
+ webhook_model = {}
+ webhook_model['url'] = 'testString'
+ webhook_model['name'] = 'testString'
+ webhook_model['headers'] = [webhook_header_model]
+
+ # Construct a dict representation of a Mention model
+ mention_model = {}
+ mention_model['entity'] = 'testString'
+ mention_model['location'] = [38]
+
+ # Construct a dict representation of a Example model
+ example_model = {}
+ example_model['text'] = 'testString'
+ example_model['mentions'] = [mention_model]
+
+ # Construct a dict representation of a CreateIntent model
+ create_intent_model = {}
+ create_intent_model['intent'] = 'testString'
+ create_intent_model['description'] = 'testString'
+ create_intent_model['examples'] = [example_model]
+
+ # Construct a dict representation of a CreateValue model
+ create_value_model = {}
+ create_value_model['value'] = 'testString'
+ create_value_model['metadata'] = {'anyKey': 'anyValue'}
+ create_value_model['type'] = 'synonyms'
+ create_value_model['synonyms'] = ['testString']
+ create_value_model['patterns'] = ['testString']
+
+ # Construct a dict representation of a CreateEntity model
+ create_entity_model = {}
+ create_entity_model['entity'] = 'testString'
+ create_entity_model['description'] = 'testString'
+ create_entity_model['metadata'] = {'anyKey': 'anyValue'}
+ create_entity_model['fuzzy_match'] = True
+ create_entity_model['values'] = [create_value_model]
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ name = 'testString'
+ description = 'testString'
+ language = 'testString'
+ dialog_nodes = [dialog_node_model]
+ counterexamples = [counterexample_model]
+ metadata = {'anyKey': 'anyValue'}
+ learning_opt_out = False
+ system_settings = workspace_system_settings_model
+ webhooks = [webhook_model]
+ intents = [create_intent_model]
+ entities = [create_entity_model]
+ append = False
+ include_audit = False
+
+ # Invoke method
+ response = _service.update_workspace(
+ workspace_id,
+ name=name,
+ description=description,
+ language=language,
+ dialog_nodes=dialog_nodes,
+ counterexamples=counterexamples,
+ metadata=metadata,
+ learning_opt_out=learning_opt_out,
+ system_settings=system_settings,
+ webhooks=webhooks,
+ intents=intents,
+ entities=entities,
+ append=append,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'append={}'.format('true' if append else 'false') in query_string
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['name'] == 'testString'
+ assert req_body['description'] == 'testString'
+ assert req_body['language'] == 'testString'
+ assert req_body['dialog_nodes'] == [dialog_node_model]
+ assert req_body['counterexamples'] == [counterexample_model]
+ assert req_body['metadata'] == {'anyKey': 'anyValue'}
+ assert req_body['learning_opt_out'] == False
+ assert req_body['system_settings'] == workspace_system_settings_model
+ assert req_body['webhooks'] == [webhook_model]
+ assert req_body['intents'] == [create_intent_model]
+ assert req_body['entities'] == [create_entity_model]
+
+ def test_update_workspace_all_params_with_retries(self):
+ # Enable retries and run test_update_workspace_all_params.
+ _service.enable_retries()
+ self.test_update_workspace_all_params()
+
+ # Disable retries and run test_update_workspace_all_params.
+ _service.disable_retries()
+ self.test_update_workspace_all_params()
+
+ @responses.activate
+ def test_update_workspace_required_params(self):
+ """
+ test_update_workspace_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString')
+ mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Invoke method
+ response = _service.update_workspace(
+ workspace_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_update_workspace_required_params_with_retries(self):
+ # Enable retries and run test_update_workspace_required_params.
+ _service.enable_retries()
+ self.test_update_workspace_required_params()
+
+ # Disable retries and run test_update_workspace_required_params.
+ _service.disable_retries()
+ self.test_update_workspace_required_params()
+
+ @responses.activate
+ def test_update_workspace_value_error(self):
+ """
+ test_update_workspace_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString')
+ mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
}
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- intents = service.list_intents(workspace_id='boguswid', export=False).get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert intents == response
- # Verify that response can be converted to an IntentCollection
- IntentCollection._from_dict(intents)
-
-@responses.activate
-def test_update_intent():
- endpoint = '/v1/workspaces/{0}/intents/{1}'.format('boguswid',
- 'pizza_order')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "intent": "pizza_order",
- "created": "2015-12-06T23:53:59.153Z",
- "updated": "2015-12-07T18:53:59.153Z",
- "description": "User wants to start a new pizza order"
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- intent = service.update_intent(
- workspace_id='boguswid',
- intent='pizza_order',
- new_intent='pizza_order',
- new_description='User wants to start a new pizza order').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert intent == response
- # Verify that response can be converted to an Intent
- Intent._from_dict(intent)
-
-def test_intent_models():
- intent = Intent(intent="pizza_order",
- created=datetime.datetime(2015, 12, 6, 23, 53, 59, 15300, tzinfo=tzutc()),
- updated=datetime.datetime(2015, 12, 7, 18, 53, 59, 15300, tzinfo=tzutc()),
- description="User wants to start a new pizza order")
- intentDict = intent._to_dict()
- check = Intent._from_dict(intentDict)
- assert intent == check
-
-
-#########################
-# logs
-#########################
-
-
-@responses.activate
-def test_list_logs():
- endpoint = '/v1/workspaces/{0}/logs'.format('boguswid')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "logs": [{
- "request": {
- "input": {
- "text": "Can you turn off the AC"
- },
- "context": {
- "conversation_id": "f2c7e362-4cc8-4761-8b0f-9ccd70c63bca",
- "system": {}
- }
- },
- "response": {
- "input": {
- "text": "Can you turn off the AC"
- },
- "context": {
- "conversation_id": "f2c7e362-4cc8-4761-8b0f-9ccd70c63bca",
- "system": {
- "dialog_stack": ["root"],
- "dialog_turn_counter": 1,
- "dialog_request_counter": 1
- },
- "defaultCounter": 0
- },
- "entities": [],
- "intents": [{
- "intent": "turn_off",
- "confidence": 0.9332477126694649
- }],
- "output": {
- "log_messages": [],
- "text": [
- "Hi. It looks like a nice drive today. What would you like me to do?"
- ],
- "nodes_visited": ["node_1_1467221909631"]
- }
- },
- "request_timestamp": "2016-07-16T09:22:38.960Z",
- "response_timestamp": "2016-07-16T09:22:39.011Z",
- "log_id": "e70d6c12-582d-47a8-a6a2-845120a1f232"
- }],
- "pagination": {
- "next_url":
- "/v1/workspaces/15fb0e8a-463d-4fec-86aa-a737d9c38a32/logs?cursor=dOfVSuh6fBpDuOxEL9m1S7JKDV7KLuBmRR+lQG1s1i/rVnBZ0ZBVCuy53ruHgPImC31gQv5prUsJ77e0Mj+6sGu/yfusHYF5&version=2016-07-11&filter=response.top_intent:turn_off&page_limit=1",
- "matched":
- 215
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.update_workspace(**req_copy)
+
+ def test_update_workspace_value_error_with_retries(self):
+ # Enable retries and run test_update_workspace_value_error.
+ _service.enable_retries()
+ self.test_update_workspace_value_error()
+
+ # Disable retries and run test_update_workspace_value_error.
+ _service.disable_retries()
+ self.test_update_workspace_value_error()
+
+
+class TestDeleteWorkspace:
+ """
+ Test Class for delete_workspace
+ """
+
+ @responses.activate
+ def test_delete_workspace_all_params(self):
+ """
+ delete_workspace()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Invoke method
+ response = _service.delete_workspace(
+ workspace_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_delete_workspace_all_params_with_retries(self):
+ # Enable retries and run test_delete_workspace_all_params.
+ _service.enable_retries()
+ self.test_delete_workspace_all_params()
+
+ # Disable retries and run test_delete_workspace_all_params.
+ _service.disable_retries()
+ self.test_delete_workspace_all_params()
+
+ @responses.activate
+ def test_delete_workspace_value_error(self):
+ """
+ test_delete_workspace_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
}
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- logs = service.list_logs(
- workspace_id='boguswid').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert logs == response
-
-@responses.activate
-def test_list_all_logs():
- endpoint = '/v1/logs'
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "logs": [{
- "request": {
- "input": {
- "text": "Good morning"
- },
- "context": {
- "metadata": {
- "deployment": "deployment_1"
- }
- }
- },
- "response": {
- "intents": [{
- "intent": "hello",
- "confidence": 1
- }],
- "entities": [],
- "input": {
- "text": "Good morning"
- },
- "output": {
- "text": ["Hi! What can I do for you?"],
- "nodes_visited": ["node_2_1501875253968"],
- "log_messages": []
- },
- "context": {
- "metadata": {
- "deployment": "deployment_1"
- },
- "conversation_id": "81a43b48-7dca-4a7d-a0d7-6fed03fcee69",
- "system": {
- "dialog_stack": [{
- "dialog_node": "root"
- }],
- "dialog_turn_counter": 1,
- "dialog_request_counter": 1,
- "_node_output_map": {
- "node_2_1501875253968": [0]
- },
- "branch_exited": True,
- "branch_exited_reason": "completed"
- }
- }
- },
- "language": "en",
- "workspace_id": "9978a49e-ea89-4493-b33d-82298d3db20d",
- "request_timestamp": "2017-09-13T19:52:32.611Z",
- "response_timestamp": "2017-09-13T19:52:32.628Z",
- "log_id": "aa886a8a-bac5-4b91-8323-2fd61a69c9d3"
- }],
- "pagination": {}
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- logs = service.list_all_logs(
- 'language::en,request.context.metadata.deployment::deployment_1').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert logs == response
-
-
-#########################
-# message
-#########################
-
-
-@responses.activate
-def test_message():
-
- assistant = ibm_watson.AssistantV1(
- username="username", password="password", version='2016-09-20')
- assistant.set_default_headers({'x-watson-learning-opt-out': "true"})
-
- workspace_id = 'f8fdbc65-e0bd-4e43-b9f8-2975a366d4ec'
- message_url = '%s/v1/workspaces/%s/message' % (base_url, workspace_id)
- url1_str = '%s/v1/workspaces/%s/message?version=2016-09-20'
- message_url1 = url1_str % (base_url, workspace_id)
- message_response = {
- "context": {
- "conversation_id": "1b7b67c0-90ed-45dc-8508-9488bc483d5b",
- "system": {
- "dialog_stack": ["root"],
- "dialog_turn_counter": 1,
- "dialog_request_counter": 1
- }
- },
- "intents": [],
- "entities": [],
- "input": {},
- "output": {
- "text": "okay",
- "log_messages": []
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_workspace(**req_copy)
+
+ def test_delete_workspace_value_error_with_retries(self):
+ # Enable retries and run test_delete_workspace_value_error.
+ _service.enable_retries()
+ self.test_delete_workspace_value_error()
+
+ # Disable retries and run test_delete_workspace_value_error.
+ _service.disable_retries()
+ self.test_delete_workspace_value_error()
+
+
+class TestCreateWorkspaceAsync:
+ """
+ Test Class for create_workspace_async
+ """
+
+ @responses.activate
+ def test_create_workspace_async_all_params(self):
+ """
+ create_workspace_async()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces_async')
+ mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=202,
+ )
+
+ # Construct a dict representation of a DialogNodeOutputTextValuesElement model
+ dialog_node_output_text_values_element_model = {}
+ dialog_node_output_text_values_element_model['text'] = 'testString'
+
+ # Construct a dict representation of a ResponseGenericChannel model
+ response_generic_channel_model = {}
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model
+ dialog_node_output_generic_model = {}
+ dialog_node_output_generic_model['response_type'] = 'text'
+ dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model]
+ dialog_node_output_generic_model['selection_policy'] = 'sequential'
+ dialog_node_output_generic_model['delimiter'] = '\\n'
+ dialog_node_output_generic_model['channels'] = [response_generic_channel_model]
+
+ # Construct a dict representation of a DialogNodeOutputModifiers model
+ dialog_node_output_modifiers_model = {}
+ dialog_node_output_modifiers_model['overwrite'] = True
+
+ # Construct a dict representation of a DialogNodeOutput model
+ dialog_node_output_model = {}
+ dialog_node_output_model['generic'] = [dialog_node_output_generic_model]
+ dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model
+ dialog_node_output_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeContext model
+ dialog_node_context_model = {}
+ dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_context_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeNextStep model
+ dialog_node_next_step_model = {}
+ dialog_node_next_step_model['behavior'] = 'get_user_input'
+ dialog_node_next_step_model['dialog_node'] = 'testString'
+ dialog_node_next_step_model['selector'] = 'condition'
+
+ # Construct a dict representation of a DialogNodeAction model
+ dialog_node_action_model = {}
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ # Construct a dict representation of a DialogNode model
+ dialog_node_model = {}
+ dialog_node_model['dialog_node'] = 'testString'
+ dialog_node_model['description'] = 'testString'
+ dialog_node_model['conditions'] = 'testString'
+ dialog_node_model['parent'] = 'testString'
+ dialog_node_model['previous_sibling'] = 'testString'
+ dialog_node_model['output'] = dialog_node_output_model
+ dialog_node_model['context'] = dialog_node_context_model
+ dialog_node_model['metadata'] = {'anyKey': 'anyValue'}
+ dialog_node_model['next_step'] = dialog_node_next_step_model
+ dialog_node_model['title'] = 'testString'
+ dialog_node_model['type'] = 'standard'
+ dialog_node_model['event_name'] = 'focus'
+ dialog_node_model['variable'] = 'testString'
+ dialog_node_model['actions'] = [dialog_node_action_model]
+ dialog_node_model['digress_in'] = 'not_available'
+ dialog_node_model['digress_out'] = 'allow_returning'
+ dialog_node_model['digress_out_slots'] = 'not_allowed'
+ dialog_node_model['user_label'] = 'testString'
+ dialog_node_model['disambiguation_opt_out'] = False
+
+ # Construct a dict representation of a Counterexample model
+ counterexample_model = {}
+ counterexample_model['text'] = 'testString'
+
+ # Construct a dict representation of a WorkspaceSystemSettingsTooling model
+ workspace_system_settings_tooling_model = {}
+ workspace_system_settings_tooling_model['store_generic_responses'] = True
+
+ # Construct a dict representation of a WorkspaceSystemSettingsDisambiguation model
+ workspace_system_settings_disambiguation_model = {}
+ workspace_system_settings_disambiguation_model['prompt'] = 'testString'
+ workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString'
+ workspace_system_settings_disambiguation_model['enabled'] = False
+ workspace_system_settings_disambiguation_model['sensitivity'] = 'auto'
+ workspace_system_settings_disambiguation_model['randomize'] = True
+ workspace_system_settings_disambiguation_model['max_suggestions'] = 1
+ workspace_system_settings_disambiguation_model['suggestion_text_policy'] = 'testString'
+
+ # Construct a dict representation of a WorkspaceSystemSettingsSystemEntities model
+ workspace_system_settings_system_entities_model = {}
+ workspace_system_settings_system_entities_model['enabled'] = False
+
+ # Construct a dict representation of a WorkspaceSystemSettingsOffTopic model
+ workspace_system_settings_off_topic_model = {}
+ workspace_system_settings_off_topic_model['enabled'] = False
+
+ # Construct a dict representation of a WorkspaceSystemSettingsNlp model
+ workspace_system_settings_nlp_model = {}
+ workspace_system_settings_nlp_model['model'] = 'testString'
+
+ # Construct a dict representation of a WorkspaceSystemSettings model
+ workspace_system_settings_model = {}
+ workspace_system_settings_model['tooling'] = workspace_system_settings_tooling_model
+ workspace_system_settings_model['disambiguation'] = workspace_system_settings_disambiguation_model
+ workspace_system_settings_model['human_agent_assist'] = {'anyKey': 'anyValue'}
+ workspace_system_settings_model['spelling_suggestions'] = False
+ workspace_system_settings_model['spelling_auto_correct'] = False
+ workspace_system_settings_model['system_entities'] = workspace_system_settings_system_entities_model
+ workspace_system_settings_model['off_topic'] = workspace_system_settings_off_topic_model
+ workspace_system_settings_model['nlp'] = workspace_system_settings_nlp_model
+ workspace_system_settings_model['foo'] = 'testString'
+
+ # Construct a dict representation of a WebhookHeader model
+ webhook_header_model = {}
+ webhook_header_model['name'] = 'testString'
+ webhook_header_model['value'] = 'testString'
+
+ # Construct a dict representation of a Webhook model
+ webhook_model = {}
+ webhook_model['url'] = 'testString'
+ webhook_model['name'] = 'testString'
+ webhook_model['headers'] = [webhook_header_model]
+
+ # Construct a dict representation of a Mention model
+ mention_model = {}
+ mention_model['entity'] = 'testString'
+ mention_model['location'] = [38]
+
+ # Construct a dict representation of a Example model
+ example_model = {}
+ example_model['text'] = 'testString'
+ example_model['mentions'] = [mention_model]
+
+ # Construct a dict representation of a CreateIntent model
+ create_intent_model = {}
+ create_intent_model['intent'] = 'testString'
+ create_intent_model['description'] = 'testString'
+ create_intent_model['examples'] = [example_model]
+
+ # Construct a dict representation of a CreateValue model
+ create_value_model = {}
+ create_value_model['value'] = 'testString'
+ create_value_model['metadata'] = {'anyKey': 'anyValue'}
+ create_value_model['type'] = 'synonyms'
+ create_value_model['synonyms'] = ['testString']
+ create_value_model['patterns'] = ['testString']
+
+ # Construct a dict representation of a CreateEntity model
+ create_entity_model = {}
+ create_entity_model['entity'] = 'testString'
+ create_entity_model['description'] = 'testString'
+ create_entity_model['metadata'] = {'anyKey': 'anyValue'}
+ create_entity_model['fuzzy_match'] = True
+ create_entity_model['values'] = [create_value_model]
+
+ # Set up parameter values
+ name = 'testString'
+ description = 'testString'
+ language = 'testString'
+ dialog_nodes = [dialog_node_model]
+ counterexamples = [counterexample_model]
+ metadata = {'anyKey': 'anyValue'}
+ learning_opt_out = False
+ system_settings = workspace_system_settings_model
+ webhooks = [webhook_model]
+ intents = [create_intent_model]
+ entities = [create_entity_model]
+
+ # Invoke method
+ response = _service.create_workspace_async(
+ name=name,
+ description=description,
+ language=language,
+ dialog_nodes=dialog_nodes,
+ counterexamples=counterexamples,
+ metadata=metadata,
+ learning_opt_out=learning_opt_out,
+ system_settings=system_settings,
+ webhooks=webhooks,
+ intents=intents,
+ entities=entities,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 202
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['name'] == 'testString'
+ assert req_body['description'] == 'testString'
+ assert req_body['language'] == 'testString'
+ assert req_body['dialog_nodes'] == [dialog_node_model]
+ assert req_body['counterexamples'] == [counterexample_model]
+ assert req_body['metadata'] == {'anyKey': 'anyValue'}
+ assert req_body['learning_opt_out'] == False
+ assert req_body['system_settings'] == workspace_system_settings_model
+ assert req_body['webhooks'] == [webhook_model]
+ assert req_body['intents'] == [create_intent_model]
+ assert req_body['entities'] == [create_entity_model]
+
+ def test_create_workspace_async_all_params_with_retries(self):
+ # Enable retries and run test_create_workspace_async_all_params.
+ _service.enable_retries()
+ self.test_create_workspace_async_all_params()
+
+ # Disable retries and run test_create_workspace_async_all_params.
+ _service.disable_retries()
+ self.test_create_workspace_async_all_params()
+
+ @responses.activate
+ def test_create_workspace_async_required_params(self):
+ """
+ test_create_workspace_async_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces_async')
+ mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": 
"model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=202,
+ )
+
+ # Invoke method
+ response = _service.create_workspace_async()
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 202
+
+ def test_create_workspace_async_required_params_with_retries(self):
+ # Enable retries and run test_create_workspace_async_required_params.
+ _service.enable_retries()
+ self.test_create_workspace_async_required_params()
+
+ # Disable retries and run test_create_workspace_async_required_params.
+ _service.disable_retries()
+ self.test_create_workspace_async_required_params()
+
+ @responses.activate
+ def test_create_workspace_async_value_error(self):
+ """
+ test_create_workspace_async_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces_async')
+ mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": 
"model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=202,
+ )
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
}
- }
-
- responses.add(
- responses.POST,
- message_url,
- body=json.dumps(message_response),
- status=200,
- content_type='application/json')
-
- message = assistant.message(
- workspace_id=workspace_id,
- input={'text': 'Turn on the lights'},
- context=None).get_result()
-
- assert message is not None
- assert responses.calls[0].request.url == message_url1
- assert 'x-watson-learning-opt-out' in responses.calls[0].request.headers
- assert responses.calls[0].request.headers['x-watson-learning-opt-out'] == 'true'
- assert responses.calls[0].response.text == json.dumps(message_response)
-
- # test context
- responses.add(
- responses.POST,
- message_url,
- body=message_response,
- status=200,
- content_type='application/json')
-
- message_ctx = {
- 'context': {
- 'conversation_id': '1b7b67c0-90ed-45dc-8508-9488bc483d5b',
- 'system': {
- 'dialog_stack': ['root'],
- 'dialog_turn_counter': 2,
- 'dialog_request_counter': 1
- }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.create_workspace_async(**req_copy)
+
+ def test_create_workspace_async_value_error_with_retries(self):
+ # Enable retries and run test_create_workspace_async_value_error.
+ _service.enable_retries()
+ self.test_create_workspace_async_value_error()
+
+ # Disable retries and run test_create_workspace_async_value_error.
+ _service.disable_retries()
+ self.test_create_workspace_async_value_error()
+
+
+class TestUpdateWorkspaceAsync:
+ """
+ Test Class for update_workspace_async
+ """
+
+ @responses.activate
+ def test_update_workspace_async_all_params(self):
+ """
+ update_workspace_async()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces_async/testString')
+ mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": 
"model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=202,
+ )
+
+ # Construct a dict representation of a DialogNodeOutputTextValuesElement model
+ dialog_node_output_text_values_element_model = {}
+ dialog_node_output_text_values_element_model['text'] = 'testString'
+
+ # Construct a dict representation of a ResponseGenericChannel model
+ response_generic_channel_model = {}
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model
+ dialog_node_output_generic_model = {}
+ dialog_node_output_generic_model['response_type'] = 'text'
+ dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model]
+ dialog_node_output_generic_model['selection_policy'] = 'sequential'
+ dialog_node_output_generic_model['delimiter'] = '\\n'
+ dialog_node_output_generic_model['channels'] = [response_generic_channel_model]
+
+ # Construct a dict representation of a DialogNodeOutputModifiers model
+ dialog_node_output_modifiers_model = {}
+ dialog_node_output_modifiers_model['overwrite'] = True
+
+ # Construct a dict representation of a DialogNodeOutput model
+ dialog_node_output_model = {}
+ dialog_node_output_model['generic'] = [dialog_node_output_generic_model]
+ dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model
+ dialog_node_output_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeContext model
+ dialog_node_context_model = {}
+ dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_context_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeNextStep model
+ dialog_node_next_step_model = {}
+ dialog_node_next_step_model['behavior'] = 'get_user_input'
+ dialog_node_next_step_model['dialog_node'] = 'testString'
+ dialog_node_next_step_model['selector'] = 'condition'
+
+ # Construct a dict representation of a DialogNodeAction model
+ dialog_node_action_model = {}
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ # Construct a dict representation of a DialogNode model
+ dialog_node_model = {}
+ dialog_node_model['dialog_node'] = 'testString'
+ dialog_node_model['description'] = 'testString'
+ dialog_node_model['conditions'] = 'testString'
+ dialog_node_model['parent'] = 'testString'
+ dialog_node_model['previous_sibling'] = 'testString'
+ dialog_node_model['output'] = dialog_node_output_model
+ dialog_node_model['context'] = dialog_node_context_model
+ dialog_node_model['metadata'] = {'anyKey': 'anyValue'}
+ dialog_node_model['next_step'] = dialog_node_next_step_model
+ dialog_node_model['title'] = 'testString'
+ dialog_node_model['type'] = 'standard'
+ dialog_node_model['event_name'] = 'focus'
+ dialog_node_model['variable'] = 'testString'
+ dialog_node_model['actions'] = [dialog_node_action_model]
+ dialog_node_model['digress_in'] = 'not_available'
+ dialog_node_model['digress_out'] = 'allow_returning'
+ dialog_node_model['digress_out_slots'] = 'not_allowed'
+ dialog_node_model['user_label'] = 'testString'
+ dialog_node_model['disambiguation_opt_out'] = False
+
+ # Construct a dict representation of a Counterexample model
+ counterexample_model = {}
+ counterexample_model['text'] = 'testString'
+
+ # Construct a dict representation of a WorkspaceSystemSettingsTooling model
+ workspace_system_settings_tooling_model = {}
+ workspace_system_settings_tooling_model['store_generic_responses'] = True
+
+ # Construct a dict representation of a WorkspaceSystemSettingsDisambiguation model
+ workspace_system_settings_disambiguation_model = {}
+ workspace_system_settings_disambiguation_model['prompt'] = 'testString'
+ workspace_system_settings_disambiguation_model['none_of_the_above_prompt'] = 'testString'
+ workspace_system_settings_disambiguation_model['enabled'] = False
+ workspace_system_settings_disambiguation_model['sensitivity'] = 'auto'
+ workspace_system_settings_disambiguation_model['randomize'] = True
+ workspace_system_settings_disambiguation_model['max_suggestions'] = 1
+ workspace_system_settings_disambiguation_model['suggestion_text_policy'] = 'testString'
+
+ # Construct a dict representation of a WorkspaceSystemSettingsSystemEntities model
+ workspace_system_settings_system_entities_model = {}
+ workspace_system_settings_system_entities_model['enabled'] = False
+
+ # Construct a dict representation of a WorkspaceSystemSettingsOffTopic model
+ workspace_system_settings_off_topic_model = {}
+ workspace_system_settings_off_topic_model['enabled'] = False
+
+ # Construct a dict representation of a WorkspaceSystemSettingsNlp model
+ workspace_system_settings_nlp_model = {}
+ workspace_system_settings_nlp_model['model'] = 'testString'
+
+ # Construct a dict representation of a WorkspaceSystemSettings model
+ workspace_system_settings_model = {}
+ workspace_system_settings_model['tooling'] = workspace_system_settings_tooling_model
+ workspace_system_settings_model['disambiguation'] = workspace_system_settings_disambiguation_model
+ workspace_system_settings_model['human_agent_assist'] = {'anyKey': 'anyValue'}
+ workspace_system_settings_model['spelling_suggestions'] = False
+ workspace_system_settings_model['spelling_auto_correct'] = False
+ workspace_system_settings_model['system_entities'] = workspace_system_settings_system_entities_model
+ workspace_system_settings_model['off_topic'] = workspace_system_settings_off_topic_model
+ workspace_system_settings_model['nlp'] = workspace_system_settings_nlp_model
+ workspace_system_settings_model['foo'] = 'testString'
+
+ # Construct a dict representation of a WebhookHeader model
+ webhook_header_model = {}
+ webhook_header_model['name'] = 'testString'
+ webhook_header_model['value'] = 'testString'
+
+ # Construct a dict representation of a Webhook model
+ webhook_model = {}
+ webhook_model['url'] = 'testString'
+ webhook_model['name'] = 'testString'
+ webhook_model['headers'] = [webhook_header_model]
+
+ # Construct a dict representation of a Mention model
+ mention_model = {}
+ mention_model['entity'] = 'testString'
+ mention_model['location'] = [38]
+
+ # Construct a dict representation of a Example model
+ example_model = {}
+ example_model['text'] = 'testString'
+ example_model['mentions'] = [mention_model]
+
+ # Construct a dict representation of a CreateIntent model
+ create_intent_model = {}
+ create_intent_model['intent'] = 'testString'
+ create_intent_model['description'] = 'testString'
+ create_intent_model['examples'] = [example_model]
+
+ # Construct a dict representation of a CreateValue model
+ create_value_model = {}
+ create_value_model['value'] = 'testString'
+ create_value_model['metadata'] = {'anyKey': 'anyValue'}
+ create_value_model['type'] = 'synonyms'
+ create_value_model['synonyms'] = ['testString']
+ create_value_model['patterns'] = ['testString']
+
+ # Construct a dict representation of a CreateEntity model
+ create_entity_model = {}
+ create_entity_model['entity'] = 'testString'
+ create_entity_model['description'] = 'testString'
+ create_entity_model['metadata'] = {'anyKey': 'anyValue'}
+ create_entity_model['fuzzy_match'] = True
+ create_entity_model['values'] = [create_value_model]
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ name = 'testString'
+ description = 'testString'
+ language = 'testString'
+ dialog_nodes = [dialog_node_model]
+ counterexamples = [counterexample_model]
+ metadata = {'anyKey': 'anyValue'}
+ learning_opt_out = False
+ system_settings = workspace_system_settings_model
+ webhooks = [webhook_model]
+ intents = [create_intent_model]
+ entities = [create_entity_model]
+ append = False
+
+ # Invoke method
+ response = _service.update_workspace_async(
+ workspace_id,
+ name=name,
+ description=description,
+ language=language,
+ dialog_nodes=dialog_nodes,
+ counterexamples=counterexamples,
+ metadata=metadata,
+ learning_opt_out=learning_opt_out,
+ system_settings=system_settings,
+ webhooks=webhooks,
+ intents=intents,
+ entities=entities,
+ append=append,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 202
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'append={}'.format('true' if append else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['name'] == 'testString'
+ assert req_body['description'] == 'testString'
+ assert req_body['language'] == 'testString'
+ assert req_body['dialog_nodes'] == [dialog_node_model]
+ assert req_body['counterexamples'] == [counterexample_model]
+ assert req_body['metadata'] == {'anyKey': 'anyValue'}
+ assert req_body['learning_opt_out'] == False
+ assert req_body['system_settings'] == workspace_system_settings_model
+ assert req_body['webhooks'] == [webhook_model]
+ assert req_body['intents'] == [create_intent_model]
+ assert req_body['entities'] == [create_entity_model]
+
+ def test_update_workspace_async_all_params_with_retries(self):
+ # Enable retries and run test_update_workspace_async_all_params.
+ _service.enable_retries()
+ self.test_update_workspace_async_all_params()
+
+ # Disable retries and run test_update_workspace_async_all_params.
+ _service.disable_retries()
+ self.test_update_workspace_async_all_params()
+
+ @responses.activate
+ def test_update_workspace_async_required_params(self):
+ """
+ test_update_workspace_async_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces_async/testString')
+ mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": 
"model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=202,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Invoke method
+ response = _service.update_workspace_async(
+ workspace_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 202
+
+ def test_update_workspace_async_required_params_with_retries(self):
+ # Enable retries and run test_update_workspace_async_required_params.
+ _service.enable_retries()
+ self.test_update_workspace_async_required_params()
+
+ # Disable retries and run test_update_workspace_async_required_params.
+ _service.disable_retries()
+ self.test_update_workspace_async_required_params()
+
+ @responses.activate
+ def test_update_workspace_async_value_error(self):
+ """
+ test_update_workspace_async_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces_async/testString')
+ mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": 
"model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=202,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.update_workspace_async(**req_copy)
+
+ def test_update_workspace_async_value_error_with_retries(self):
+ # Enable retries and run test_update_workspace_async_value_error.
+ _service.enable_retries()
+ self.test_update_workspace_async_value_error()
+
+ # Disable retries and run test_update_workspace_async_value_error.
+ _service.disable_retries()
+ self.test_update_workspace_async_value_error()
+
+
+class TestExportWorkspaceAsync:
+ """
+ Test Class for export_workspace_async
+ """
+
+ @responses.activate
+ def test_export_workspace_async_all_params(self):
+ """
+ export_workspace_async()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces_async/testString/export')
+ mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": 
"model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ include_audit = False
+ sort = 'stable'
+ verbose = False
+
+ # Invoke method
+ response = _service.export_workspace_async(
+ workspace_id,
+ include_audit=include_audit,
+ sort=sort,
+ verbose=verbose,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ assert 'sort={}'.format(sort) in query_string
+ assert 'verbose={}'.format('true' if verbose else 'false') in query_string
+
def test_export_workspace_async_all_params_with_retries(self):
    """Run test_export_workspace_async_all_params with retries enabled, then disabled."""
    # First pass exercises the retry-enabled transport, second pass the default one.
    for toggle_retries in (_service.enable_retries, _service.disable_retries):
        toggle_retries()
        self.test_export_workspace_async_all_params()
+
@responses.activate
def test_export_workspace_async_required_params(self):
    """
    test_export_workspace_async_required_params()
    """
    # Register the mocked GET endpoint with a canned workspace-export payload.
    endpoint = preprocess_url('/v1/workspaces_async/testString/export')
    mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
    responses.add(
        responses.GET,
        endpoint,
        body=mock_response,
        content_type='application/json',
        status=200,
    )

    # Invoke the operation with only its required path parameter.
    workspace_id = 'testString'
    response = _service.export_workspace_async(
        workspace_id,
        headers={},
    )

    # Exactly one HTTP request must have been made, and it must have succeeded.
    assert len(responses.calls) == 1
    assert response.status_code == 200
+
def test_export_workspace_async_required_params_with_retries(self):
    """Run test_export_workspace_async_required_params with retries enabled, then disabled."""
    # Same request both times; only the retry configuration of the client differs.
    for toggle_retries in (_service.enable_retries, _service.disable_retries):
        toggle_retries()
        self.test_export_workspace_async_required_params()
+
@responses.activate
def test_export_workspace_async_value_error(self):
    """
    test_export_workspace_async_value_error()

    Omit each required parameter in turn and verify the client raises
    ValueError before issuing any HTTP request.
    """
    # Set up mock (should never actually be hit by this test).
    url = preprocess_url('/v1/workspaces_async/testString/export')
    mock_response = '{"name": "name", "description": "description", "language": "language", "workspace_id": "workspace_id", "dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "metadata": {"anyKey": "anyValue"}, "learning_opt_out": false, "system_settings": {"tooling": {"store_generic_responses": false}, "disambiguation": {"prompt": "prompt", "none_of_the_above_prompt": "none_of_the_above_prompt", "enabled": false, "sensitivity": "auto", "randomize": false, "max_suggestions": 1, "suggestion_text_policy": "suggestion_text_policy"}, "human_agent_assist": {"anyKey": "anyValue"}, "spelling_suggestions": false, "spelling_auto_correct": false, "system_entities": {"enabled": false}, "off_topic": {"enabled": false}, "nlp": {"model": "model"}}, "status": "Available", "status_errors": [{"message": "message"}], "webhooks": [{"url": "url", "name": "name", "headers": [{"name": "name", "value": "value"}]}], "intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "counts": {"intent": 6, "entity": 6, "node": 4}}'
    responses.add(
        responses.GET,
        url,
        body=mock_response,
        content_type='application/json',
        status=200,
    )

    # Set up parameter values
    workspace_id = 'testString'

    # Pass in all but one required param and check for a ValueError
    req_param_dict = {
        "workspace_id": workspace_id,
    }
    for param in req_param_dict.keys():
        # Compare with '!=' (value equality). The generated 'is not' only
        # worked by accident of the keys being the identical string objects.
        req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
        with pytest.raises(ValueError):
            _service.export_workspace_async(**req_copy)
+
def test_export_workspace_async_value_error_with_retries(self):
    """Run test_export_workspace_async_value_error with retries enabled, then disabled."""
    # Validation errors must be raised client-side regardless of retry settings.
    for toggle_retries in (_service.enable_retries, _service.disable_retries):
        toggle_retries()
        self.test_export_workspace_async_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Workspaces
+##############################################################################
+
+##############################################################################
+# Start of Service: Intents
+##############################################################################
+# region
+
+
class TestListIntents:
    """
    Test Class for list_intents
    """

    @responses.activate
    def test_list_intents_all_params(self):
        """
        list_intents()
        """
        # Set up mock
        url = preprocess_url('/v1/workspaces/testString/intents')
        mock_response = '{"intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        workspace_id = 'testString'
        export = False
        page_limit = 100
        include_count = False
        sort = 'intent'
        cursor = 'testString'
        include_audit = False

        # Invoke method
        response = _service.list_intents(
            workspace_id,
            export=export,
            page_limit=page_limit,
            include_count=include_count,
            sort=sort,
            cursor=cursor,
            include_audit=include_audit,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans serialize as lowercase 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'export={}'.format('true' if export else 'false') in query_string
        assert 'page_limit={}'.format(page_limit) in query_string
        assert 'include_count={}'.format('true' if include_count else 'false') in query_string
        assert 'sort={}'.format(sort) in query_string
        assert 'cursor={}'.format(cursor) in query_string
        assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string

    def test_list_intents_all_params_with_retries(self):
        # Enable retries and run test_list_intents_all_params.
        _service.enable_retries()
        self.test_list_intents_all_params()

        # Disable retries and run test_list_intents_all_params.
        _service.disable_retries()
        self.test_list_intents_all_params()

    @responses.activate
    def test_list_intents_required_params(self):
        """
        test_list_intents_required_params()
        """
        # Set up mock
        url = preprocess_url('/v1/workspaces/testString/intents')
        mock_response = '{"intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        workspace_id = 'testString'

        # Invoke method
        response = _service.list_intents(
            workspace_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_list_intents_required_params_with_retries(self):
        # Enable retries and run test_list_intents_required_params.
        _service.enable_retries()
        self.test_list_intents_required_params()

        # Disable retries and run test_list_intents_required_params.
        _service.disable_retries()
        self.test_list_intents_required_params()

    @responses.activate
    def test_list_intents_value_error(self):
        """
        test_list_intents_value_error()
        """
        # Set up mock
        url = preprocess_url('/v1/workspaces/testString/intents')
        mock_response = '{"intents": [{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        workspace_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "workspace_id": workspace_id,
        }
        for param in req_param_dict.keys():
            # Use '!=' (value equality); 'is not' tests object identity and
            # only worked here by accident of CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.list_intents(**req_copy)

    def test_list_intents_value_error_with_retries(self):
        # Enable retries and run test_list_intents_value_error.
        _service.enable_retries()
        self.test_list_intents_value_error()

        # Disable retries and run test_list_intents_value_error.
        _service.disable_retries()
        self.test_list_intents_value_error()
+
+
class TestCreateIntent:
    """
    Test Class for create_intent
    """

    @responses.activate
    def test_create_intent_all_params(self):
        """
        create_intent()
        """
        # Set up mock
        url = preprocess_url('/v1/workspaces/testString/intents')
        mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Construct a dict representation of a Mention model
        mention_model = {}
        mention_model['entity'] = 'testString'
        mention_model['location'] = [38]

        # Construct a dict representation of a Example model
        example_model = {}
        example_model['text'] = 'testString'
        example_model['mentions'] = [mention_model]

        # Set up parameter values
        workspace_id = 'testString'
        intent = 'testString'
        description = 'testString'
        examples = [example_model]
        include_audit = False

        # Invoke method
        response = _service.create_intent(
            workspace_id,
            intent,
            description=description,
            examples=examples,
            include_audit=include_audit,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate query params (booleans serialize as lowercase 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['intent'] == 'testString'
        assert req_body['description'] == 'testString'
        assert req_body['examples'] == [example_model]

    def test_create_intent_all_params_with_retries(self):
        # Enable retries and run test_create_intent_all_params.
        _service.enable_retries()
        self.test_create_intent_all_params()

        # Disable retries and run test_create_intent_all_params.
        _service.disable_retries()
        self.test_create_intent_all_params()

    @responses.activate
    def test_create_intent_required_params(self):
        """
        test_create_intent_required_params()
        """
        # Set up mock
        url = preprocess_url('/v1/workspaces/testString/intents')
        mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Construct a dict representation of a Mention model
        mention_model = {}
        mention_model['entity'] = 'testString'
        mention_model['location'] = [38]

        # Construct a dict representation of a Example model
        example_model = {}
        example_model['text'] = 'testString'
        example_model['mentions'] = [mention_model]

        # Set up parameter values
        workspace_id = 'testString'
        intent = 'testString'
        description = 'testString'
        examples = [example_model]

        # Invoke method
        response = _service.create_intent(
            workspace_id,
            intent,
            description=description,
            examples=examples,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['intent'] == 'testString'
        assert req_body['description'] == 'testString'
        assert req_body['examples'] == [example_model]

    def test_create_intent_required_params_with_retries(self):
        # Enable retries and run test_create_intent_required_params.
        _service.enable_retries()
        self.test_create_intent_required_params()

        # Disable retries and run test_create_intent_required_params.
        _service.disable_retries()
        self.test_create_intent_required_params()

    @responses.activate
    def test_create_intent_value_error(self):
        """
        test_create_intent_value_error()
        """
        # Set up mock
        url = preprocess_url('/v1/workspaces/testString/intents')
        mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Construct a dict representation of a Mention model
        mention_model = {}
        mention_model['entity'] = 'testString'
        mention_model['location'] = [38]

        # Construct a dict representation of a Example model
        example_model = {}
        example_model['text'] = 'testString'
        example_model['mentions'] = [mention_model]

        # Set up parameter values
        workspace_id = 'testString'
        intent = 'testString'
        description = 'testString'
        examples = [example_model]

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "workspace_id": workspace_id,
            "intent": intent,
        }
        for param in req_param_dict.keys():
            # Use '!=' (value equality); 'is not' tests object identity and
            # only worked here by accident of CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.create_intent(**req_copy)

    def test_create_intent_value_error_with_retries(self):
        # Enable retries and run test_create_intent_value_error.
        _service.enable_retries()
        self.test_create_intent_value_error()

        # Disable retries and run test_create_intent_value_error.
        _service.disable_retries()
        self.test_create_intent_value_error()
+
+
class TestGetIntent:
    """
    Test Class for get_intent
    """

    @responses.activate
    def test_get_intent_all_params(self):
        """
        get_intent()
        """
        # Set up mock
        url = preprocess_url('/v1/workspaces/testString/intents/testString')
        mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        workspace_id = 'testString'
        intent = 'testString'
        export = False
        include_audit = False

        # Invoke method
        response = _service.get_intent(
            workspace_id,
            intent,
            export=export,
            include_audit=include_audit,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans serialize as lowercase 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'export={}'.format('true' if export else 'false') in query_string
        assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string

    def test_get_intent_all_params_with_retries(self):
        # Enable retries and run test_get_intent_all_params.
        _service.enable_retries()
        self.test_get_intent_all_params()

        # Disable retries and run test_get_intent_all_params.
        _service.disable_retries()
        self.test_get_intent_all_params()

    @responses.activate
    def test_get_intent_required_params(self):
        """
        test_get_intent_required_params()
        """
        # Set up mock
        url = preprocess_url('/v1/workspaces/testString/intents/testString')
        mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        workspace_id = 'testString'
        intent = 'testString'

        # Invoke method
        response = _service.get_intent(
            workspace_id,
            intent,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_intent_required_params_with_retries(self):
        # Enable retries and run test_get_intent_required_params.
        _service.enable_retries()
        self.test_get_intent_required_params()

        # Disable retries and run test_get_intent_required_params.
        _service.disable_retries()
        self.test_get_intent_required_params()

    @responses.activate
    def test_get_intent_value_error(self):
        """
        test_get_intent_value_error()
        """
        # Set up mock
        url = preprocess_url('/v1/workspaces/testString/intents/testString')
        mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        workspace_id = 'testString'
        intent = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "workspace_id": workspace_id,
            "intent": intent,
        }
        for param in req_param_dict.keys():
            # Use '!=' (value equality); 'is not' tests object identity and
            # only worked here by accident of CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_intent(**req_copy)

    def test_get_intent_value_error_with_retries(self):
        # Enable retries and run test_get_intent_value_error.
        _service.enable_retries()
        self.test_get_intent_value_error()

        # Disable retries and run test_get_intent_value_error.
        _service.disable_retries()
        self.test_get_intent_value_error()
+
+
class TestUpdateIntent:
    """
    Test Class for update_intent
    """

    @responses.activate
    def test_update_intent_all_params(self):
        """
        update_intent()
        """
        # Set up mock
        url = preprocess_url('/v1/workspaces/testString/intents/testString')
        mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Construct a dict representation of a Mention model
        mention_model = {}
        mention_model['entity'] = 'testString'
        mention_model['location'] = [38]

        # Construct a dict representation of a Example model
        example_model = {}
        example_model['text'] = 'testString'
        example_model['mentions'] = [mention_model]

        # Set up parameter values
        workspace_id = 'testString'
        intent = 'testString'
        new_intent = 'testString'
        new_description = 'testString'
        new_examples = [example_model]
        append = False
        include_audit = False

        # Invoke method
        response = _service.update_intent(
            workspace_id,
            intent,
            new_intent=new_intent,
            new_description=new_description,
            new_examples=new_examples,
            append=append,
            include_audit=include_audit,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans serialize as lowercase 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'append={}'.format('true' if append else 'false') in query_string
        assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
        # Validate body params (the 'new_*' kwargs map to unprefixed JSON keys)
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['intent'] == 'testString'
        assert req_body['description'] == 'testString'
        assert req_body['examples'] == [example_model]

    def test_update_intent_all_params_with_retries(self):
        # Enable retries and run test_update_intent_all_params.
        _service.enable_retries()
        self.test_update_intent_all_params()

        # Disable retries and run test_update_intent_all_params.
        _service.disable_retries()
        self.test_update_intent_all_params()

    @responses.activate
    def test_update_intent_required_params(self):
        """
        test_update_intent_required_params()
        """
        # Set up mock
        url = preprocess_url('/v1/workspaces/testString/intents/testString')
        mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Construct a dict representation of a Mention model
        mention_model = {}
        mention_model['entity'] = 'testString'
        mention_model['location'] = [38]

        # Construct a dict representation of a Example model
        example_model = {}
        example_model['text'] = 'testString'
        example_model['mentions'] = [mention_model]

        # Set up parameter values
        workspace_id = 'testString'
        intent = 'testString'
        new_intent = 'testString'
        new_description = 'testString'
        new_examples = [example_model]

        # Invoke method
        response = _service.update_intent(
            workspace_id,
            intent,
            new_intent=new_intent,
            new_description=new_description,
            new_examples=new_examples,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['intent'] == 'testString'
        assert req_body['description'] == 'testString'
        assert req_body['examples'] == [example_model]

    def test_update_intent_required_params_with_retries(self):
        # Enable retries and run test_update_intent_required_params.
        _service.enable_retries()
        self.test_update_intent_required_params()

        # Disable retries and run test_update_intent_required_params.
        _service.disable_retries()
        self.test_update_intent_required_params()

    @responses.activate
    def test_update_intent_value_error(self):
        """
        test_update_intent_value_error()
        """
        # Set up mock
        url = preprocess_url('/v1/workspaces/testString/intents/testString')
        mock_response = '{"intent": "intent", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Construct a dict representation of a Mention model
        mention_model = {}
        mention_model['entity'] = 'testString'
        mention_model['location'] = [38]

        # Construct a dict representation of a Example model
        example_model = {}
        example_model['text'] = 'testString'
        example_model['mentions'] = [mention_model]

        # Set up parameter values
        workspace_id = 'testString'
        intent = 'testString'
        new_intent = 'testString'
        new_description = 'testString'
        new_examples = [example_model]

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "workspace_id": workspace_id,
            "intent": intent,
        }
        for param in req_param_dict.keys():
            # Use '!=' (value equality); 'is not' tests object identity and
            # only worked here by accident of CPython string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.update_intent(**req_copy)

    def test_update_intent_value_error_with_retries(self):
        # Enable retries and run test_update_intent_value_error.
        _service.enable_retries()
        self.test_update_intent_value_error()

        # Disable retries and run test_update_intent_value_error.
        _service.disable_retries()
        self.test_update_intent_value_error()
+
+
+class TestDeleteIntent:
+ """
+ Test Class for delete_intent
+ """
+
+ @responses.activate
+ def test_delete_intent_all_params(self):
+ """
+ delete_intent()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+
+ # Invoke method
+ response = _service.delete_intent(
+ workspace_id,
+ intent,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_delete_intent_all_params_with_retries(self):
+ # Enable retries and run test_delete_intent_all_params.
+ _service.enable_retries()
+ self.test_delete_intent_all_params()
+
+ # Disable retries and run test_delete_intent_all_params.
+ _service.disable_retries()
+ self.test_delete_intent_all_params()
+
+ @responses.activate
+ def test_delete_intent_value_error(self):
+ """
+ test_delete_intent_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "intent": intent,
}
- }
-
- responses.add(
- responses.POST,
- message_url,
- body=json.dumps(message_response),
- status=200,
- content_type='application/json')
-
- message = assistant.message(
- workspace_id=workspace_id,
- input=MessageInput(text='Turn on the lights'),
- context=None).get_result()
-
- assert message is not None
- assert responses.calls[0].request.url == message_url1
- assert 'x-watson-learning-opt-out' in responses.calls[0].request.headers
- assert responses.calls[0].request.headers['x-watson-learning-opt-out'] == 'true'
- assert responses.calls[0].response.text == json.dumps(message_response)
-
- # test context
- responses.add(
- responses.POST,
- message_url,
- body=message_response,
- status=200,
- content_type='application/json')
-
- message_ctx = Context._from_dict(message_response['context'])
- message = assistant.message(
- workspace_id=workspace_id,
- input=MessageInput(text='Turn on the lights'),
- context=message_ctx).get_result()
-
- assert message is not None
- assert responses.calls[1].request.url == message_url1
- assert responses.calls[1].response.text == json.dumps(message_response)
-
- assert len(responses.calls) == 2
-
-
-#########################
-# synonyms
-#########################
-
-
-@responses.activate
-def test_create_synonym():
- endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms'.format(
- 'boguswid', 'aeiou', 'vowel')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "synonym": "aeiou",
- "created": "2000-01-23T04:56:07.000+00:00",
- "updated": "2000-01-23T04:56:07.000+00:00"
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=201,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- synonym = service.create_synonym(
- workspace_id='boguswid', entity='aeiou', value='vowel', synonym='a').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert synonym == response
- # Verify that response can be converted to a Synonym
- Synonym._from_dict(synonym)
-
-@responses.activate
-def test_delete_synonym():
- endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}'.format(
- 'boguswid', 'aeiou', 'vowel', 'a')
- url = '{0}{1}'.format(base_url, endpoint)
- response = None
- responses.add(
- responses.DELETE,
- url,
- body=json.dumps(response),
- status=204,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- synonym = service.delete_synonym(
- workspace_id='boguswid', entity='aeiou', value='vowel', synonym='a').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert synonym is None
-
-
-@responses.activate
-def test_get_synonym():
- endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}'.format(
- 'boguswid', 'grilling', 'bbq', 'barbecue')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "synonym": "barbecue",
- "created": "2015-12-06T23:53:59.153Z",
- "updated": "2015-12-07T18:53:59.153Z"
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- synonym = service.get_synonym(
- workspace_id='boguswid', entity='grilling', value='bbq', synonym='barbecue').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert synonym == response
- # Verify that response can be converted to a Synonym
- Synonym._from_dict(synonym)
-
-
-@responses.activate
-def test_list_synonyms():
- endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms'.format(
- 'boguswid', 'grilling', 'bbq')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "synonyms": [{
- "synonym": "BBQ sauce",
- "created": "2015-12-06T23:53:59.153Z",
- "updated": "2015-12-07T18:53:59.153Z"
- }, {
- "synonym": "barbecue",
- "created": "2015-12-06T23:53:59.153Z",
- "updated": "2015-12-07T18:53:59.153Z"
- }],
- "pagination": {
- "refresh_url":
- "/v1/workspaces/pizza_app-e0f3/entities/sauce/values/types/synonyms?version=2017-12-18&filter=name:b&include_count=true&page_limit=2",
- "next_url":
- "/v1/workspaces/pizza_app-e0f3/entities/sauce/values/types/synonyms?cursor=base64=&version=2017-12-18&filter=name:b&page_limit=2",
- "total":
- 8,
- "matched":
- 2
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_intent(**req_copy)
+
+ def test_delete_intent_value_error_with_retries(self):
+ # Enable retries and run test_delete_intent_value_error.
+ _service.enable_retries()
+ self.test_delete_intent_value_error()
+
+ # Disable retries and run test_delete_intent_value_error.
+ _service.disable_retries()
+ self.test_delete_intent_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Intents
+##############################################################################
+
+##############################################################################
+# Start of Service: Examples
+##############################################################################
+# region
+
+
+class TestListExamples:
+ """
+ Test Class for list_examples
+ """
+
+ @responses.activate
+ def test_list_examples_all_params(self):
+ """
+ list_examples()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString/examples')
+ mock_response = '{"examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+ page_limit = 100
+ include_count = False
+ sort = 'text'
+ cursor = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.list_examples(
+ workspace_id,
+ intent,
+ page_limit=page_limit,
+ include_count=include_count,
+ sort=sort,
+ cursor=cursor,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'page_limit={}'.format(page_limit) in query_string
+ assert 'include_count={}'.format('true' if include_count else 'false') in query_string
+ assert 'sort={}'.format(sort) in query_string
+ assert 'cursor={}'.format(cursor) in query_string
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_list_examples_all_params_with_retries(self):
+ # Enable retries and run test_list_examples_all_params.
+ _service.enable_retries()
+ self.test_list_examples_all_params()
+
+ # Disable retries and run test_list_examples_all_params.
+ _service.disable_retries()
+ self.test_list_examples_all_params()
+
+ @responses.activate
+ def test_list_examples_required_params(self):
+ """
+ test_list_examples_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString/examples')
+ mock_response = '{"examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+
+ # Invoke method
+ response = _service.list_examples(
+ workspace_id,
+ intent,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_examples_required_params_with_retries(self):
+ # Enable retries and run test_list_examples_required_params.
+ _service.enable_retries()
+ self.test_list_examples_required_params()
+
+ # Disable retries and run test_list_examples_required_params.
+ _service.disable_retries()
+ self.test_list_examples_required_params()
+
+ @responses.activate
+ def test_list_examples_value_error(self):
+ """
+ test_list_examples_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString/examples')
+ mock_response = '{"examples": [{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "intent": intent,
}
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- synonyms = service.list_synonyms(
- workspace_id='boguswid',
- entity='grilling',
- value='bbq').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert synonyms == response
- # Verify that response can be converted to a SynonymCollection
- SynonymCollection._from_dict(synonyms)
-
-
-@responses.activate
-def test_update_synonym():
- endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}'.format(
- 'boguswid', 'grilling', 'bbq', 'barbecue')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "synonym": "barbecue",
- "created": "2015-12-06T23:53:59.153Z",
- "updated": "2015-12-07T18:53:59.153Z"
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- synonym = service.update_synonym(
- workspace_id='boguswid', entity='grilling', value='bbq', synonym='barbecue', new_synonym='barbecue').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert synonym == response
- # Verify that response can be converted to a Synonym
- Synonym._from_dict(synonym)
-
-
-#########################
-# values
-#########################
-
-
-@responses.activate
-def test_create_value():
- endpoint = '/v1/workspaces/{0}/entities/{1}/values'.format('boguswid', 'grilling')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "metadata": "{}",
- "created": "2000-01-23T04:56:07.000+00:00",
- "value": "aeiou",
- "type": "synonyms",
- "updated": "2000-01-23T04:56:07.000+00:00"
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=201,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- value = service.create_value(
- workspace_id='boguswid',
- entity='grilling',
- value='aeiou').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert value == response
- # Verify that response can be converted to a Value
- Value._from_dict(value)
-
-
-@responses.activate
-def test_delete_value():
- endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}'.format(
- 'boguswid', 'grilling', 'bbq')
- url = '{0}{1}'.format(base_url, endpoint)
- response = ""
- responses.add(
- responses.DELETE,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- value = service.delete_value(
- workspace_id='boguswid', entity='grilling', value='bbq').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert value == ""
-
-
-@responses.activate
-def test_get_value():
- endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}'.format(
- 'boguswid', 'grilling', 'bbq')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "value": "BBQ sauce",
- "metadata": {
- "code": 1422
- },
- "type": "synonyms",
- "created": "2015-12-06T23:53:59.153Z",
- "updated": "2015-12-07T18:53:59.153Z"
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- value = service.get_value(
- workspace_id='boguswid', entity='grilling', value='bbq', export=True).get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert value == response
- # Verify that response can be converted to a Value
- Value._from_dict(value)
-
-
-@responses.activate
-def test_list_values():
- endpoint = '/v1/workspaces/{0}/entities/{1}/values'.format('boguswid', 'grilling')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "values": [{
- "value": "BBQ sauce",
- "metadata": {
- "code": 1422
- },
- "type": "synonyms",
- "created": "2015-12-06T23:53:59.153Z",
- "updated": "2015-12-07T18:53:59.153Z"
- }],
- "pagination": {
- "refresh_url":
- "/v1/workspaces/pizza_app-e0f3/entities/sauce/values?version=2017-12-18&filter=name:pizza&include_count=true&page_limit=1",
- "next_url":
- "/v1/workspaces/pizza_app-e0f3/sauce/values?cursor=base64=&version=2017-12-18&filter=name:pizza&page_limit=1",
- "total":
- 1,
- "matched":
- 1
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.list_examples(**req_copy)
+
+ def test_list_examples_value_error_with_retries(self):
+ # Enable retries and run test_list_examples_value_error.
+ _service.enable_retries()
+ self.test_list_examples_value_error()
+
+ # Disable retries and run test_list_examples_value_error.
+ _service.disable_retries()
+ self.test_list_examples_value_error()
+
+
+class TestCreateExample:
+ """
+ Test Class for create_example
+ """
+
+ @responses.activate
+ def test_create_example_all_params(self):
+ """
+ create_example()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString/examples')
+ mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Construct a dict representation of a Mention model
+ mention_model = {}
+ mention_model['entity'] = 'testString'
+ mention_model['location'] = [38]
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+ text = 'testString'
+ mentions = [mention_model]
+ include_audit = False
+
+ # Invoke method
+ response = _service.create_example(
+ workspace_id,
+ intent,
+ text,
+ mentions=mentions,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['text'] == 'testString'
+ assert req_body['mentions'] == [mention_model]
+
+ def test_create_example_all_params_with_retries(self):
+ # Enable retries and run test_create_example_all_params.
+ _service.enable_retries()
+ self.test_create_example_all_params()
+
+ # Disable retries and run test_create_example_all_params.
+ _service.disable_retries()
+ self.test_create_example_all_params()
+
+ @responses.activate
+ def test_create_example_required_params(self):
+ """
+ test_create_example_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString/examples')
+ mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Construct a dict representation of a Mention model
+ mention_model = {}
+ mention_model['entity'] = 'testString'
+ mention_model['location'] = [38]
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+ text = 'testString'
+ mentions = [mention_model]
+
+ # Invoke method
+ response = _service.create_example(
+ workspace_id,
+ intent,
+ text,
+ mentions=mentions,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['text'] == 'testString'
+ assert req_body['mentions'] == [mention_model]
+
+ def test_create_example_required_params_with_retries(self):
+ # Enable retries and run test_create_example_required_params.
+ _service.enable_retries()
+ self.test_create_example_required_params()
+
+ # Disable retries and run test_create_example_required_params.
+ _service.disable_retries()
+ self.test_create_example_required_params()
+
+ @responses.activate
+ def test_create_example_value_error(self):
+ """
+ test_create_example_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString/examples')
+ mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Construct a dict representation of a Mention model
+ mention_model = {}
+ mention_model['entity'] = 'testString'
+ mention_model['location'] = [38]
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+ text = 'testString'
+ mentions = [mention_model]
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "intent": intent,
+ "text": text,
}
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- values = service.list_values(
- workspace_id='boguswid',
- entity='grilling',
- export=True).get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert values == response
- # Verify that response can be converted to a ValueCollection
- ValueCollection._from_dict(values)
-
-
-@responses.activate
-def test_update_value():
- endpoint = '/v1/workspaces/{0}/entities/{1}/values/{2}'.format(
- 'boguswid', 'grilling', 'bbq')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "value": "BBQ sauce",
- "metadata": {
- "code": 1422
- },
- "type": "synonyms",
- "created": "2015-12-06T23:53:59.153Z",
- "updated": "2015-12-06T23:53:59.153Z"
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-04-21')
- value = service.update_value(
- workspace_id='boguswid',
- entity='grilling',
- value='bbq',
- new_value='BBQ sauce',
- new_metadata={"code": 1422},
- new_synonyms=None).get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert value == response
- # Verify that response can be converted to a Value
- Value._from_dict(value)
-
-
-#########################
-# workspaces
-#########################
-
-
-@responses.activate
-def test_create_workspace():
- endpoint = '/v1/workspaces'
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "name": "Pizza app",
- "created": "2015-12-06T23:53:59.153Z",
- "language": "en",
- "metadata": {},
- "updated": "2015-12-06T23:53:59.153Z",
- "description": "Pizza app",
- "workspace_id": "pizza_app-e0f3",
- "learning_opt_out": True
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=201,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- workspace = service.create_workspace(
- name='Pizza app', description='Pizza app', language='en', metadata={},
- system_settings={'tooling': {'store_generic_responses' : True, 'disambiguation': {'prompt': 'Hello world', 'enabled': True}}}).get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert workspace == response
- # Verify that response can be converted to a Workspace
- Workspace._from_dict(workspace)
-
-@responses.activate
-def test_delete_workspace():
- endpoint = '/v1/workspaces/{0}'.format('boguswid')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {}
- responses.add(
- responses.DELETE,
- url,
- body=json.dumps(response),
- status=204,
- content_type='')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- workspace = service.delete_workspace(workspace_id='boguswid').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert workspace is None
-
-
-@responses.activate
-def test_get_workspace():
- endpoint = '/v1/workspaces/{0}'.format('boguswid')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "name": "Pizza app",
- "created": "2015-12-06T23:53:59.153Z",
- "language": "en",
- "metadata": {},
- "updated": "2015-12-06T23:53:59.153Z",
- "description": "Pizza app",
- "status": "Available",
- "learning_opt_out": False,
- "workspace_id": "pizza_app-e0f3"
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- workspace = service.get_workspace(workspace_id='boguswid', export=True, sort='stable').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert workspace == response
- # Verify that response can be converted to a Workspace
- Workspace._from_dict(workspace)
-
-
-@responses.activate
-def test_list_workspaces():
- endpoint = '/v1/workspaces'
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "workspaces": [{
- "name": "Pizza app",
- "created": "2015-12-06T23:53:59.153Z",
- "language": "en",
- "metadata": {},
- "updated": "2015-12-06T23:53:59.153Z",
- "description": "Pizza app",
- "workspace_id": "pizza_app-e0f3",
- "learning_opt_out": True
- }],
- "pagination": {
- "refresh_url":
- "/v1/workspaces?version=2016-01-24&page_limit=1",
- "next_url":
- "/v1/workspaces?cursor=base64=&version=2016-01-24&page_limit=1"
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.create_example(**req_copy)
+
+ def test_create_example_value_error_with_retries(self):
+ # Enable retries and run test_create_example_value_error.
+ _service.enable_retries()
+ self.test_create_example_value_error()
+
+ # Disable retries and run test_create_example_value_error.
+ _service.disable_retries()
+ self.test_create_example_value_error()
+
+
+class TestGetExample:
+ """
+ Test Class for get_example
+ """
+
+ @responses.activate
+ def test_get_example_all_params(self):
+ """
+ get_example()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString')
+ mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+ text = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.get_example(
+ workspace_id,
+ intent,
+ text,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_get_example_all_params_with_retries(self):
+ # Enable retries and run test_get_example_all_params.
+ _service.enable_retries()
+ self.test_get_example_all_params()
+
+ # Disable retries and run test_get_example_all_params.
+ _service.disable_retries()
+ self.test_get_example_all_params()
+
+ @responses.activate
+ def test_get_example_required_params(self):
+ """
+ test_get_example_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString')
+ mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+ text = 'testString'
+
+ # Invoke method
+ response = _service.get_example(
+ workspace_id,
+ intent,
+ text,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_get_example_required_params_with_retries(self):
+ # Enable retries and run test_get_example_required_params.
+ _service.enable_retries()
+ self.test_get_example_required_params()
+
+ # Disable retries and run test_get_example_required_params.
+ _service.disable_retries()
+ self.test_get_example_required_params()
+
+ @responses.activate
+ def test_get_example_value_error(self):
+ """
+ test_get_example_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString')
+ mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+ text = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "intent": intent,
+ "text": text,
}
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- workspaces = service.list_workspaces().get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert workspaces == response
- # Verify that response can be converted to a WorkspaceCollection
- WorkspaceCollection._from_dict(workspaces)
-
-
-@responses.activate
-def test_update_workspace():
- endpoint = '/v1/workspaces/{0}'.format('pizza_app-e0f3')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- "name": "Pizza app",
- "created": "2015-12-06T23:53:59.153Z",
- "language": "en",
- "metadata": {},
- "updated": "2015-12-06T23:53:59.153Z",
- "description": "Pizza app",
- "workspace_id": "pizza_app-e0f3",
- "learning_opt_out": True
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV1(
- username='username', password='password', version='2017-02-03')
- workspace = service.update_workspace(
- workspace_id='pizza_app-e0f3',
- name='Pizza app',
- description='Pizza app',
- language='en',
- metadata={},
- system_settings={'tooling': {'store_generic_responses' : True, 'disambiguation': {'prompt': 'Hello world', 'enabled': True}}}).get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert workspace == response
- # Verify that response can be converted to a Workspace
- Workspace._from_dict(workspace)
-
-@responses.activate
-def test_dialog_nodes():
- url = 'https://gateway.watsonplatform.net/assistant/api/v1/workspaces/id/dialog_nodes'
- responses.add(
- responses.GET,
- url,
- body='{ "application/json": { "dialog_node": "location-atm" }}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.POST,
- "{0}?version=2017-05-26".format(url),
- body='{ "application/json": { "dialog_node": "location-done" }}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.DELETE,
- "{0}/location-done?version=2017-05-26".format(url),
- body='{"description": "deleted successfully"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.GET,
- "{0}/location-done?version=2017-05-26".format(url),
- body='{ "application/json": { "dialog_node": "location-atm" }}',
- status=200,
- content_type='application/json')
-
- assistant = ibm_watson.AssistantV1('2017-05-26', username="username", password="password")
-
- assistant.create_dialog_node('id', 'location-done', user_label='xxx')
- assert responses.calls[0].response.json()['application/json']['dialog_node'] == 'location-done'
-
- assistant.delete_dialog_node('id', 'location-done')
- assert responses.calls[1].response.json() == {"description": "deleted successfully"}
-
- assistant.get_dialog_node('id', 'location-done')
- assert responses.calls[2].response.json() == {"application/json": {"dialog_node": "location-atm"}}
-
- assistant.list_dialog_nodes('id')
- assert responses.calls[3].response.json() == {"application/json": {"dialog_node": "location-atm"}}
-
- assert len(responses.calls) == 4
-
-@responses.activate
-def test_delete_user_data():
- url = 'https://gateway.watsonplatform.net/assistant/api/v1/user_data'
- responses.add(
- responses.DELETE,
- url,
- body=None,
- status=204,
- content_type='application_json')
-
- assistant = ibm_watson.AssistantV1('2017-05-26', username="username", password="password")
-
- response = assistant.delete_user_data('id').get_result()
- assert response is None
- assert len(responses.calls) == 1
-
-@responses.activate
-def test_list_mentions():
- url = 'https://gateway.watsonplatform.net/assistant/api/v1/workspaces/workspace_id/entities/entity1/mentions'
- responses.add(
- responses.GET,
- url,
- body='[{"entity": "xxx"}]',
- status=200,
- content_type='application_json')
-
- assistant = ibm_watson.AssistantV1('2017-05-26', username="username", password="password")
-
- response = assistant.list_mentions('workspace_id', 'entity1').get_result()
- assert response == [{"entity": "xxx"}]
- assert len(responses.calls) == 1
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.get_example(**req_copy)
+
+ def test_get_example_value_error_with_retries(self):
+ # Enable retries and run test_get_example_value_error.
+ _service.enable_retries()
+ self.test_get_example_value_error()
+
+ # Disable retries and run test_get_example_value_error.
+ _service.disable_retries()
+ self.test_get_example_value_error()
+
+
+class TestUpdateExample:
+ """
+ Test Class for update_example
+ """
+
+ @responses.activate
+ def test_update_example_all_params(self):
+ """
+ update_example()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString')
+ mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a Mention model
+ mention_model = {}
+ mention_model['entity'] = 'testString'
+ mention_model['location'] = [38]
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+ text = 'testString'
+ new_text = 'testString'
+ new_mentions = [mention_model]
+ include_audit = False
+
+ # Invoke method
+ response = _service.update_example(
+ workspace_id,
+ intent,
+ text,
+ new_text=new_text,
+ new_mentions=new_mentions,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['text'] == 'testString'
+ assert req_body['mentions'] == [mention_model]
+
+ def test_update_example_all_params_with_retries(self):
+ # Enable retries and run test_update_example_all_params.
+ _service.enable_retries()
+ self.test_update_example_all_params()
+
+ # Disable retries and run test_update_example_all_params.
+ _service.disable_retries()
+ self.test_update_example_all_params()
+
+ @responses.activate
+ def test_update_example_required_params(self):
+ """
+ test_update_example_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString')
+ mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a Mention model
+ mention_model = {}
+ mention_model['entity'] = 'testString'
+ mention_model['location'] = [38]
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+ text = 'testString'
+ new_text = 'testString'
+ new_mentions = [mention_model]
+
+ # Invoke method
+ response = _service.update_example(
+ workspace_id,
+ intent,
+ text,
+ new_text=new_text,
+ new_mentions=new_mentions,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['text'] == 'testString'
+ assert req_body['mentions'] == [mention_model]
+
+ def test_update_example_required_params_with_retries(self):
+ # Enable retries and run test_update_example_required_params.
+ _service.enable_retries()
+ self.test_update_example_required_params()
+
+ # Disable retries and run test_update_example_required_params.
+ _service.disable_retries()
+ self.test_update_example_required_params()
+
+ @responses.activate
+ def test_update_example_value_error(self):
+ """
+ test_update_example_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString')
+ mock_response = '{"text": "text", "mentions": [{"entity": "entity", "location": [8]}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a Mention model
+ mention_model = {}
+ mention_model['entity'] = 'testString'
+ mention_model['location'] = [38]
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+ text = 'testString'
+ new_text = 'testString'
+ new_mentions = [mention_model]
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "intent": intent,
+ "text": text,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.update_example(**req_copy)
+
+ def test_update_example_value_error_with_retries(self):
+ # Enable retries and run test_update_example_value_error.
+ _service.enable_retries()
+ self.test_update_example_value_error()
+
+ # Disable retries and run test_update_example_value_error.
+ _service.disable_retries()
+ self.test_update_example_value_error()
+
+
+class TestDeleteExample:
+ """
+ Test Class for delete_example
+ """
+
+ @responses.activate
+ def test_delete_example_all_params(self):
+ """
+ delete_example()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+ text = 'testString'
+
+ # Invoke method
+ response = _service.delete_example(
+ workspace_id,
+ intent,
+ text,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_delete_example_all_params_with_retries(self):
+ # Enable retries and run test_delete_example_all_params.
+ _service.enable_retries()
+ self.test_delete_example_all_params()
+
+ # Disable retries and run test_delete_example_all_params.
+ _service.disable_retries()
+ self.test_delete_example_all_params()
+
+ @responses.activate
+ def test_delete_example_value_error(self):
+ """
+ test_delete_example_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/intents/testString/examples/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ intent = 'testString'
+ text = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "intent": intent,
+ "text": text,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_example(**req_copy)
+
+ def test_delete_example_value_error_with_retries(self):
+ # Enable retries and run test_delete_example_value_error.
+ _service.enable_retries()
+ self.test_delete_example_value_error()
+
+ # Disable retries and run test_delete_example_value_error.
+ _service.disable_retries()
+ self.test_delete_example_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Examples
+##############################################################################
+
+##############################################################################
+# Start of Service: Counterexamples
+##############################################################################
+# region
+
+
+class TestListCounterexamples:
+ """
+ Test Class for list_counterexamples
+ """
+
+ @responses.activate
+ def test_list_counterexamples_all_params(self):
+ """
+ list_counterexamples()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/counterexamples')
+ mock_response = '{"counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ page_limit = 100
+ include_count = False
+ sort = 'text'
+ cursor = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.list_counterexamples(
+ workspace_id,
+ page_limit=page_limit,
+ include_count=include_count,
+ sort=sort,
+ cursor=cursor,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'page_limit={}'.format(page_limit) in query_string
+ assert 'include_count={}'.format('true' if include_count else 'false') in query_string
+ assert 'sort={}'.format(sort) in query_string
+ assert 'cursor={}'.format(cursor) in query_string
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_list_counterexamples_all_params_with_retries(self):
+ # Enable retries and run test_list_counterexamples_all_params.
+ _service.enable_retries()
+ self.test_list_counterexamples_all_params()
+
+ # Disable retries and run test_list_counterexamples_all_params.
+ _service.disable_retries()
+ self.test_list_counterexamples_all_params()
+
+ @responses.activate
+ def test_list_counterexamples_required_params(self):
+ """
+ test_list_counterexamples_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/counterexamples')
+ mock_response = '{"counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Invoke method
+ response = _service.list_counterexamples(
+ workspace_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_counterexamples_required_params_with_retries(self):
+ # Enable retries and run test_list_counterexamples_required_params.
+ _service.enable_retries()
+ self.test_list_counterexamples_required_params()
+
+ # Disable retries and run test_list_counterexamples_required_params.
+ _service.disable_retries()
+ self.test_list_counterexamples_required_params()
+
+ @responses.activate
+ def test_list_counterexamples_value_error(self):
+ """
+ test_list_counterexamples_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/counterexamples')
+ mock_response = '{"counterexamples": [{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.list_counterexamples(**req_copy)
+
+ def test_list_counterexamples_value_error_with_retries(self):
+ # Enable retries and run test_list_counterexamples_value_error.
+ _service.enable_retries()
+ self.test_list_counterexamples_value_error()
+
+ # Disable retries and run test_list_counterexamples_value_error.
+ _service.disable_retries()
+ self.test_list_counterexamples_value_error()
+
+
+class TestCreateCounterexample:
+ """
+ Test Class for create_counterexample
+ """
+
+ @responses.activate
+ def test_create_counterexample_all_params(self):
+ """
+ create_counterexample()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/counterexamples')
+ mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ text = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.create_counterexample(
+ workspace_id,
+ text,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['text'] == 'testString'
+
+ def test_create_counterexample_all_params_with_retries(self):
+ # Enable retries and run test_create_counterexample_all_params.
+ _service.enable_retries()
+ self.test_create_counterexample_all_params()
+
+ # Disable retries and run test_create_counterexample_all_params.
+ _service.disable_retries()
+ self.test_create_counterexample_all_params()
+
+ @responses.activate
+ def test_create_counterexample_required_params(self):
+ """
+ test_create_counterexample_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/counterexamples')
+ mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ text = 'testString'
+
+ # Invoke method
+ response = _service.create_counterexample(
+ workspace_id,
+ text,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['text'] == 'testString'
+
+ def test_create_counterexample_required_params_with_retries(self):
+ # Enable retries and run test_create_counterexample_required_params.
+ _service.enable_retries()
+ self.test_create_counterexample_required_params()
+
+ # Disable retries and run test_create_counterexample_required_params.
+ _service.disable_retries()
+ self.test_create_counterexample_required_params()
+
+ @responses.activate
+ def test_create_counterexample_value_error(self):
+ """
+ test_create_counterexample_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/counterexamples')
+ mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ text = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "text": text,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.create_counterexample(**req_copy)
+
+ def test_create_counterexample_value_error_with_retries(self):
+ # Enable retries and run test_create_counterexample_value_error.
+ _service.enable_retries()
+ self.test_create_counterexample_value_error()
+
+ # Disable retries and run test_create_counterexample_value_error.
+ _service.disable_retries()
+ self.test_create_counterexample_value_error()
+
+
+class TestGetCounterexample:
+ """
+ Test Class for get_counterexample
+ """
+
+ @responses.activate
+ def test_get_counterexample_all_params(self):
+ """
+ get_counterexample()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/counterexamples/testString')
+ mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ text = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.get_counterexample(
+ workspace_id,
+ text,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_get_counterexample_all_params_with_retries(self):
+ # Enable retries and run test_get_counterexample_all_params.
+ _service.enable_retries()
+ self.test_get_counterexample_all_params()
+
+ # Disable retries and run test_get_counterexample_all_params.
+ _service.disable_retries()
+ self.test_get_counterexample_all_params()
+
+ @responses.activate
+ def test_get_counterexample_required_params(self):
+ """
+ test_get_counterexample_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/counterexamples/testString')
+ mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ text = 'testString'
+
+ # Invoke method
+ response = _service.get_counterexample(
+ workspace_id,
+ text,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_get_counterexample_required_params_with_retries(self):
+ # Enable retries and run test_get_counterexample_required_params.
+ _service.enable_retries()
+ self.test_get_counterexample_required_params()
+
+ # Disable retries and run test_get_counterexample_required_params.
+ _service.disable_retries()
+ self.test_get_counterexample_required_params()
+
+ @responses.activate
+ def test_get_counterexample_value_error(self):
+ """
+ test_get_counterexample_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/counterexamples/testString')
+ mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ text = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "text": text,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.get_counterexample(**req_copy)
+
+ def test_get_counterexample_value_error_with_retries(self):
+ # Enable retries and run test_get_counterexample_value_error.
+ _service.enable_retries()
+ self.test_get_counterexample_value_error()
+
+ # Disable retries and run test_get_counterexample_value_error.
+ _service.disable_retries()
+ self.test_get_counterexample_value_error()
+
+
+class TestUpdateCounterexample:
+ """
+ Test Class for update_counterexample
+ """
+
+ @responses.activate
+ def test_update_counterexample_all_params(self):
+ """
+ update_counterexample()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/counterexamples/testString')
+ mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ text = 'testString'
+ new_text = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.update_counterexample(
+ workspace_id,
+ text,
+ new_text=new_text,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['text'] == 'testString'
+
+ def test_update_counterexample_all_params_with_retries(self):
+ # Enable retries and run test_update_counterexample_all_params.
+ _service.enable_retries()
+ self.test_update_counterexample_all_params()
+
+ # Disable retries and run test_update_counterexample_all_params.
+ _service.disable_retries()
+ self.test_update_counterexample_all_params()
+
+ @responses.activate
+ def test_update_counterexample_required_params(self):
+ """
+ test_update_counterexample_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/counterexamples/testString')
+ mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ text = 'testString'
+ new_text = 'testString'
+
+ # Invoke method
+ response = _service.update_counterexample(
+ workspace_id,
+ text,
+ new_text=new_text,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['text'] == 'testString'
+
+ def test_update_counterexample_required_params_with_retries(self):
+ # Enable retries and run test_update_counterexample_required_params.
+ _service.enable_retries()
+ self.test_update_counterexample_required_params()
+
+ # Disable retries and run test_update_counterexample_required_params.
+ _service.disable_retries()
+ self.test_update_counterexample_required_params()
+
+ @responses.activate
+ def test_update_counterexample_value_error(self):
+ """
+ test_update_counterexample_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/counterexamples/testString')
+ mock_response = '{"text": "text", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ text = 'testString'
+ new_text = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "text": text,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.update_counterexample(**req_copy)
+
+ def test_update_counterexample_value_error_with_retries(self):
+ # Enable retries and run test_update_counterexample_value_error.
+ _service.enable_retries()
+ self.test_update_counterexample_value_error()
+
+ # Disable retries and run test_update_counterexample_value_error.
+ _service.disable_retries()
+ self.test_update_counterexample_value_error()
+
+
+class TestDeleteCounterexample:
+ """
+ Test Class for delete_counterexample
+ """
+
+ @responses.activate
+ def test_delete_counterexample_all_params(self):
+ """
+ delete_counterexample()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/counterexamples/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ text = 'testString'
+
+ # Invoke method
+ response = _service.delete_counterexample(
+ workspace_id,
+ text,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_delete_counterexample_all_params_with_retries(self):
+ # Enable retries and run test_delete_counterexample_all_params.
+ _service.enable_retries()
+ self.test_delete_counterexample_all_params()
+
+ # Disable retries and run test_delete_counterexample_all_params.
+ _service.disable_retries()
+ self.test_delete_counterexample_all_params()
+
+ @responses.activate
+ def test_delete_counterexample_value_error(self):
+ """
+ test_delete_counterexample_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/counterexamples/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ text = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "text": text,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_counterexample(**req_copy)
+
+ def test_delete_counterexample_value_error_with_retries(self):
+ # Enable retries and run test_delete_counterexample_value_error.
+ _service.enable_retries()
+ self.test_delete_counterexample_value_error()
+
+ # Disable retries and run test_delete_counterexample_value_error.
+ _service.disable_retries()
+ self.test_delete_counterexample_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Counterexamples
+##############################################################################
+
+##############################################################################
+# Start of Service: Entities
+##############################################################################
+# region
+
+
+class TestListEntities:
+ """
+ Test Class for list_entities
+ """
+
+ @responses.activate
+ def test_list_entities_all_params(self):
+ """
+ list_entities()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities')
+ mock_response = '{"entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ export = False
+ page_limit = 100
+ include_count = False
+ sort = 'entity'
+ cursor = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.list_entities(
+ workspace_id,
+ export=export,
+ page_limit=page_limit,
+ include_count=include_count,
+ sort=sort,
+ cursor=cursor,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'export={}'.format('true' if export else 'false') in query_string
+ assert 'page_limit={}'.format(page_limit) in query_string
+ assert 'include_count={}'.format('true' if include_count else 'false') in query_string
+ assert 'sort={}'.format(sort) in query_string
+ assert 'cursor={}'.format(cursor) in query_string
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_list_entities_all_params_with_retries(self):
+ # Enable retries and run test_list_entities_all_params.
+ _service.enable_retries()
+ self.test_list_entities_all_params()
+
+ # Disable retries and run test_list_entities_all_params.
+ _service.disable_retries()
+ self.test_list_entities_all_params()
+
+ @responses.activate
+ def test_list_entities_required_params(self):
+ """
+ test_list_entities_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities')
+ mock_response = '{"entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Invoke method
+ response = _service.list_entities(
+ workspace_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_entities_required_params_with_retries(self):
+ # Enable retries and run test_list_entities_required_params.
+ _service.enable_retries()
+ self.test_list_entities_required_params()
+
+ # Disable retries and run test_list_entities_required_params.
+ _service.disable_retries()
+ self.test_list_entities_required_params()
+
+ @responses.activate
+ def test_list_entities_value_error(self):
+ """
+ test_list_entities_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities')
+ mock_response = '{"entities": [{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.list_entities(**req_copy)
+
+ def test_list_entities_value_error_with_retries(self):
+ # Enable retries and run test_list_entities_value_error.
+ _service.enable_retries()
+ self.test_list_entities_value_error()
+
+ # Disable retries and run test_list_entities_value_error.
+ _service.disable_retries()
+ self.test_list_entities_value_error()
+
+
+class TestCreateEntity:
+ """
+ Test Class for create_entity
+ """
+
+ @responses.activate
+ def test_create_entity_all_params(self):
+ """
+ create_entity()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities')
+ mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Construct a dict representation of a CreateValue model
+ create_value_model = {}
+ create_value_model['value'] = 'testString'
+ create_value_model['metadata'] = {'anyKey': 'anyValue'}
+ create_value_model['type'] = 'synonyms'
+ create_value_model['synonyms'] = ['testString']
+ create_value_model['patterns'] = ['testString']
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ description = 'testString'
+ metadata = {'anyKey': 'anyValue'}
+ fuzzy_match = True
+ values = [create_value_model]
+ include_audit = False
+
+ # Invoke method
+ response = _service.create_entity(
+ workspace_id,
+ entity,
+ description=description,
+ metadata=metadata,
+ fuzzy_match=fuzzy_match,
+ values=values,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['entity'] == 'testString'
+ assert req_body['description'] == 'testString'
+ assert req_body['metadata'] == {'anyKey': 'anyValue'}
+ assert req_body['fuzzy_match'] == True
+ assert req_body['values'] == [create_value_model]
+
+ def test_create_entity_all_params_with_retries(self):
+ # Enable retries and run test_create_entity_all_params.
+ _service.enable_retries()
+ self.test_create_entity_all_params()
+
+ # Disable retries and run test_create_entity_all_params.
+ _service.disable_retries()
+ self.test_create_entity_all_params()
+
+ @responses.activate
+ def test_create_entity_required_params(self):
+ """
+ test_create_entity_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities')
+ mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Construct a dict representation of a CreateValue model
+ create_value_model = {}
+ create_value_model['value'] = 'testString'
+ create_value_model['metadata'] = {'anyKey': 'anyValue'}
+ create_value_model['type'] = 'synonyms'
+ create_value_model['synonyms'] = ['testString']
+ create_value_model['patterns'] = ['testString']
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ description = 'testString'
+ metadata = {'anyKey': 'anyValue'}
+ fuzzy_match = True
+ values = [create_value_model]
+
+ # Invoke method
+ response = _service.create_entity(
+ workspace_id,
+ entity,
+ description=description,
+ metadata=metadata,
+ fuzzy_match=fuzzy_match,
+ values=values,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['entity'] == 'testString'
+ assert req_body['description'] == 'testString'
+ assert req_body['metadata'] == {'anyKey': 'anyValue'}
+ assert req_body['fuzzy_match'] == True
+ assert req_body['values'] == [create_value_model]
+
+ def test_create_entity_required_params_with_retries(self):
+ # Enable retries and run test_create_entity_required_params.
+ _service.enable_retries()
+ self.test_create_entity_required_params()
+
+ # Disable retries and run test_create_entity_required_params.
+ _service.disable_retries()
+ self.test_create_entity_required_params()
+
+ @responses.activate
+ def test_create_entity_value_error(self):
+ """
+ test_create_entity_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities')
+ mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Construct a dict representation of a CreateValue model
+ create_value_model = {}
+ create_value_model['value'] = 'testString'
+ create_value_model['metadata'] = {'anyKey': 'anyValue'}
+ create_value_model['type'] = 'synonyms'
+ create_value_model['synonyms'] = ['testString']
+ create_value_model['patterns'] = ['testString']
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ description = 'testString'
+ metadata = {'anyKey': 'anyValue'}
+ fuzzy_match = True
+ values = [create_value_model]
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "entity": entity,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.create_entity(**req_copy)
+
+ def test_create_entity_value_error_with_retries(self):
+ # Enable retries and run test_create_entity_value_error.
+ _service.enable_retries()
+ self.test_create_entity_value_error()
+
+ # Disable retries and run test_create_entity_value_error.
+ _service.disable_retries()
+ self.test_create_entity_value_error()
+
+
+class TestGetEntity:
+ """
+ Test Class for get_entity
+ """
+
+ @responses.activate
+ def test_get_entity_all_params(self):
+ """
+ get_entity()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString')
+ mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ export = False
+ include_audit = False
+
+ # Invoke method
+ response = _service.get_entity(
+ workspace_id,
+ entity,
+ export=export,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'export={}'.format('true' if export else 'false') in query_string
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_get_entity_all_params_with_retries(self):
+ # Enable retries and run test_get_entity_all_params.
+ _service.enable_retries()
+ self.test_get_entity_all_params()
+
+ # Disable retries and run test_get_entity_all_params.
+ _service.disable_retries()
+ self.test_get_entity_all_params()
+
+ @responses.activate
+ def test_get_entity_required_params(self):
+ """
+ test_get_entity_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString')
+ mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+
+ # Invoke method
+ response = _service.get_entity(
+ workspace_id,
+ entity,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_get_entity_required_params_with_retries(self):
+ # Enable retries and run test_get_entity_required_params.
+ _service.enable_retries()
+ self.test_get_entity_required_params()
+
+ # Disable retries and run test_get_entity_required_params.
+ _service.disable_retries()
+ self.test_get_entity_required_params()
+
+ @responses.activate
+ def test_get_entity_value_error(self):
+ """
+ test_get_entity_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString')
+ mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "entity": entity,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.get_entity(**req_copy)
+
+ def test_get_entity_value_error_with_retries(self):
+ # Enable retries and run test_get_entity_value_error.
+ _service.enable_retries()
+ self.test_get_entity_value_error()
+
+ # Disable retries and run test_get_entity_value_error.
+ _service.disable_retries()
+ self.test_get_entity_value_error()
+
+
+class TestUpdateEntity:
+ """
+ Test Class for update_entity
+ """
+
+ @responses.activate
+ def test_update_entity_all_params(self):
+ """
+ update_entity()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString')
+ mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a CreateValue model
+ create_value_model = {}
+ create_value_model['value'] = 'testString'
+ create_value_model['metadata'] = {'anyKey': 'anyValue'}
+ create_value_model['type'] = 'synonyms'
+ create_value_model['synonyms'] = ['testString']
+ create_value_model['patterns'] = ['testString']
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ new_entity = 'testString'
+ new_description = 'testString'
+ new_metadata = {'anyKey': 'anyValue'}
+ new_fuzzy_match = True
+ new_values = [create_value_model]
+ append = False
+ include_audit = False
+
+ # Invoke method
+ response = _service.update_entity(
+ workspace_id,
+ entity,
+ new_entity=new_entity,
+ new_description=new_description,
+ new_metadata=new_metadata,
+ new_fuzzy_match=new_fuzzy_match,
+ new_values=new_values,
+ append=append,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'append={}'.format('true' if append else 'false') in query_string
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['entity'] == 'testString'
+ assert req_body['description'] == 'testString'
+ assert req_body['metadata'] == {'anyKey': 'anyValue'}
+ assert req_body['fuzzy_match'] == True
+ assert req_body['values'] == [create_value_model]
+
+ def test_update_entity_all_params_with_retries(self):
+ # Enable retries and run test_update_entity_all_params.
+ _service.enable_retries()
+ self.test_update_entity_all_params()
+
+ # Disable retries and run test_update_entity_all_params.
+ _service.disable_retries()
+ self.test_update_entity_all_params()
+
+ @responses.activate
+ def test_update_entity_required_params(self):
+ """
+ test_update_entity_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString')
+ mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a CreateValue model
+ create_value_model = {}
+ create_value_model['value'] = 'testString'
+ create_value_model['metadata'] = {'anyKey': 'anyValue'}
+ create_value_model['type'] = 'synonyms'
+ create_value_model['synonyms'] = ['testString']
+ create_value_model['patterns'] = ['testString']
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ new_entity = 'testString'
+ new_description = 'testString'
+ new_metadata = {'anyKey': 'anyValue'}
+ new_fuzzy_match = True
+ new_values = [create_value_model]
+
+ # Invoke method
+ response = _service.update_entity(
+ workspace_id,
+ entity,
+ new_entity=new_entity,
+ new_description=new_description,
+ new_metadata=new_metadata,
+ new_fuzzy_match=new_fuzzy_match,
+ new_values=new_values,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['entity'] == 'testString'
+ assert req_body['description'] == 'testString'
+ assert req_body['metadata'] == {'anyKey': 'anyValue'}
+ assert req_body['fuzzy_match'] == True
+ assert req_body['values'] == [create_value_model]
+
+ def test_update_entity_required_params_with_retries(self):
+ # Enable retries and run test_update_entity_required_params.
+ _service.enable_retries()
+ self.test_update_entity_required_params()
+
+ # Disable retries and run test_update_entity_required_params.
+ _service.disable_retries()
+ self.test_update_entity_required_params()
+
+ @responses.activate
+ def test_update_entity_value_error(self):
+ """
+ test_update_entity_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString')
+ mock_response = '{"entity": "entity", "description": "description", "metadata": {"anyKey": "anyValue"}, "fuzzy_match": false, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a CreateValue model
+ create_value_model = {}
+ create_value_model['value'] = 'testString'
+ create_value_model['metadata'] = {'anyKey': 'anyValue'}
+ create_value_model['type'] = 'synonyms'
+ create_value_model['synonyms'] = ['testString']
+ create_value_model['patterns'] = ['testString']
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ new_entity = 'testString'
+ new_description = 'testString'
+ new_metadata = {'anyKey': 'anyValue'}
+ new_fuzzy_match = True
+ new_values = [create_value_model]
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "entity": entity,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.update_entity(**req_copy)
+
+ def test_update_entity_value_error_with_retries(self):
+ # Enable retries and run test_update_entity_value_error.
+ _service.enable_retries()
+ self.test_update_entity_value_error()
+
+ # Disable retries and run test_update_entity_value_error.
+ _service.disable_retries()
+ self.test_update_entity_value_error()
+
+
+class TestDeleteEntity:
+ """
+ Test Class for delete_entity
+ """
+
+ @responses.activate
+ def test_delete_entity_all_params(self):
+ """
+ delete_entity()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+
+ # Invoke method
+ response = _service.delete_entity(
+ workspace_id,
+ entity,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_delete_entity_all_params_with_retries(self):
+ # Enable retries and run test_delete_entity_all_params.
+ _service.enable_retries()
+ self.test_delete_entity_all_params()
+
+ # Disable retries and run test_delete_entity_all_params.
+ _service.disable_retries()
+ self.test_delete_entity_all_params()
+
+ @responses.activate
+ def test_delete_entity_value_error(self):
+ """
+ test_delete_entity_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "entity": entity,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_entity(**req_copy)
+
+ def test_delete_entity_value_error_with_retries(self):
+ # Enable retries and run test_delete_entity_value_error.
+ _service.enable_retries()
+ self.test_delete_entity_value_error()
+
+ # Disable retries and run test_delete_entity_value_error.
+ _service.disable_retries()
+ self.test_delete_entity_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Entities
+##############################################################################
+
+##############################################################################
+# Start of Service: Mentions
+##############################################################################
+# region
+
+
+class TestListMentions:
+ """
+ Test Class for list_mentions
+ """
+
+ @responses.activate
+ def test_list_mentions_all_params(self):
+ """
+ list_mentions()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/mentions')
+ mock_response = '{"examples": [{"text": "text", "intent": "intent", "location": [8]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ export = False
+ include_audit = False
+
+ # Invoke method
+ response = _service.list_mentions(
+ workspace_id,
+ entity,
+ export=export,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'export={}'.format('true' if export else 'false') in query_string
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_list_mentions_all_params_with_retries(self):
+ # Enable retries and run test_list_mentions_all_params.
+ _service.enable_retries()
+ self.test_list_mentions_all_params()
+
+ # Disable retries and run test_list_mentions_all_params.
+ _service.disable_retries()
+ self.test_list_mentions_all_params()
+
+ @responses.activate
+ def test_list_mentions_required_params(self):
+ """
+ test_list_mentions_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/mentions')
+ mock_response = '{"examples": [{"text": "text", "intent": "intent", "location": [8]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+
+ # Invoke method
+ response = _service.list_mentions(
+ workspace_id,
+ entity,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_mentions_required_params_with_retries(self):
+ # Enable retries and run test_list_mentions_required_params.
+ _service.enable_retries()
+ self.test_list_mentions_required_params()
+
+ # Disable retries and run test_list_mentions_required_params.
+ _service.disable_retries()
+ self.test_list_mentions_required_params()
+
+ @responses.activate
+ def test_list_mentions_value_error(self):
+ """
+ test_list_mentions_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/mentions')
+ mock_response = '{"examples": [{"text": "text", "intent": "intent", "location": [8]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "entity": entity,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.list_mentions(**req_copy)
+
+ def test_list_mentions_value_error_with_retries(self):
+ # Enable retries and run test_list_mentions_value_error.
+ _service.enable_retries()
+ self.test_list_mentions_value_error()
+
+ # Disable retries and run test_list_mentions_value_error.
+ _service.disable_retries()
+ self.test_list_mentions_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Mentions
+##############################################################################
+
+##############################################################################
+# Start of Service: Values
+##############################################################################
+# region
+
+
+class TestListValues:
+ """
+ Test Class for list_values
+ """
+
+ @responses.activate
+ def test_list_values_all_params(self):
+ """
+ list_values()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values')
+ mock_response = '{"values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ export = False
+ page_limit = 100
+ include_count = False
+ sort = 'value'
+ cursor = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.list_values(
+ workspace_id,
+ entity,
+ export=export,
+ page_limit=page_limit,
+ include_count=include_count,
+ sort=sort,
+ cursor=cursor,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'export={}'.format('true' if export else 'false') in query_string
+ assert 'page_limit={}'.format(page_limit) in query_string
+ assert 'include_count={}'.format('true' if include_count else 'false') in query_string
+ assert 'sort={}'.format(sort) in query_string
+ assert 'cursor={}'.format(cursor) in query_string
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_list_values_all_params_with_retries(self):
+ # Enable retries and run test_list_values_all_params.
+ _service.enable_retries()
+ self.test_list_values_all_params()
+
+ # Disable retries and run test_list_values_all_params.
+ _service.disable_retries()
+ self.test_list_values_all_params()
+
+ @responses.activate
+ def test_list_values_required_params(self):
+ """
+ test_list_values_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values')
+ mock_response = '{"values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+
+ # Invoke method
+ response = _service.list_values(
+ workspace_id,
+ entity,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_values_required_params_with_retries(self):
+ # Enable retries and run test_list_values_required_params.
+ _service.enable_retries()
+ self.test_list_values_required_params()
+
+ # Disable retries and run test_list_values_required_params.
+ _service.disable_retries()
+ self.test_list_values_required_params()
+
+ @responses.activate
+ def test_list_values_value_error(self):
+ """
+ test_list_values_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values')
+ mock_response = '{"values": [{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "entity": entity,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.list_values(**req_copy)
+
+ def test_list_values_value_error_with_retries(self):
+ # Enable retries and run test_list_values_value_error.
+ _service.enable_retries()
+ self.test_list_values_value_error()
+
+ # Disable retries and run test_list_values_value_error.
+ _service.disable_retries()
+ self.test_list_values_value_error()
+
+
+class TestCreateValue:
+ """
+ Test Class for create_value
+ """
+
+ @responses.activate
+ def test_create_value_all_params(self):
+ """
+ create_value()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values')
+ mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+ metadata = {'anyKey': 'anyValue'}
+ type = 'synonyms'
+ synonyms = ['testString']
+ patterns = ['testString']
+ include_audit = False
+
+ # Invoke method
+ response = _service.create_value(
+ workspace_id,
+ entity,
+ value,
+ metadata=metadata,
+ type=type,
+ synonyms=synonyms,
+ patterns=patterns,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['value'] == 'testString'
+ assert req_body['metadata'] == {'anyKey': 'anyValue'}
+ assert req_body['type'] == 'synonyms'
+ assert req_body['synonyms'] == ['testString']
+ assert req_body['patterns'] == ['testString']
+
+ def test_create_value_all_params_with_retries(self):
+ # Enable retries and run test_create_value_all_params.
+ _service.enable_retries()
+ self.test_create_value_all_params()
+
+ # Disable retries and run test_create_value_all_params.
+ _service.disable_retries()
+ self.test_create_value_all_params()
+
+ @responses.activate
+ def test_create_value_required_params(self):
+ """
+ test_create_value_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values')
+ mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+ metadata = {'anyKey': 'anyValue'}
+ type = 'synonyms'
+ synonyms = ['testString']
+ patterns = ['testString']
+
+ # Invoke method
+ response = _service.create_value(
+ workspace_id,
+ entity,
+ value,
+ metadata=metadata,
+ type=type,
+ synonyms=synonyms,
+ patterns=patterns,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['value'] == 'testString'
+ assert req_body['metadata'] == {'anyKey': 'anyValue'}
+ assert req_body['type'] == 'synonyms'
+ assert req_body['synonyms'] == ['testString']
+ assert req_body['patterns'] == ['testString']
+
+ def test_create_value_required_params_with_retries(self):
+ # Enable retries and run test_create_value_required_params.
+ _service.enable_retries()
+ self.test_create_value_required_params()
+
+ # Disable retries and run test_create_value_required_params.
+ _service.disable_retries()
+ self.test_create_value_required_params()
+
+ @responses.activate
+ def test_create_value_value_error(self):
+ """
+ test_create_value_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values')
+ mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+ metadata = {'anyKey': 'anyValue'}
+ type = 'synonyms'
+ synonyms = ['testString']
+ patterns = ['testString']
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "entity": entity,
+ "value": value,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.create_value(**req_copy)
+
+ def test_create_value_value_error_with_retries(self):
+ # Enable retries and run test_create_value_value_error.
+ _service.enable_retries()
+ self.test_create_value_value_error()
+
+ # Disable retries and run test_create_value_value_error.
+ _service.disable_retries()
+ self.test_create_value_value_error()
+
+
+class TestGetValue:
+ """
+ Test Class for get_value
+ """
+
+ @responses.activate
+ def test_get_value_all_params(self):
+ """
+ get_value()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString')
+ mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+ export = False
+ include_audit = False
+
+ # Invoke method
+ response = _service.get_value(
+ workspace_id,
+ entity,
+ value,
+ export=export,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'export={}'.format('true' if export else 'false') in query_string
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_get_value_all_params_with_retries(self):
+ # Enable retries and run test_get_value_all_params.
+ _service.enable_retries()
+ self.test_get_value_all_params()
+
+ # Disable retries and run test_get_value_all_params.
+ _service.disable_retries()
+ self.test_get_value_all_params()
+
+ @responses.activate
+ def test_get_value_required_params(self):
+ """
+ test_get_value_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString')
+ mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+
+ # Invoke method
+ response = _service.get_value(
+ workspace_id,
+ entity,
+ value,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_get_value_required_params_with_retries(self):
+ # Enable retries and run test_get_value_required_params.
+ _service.enable_retries()
+ self.test_get_value_required_params()
+
+ # Disable retries and run test_get_value_required_params.
+ _service.disable_retries()
+ self.test_get_value_required_params()
+
+ @responses.activate
+ def test_get_value_value_error(self):
+ """
+ test_get_value_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString')
+ mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "entity": entity,
+ "value": value,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.get_value(**req_copy)
+
+ def test_get_value_value_error_with_retries(self):
+ # Enable retries and run test_get_value_value_error.
+ _service.enable_retries()
+ self.test_get_value_value_error()
+
+ # Disable retries and run test_get_value_value_error.
+ _service.disable_retries()
+ self.test_get_value_value_error()
+
+
+class TestUpdateValue:
+ """
+ Test Class for update_value
+ """
+
+ @responses.activate
+ def test_update_value_all_params(self):
+ """
+ update_value()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString')
+ mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+ new_value = 'testString'
+ new_metadata = {'anyKey': 'anyValue'}
+ new_type = 'synonyms'
+ new_synonyms = ['testString']
+ new_patterns = ['testString']
+ append = False
+ include_audit = False
+
+ # Invoke method
+ response = _service.update_value(
+ workspace_id,
+ entity,
+ value,
+ new_value=new_value,
+ new_metadata=new_metadata,
+ new_type=new_type,
+ new_synonyms=new_synonyms,
+ new_patterns=new_patterns,
+ append=append,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'append={}'.format('true' if append else 'false') in query_string
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['value'] == 'testString'
+ assert req_body['metadata'] == {'anyKey': 'anyValue'}
+ assert req_body['type'] == 'synonyms'
+ assert req_body['synonyms'] == ['testString']
+ assert req_body['patterns'] == ['testString']
+
+ def test_update_value_all_params_with_retries(self):
+ # Enable retries and run test_update_value_all_params.
+ _service.enable_retries()
+ self.test_update_value_all_params()
+
+ # Disable retries and run test_update_value_all_params.
+ _service.disable_retries()
+ self.test_update_value_all_params()
+
+ @responses.activate
+ def test_update_value_required_params(self):
+ """
+ test_update_value_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString')
+ mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+ new_value = 'testString'
+ new_metadata = {'anyKey': 'anyValue'}
+ new_type = 'synonyms'
+ new_synonyms = ['testString']
+ new_patterns = ['testString']
+
+ # Invoke method
+ response = _service.update_value(
+ workspace_id,
+ entity,
+ value,
+ new_value=new_value,
+ new_metadata=new_metadata,
+ new_type=new_type,
+ new_synonyms=new_synonyms,
+ new_patterns=new_patterns,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['value'] == 'testString'
+ assert req_body['metadata'] == {'anyKey': 'anyValue'}
+ assert req_body['type'] == 'synonyms'
+ assert req_body['synonyms'] == ['testString']
+ assert req_body['patterns'] == ['testString']
+
+ def test_update_value_required_params_with_retries(self):
+ # Enable retries and run test_update_value_required_params.
+ _service.enable_retries()
+ self.test_update_value_required_params()
+
+ # Disable retries and run test_update_value_required_params.
+ _service.disable_retries()
+ self.test_update_value_required_params()
+
+ @responses.activate
+ def test_update_value_value_error(self):
+ """
+ test_update_value_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString')
+ mock_response = '{"value": "value", "metadata": {"anyKey": "anyValue"}, "type": "synonyms", "synonyms": ["synonym"], "patterns": ["pattern"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+ new_value = 'testString'
+ new_metadata = {'anyKey': 'anyValue'}
+ new_type = 'synonyms'
+ new_synonyms = ['testString']
+ new_patterns = ['testString']
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "entity": entity,
+ "value": value,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.update_value(**req_copy)
+
+ def test_update_value_value_error_with_retries(self):
+ # Enable retries and run test_update_value_value_error.
+ _service.enable_retries()
+ self.test_update_value_value_error()
+
+ # Disable retries and run test_update_value_value_error.
+ _service.disable_retries()
+ self.test_update_value_value_error()
+
+
+class TestDeleteValue:
+ """
+ Test Class for delete_value
+ """
+
+ @responses.activate
+ def test_delete_value_all_params(self):
+ """
+ delete_value()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+
+ # Invoke method
+ response = _service.delete_value(
+ workspace_id,
+ entity,
+ value,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_delete_value_all_params_with_retries(self):
+ # Enable retries and run test_delete_value_all_params.
+ _service.enable_retries()
+ self.test_delete_value_all_params()
+
+ # Disable retries and run test_delete_value_all_params.
+ _service.disable_retries()
+ self.test_delete_value_all_params()
+
+ @responses.activate
+ def test_delete_value_value_error(self):
+ """
+ test_delete_value_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "entity": entity,
+ "value": value,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_value(**req_copy)
+
+ def test_delete_value_value_error_with_retries(self):
+ # Enable retries and run test_delete_value_value_error.
+ _service.enable_retries()
+ self.test_delete_value_value_error()
+
+ # Disable retries and run test_delete_value_value_error.
+ _service.disable_retries()
+ self.test_delete_value_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Values
+##############################################################################
+
+##############################################################################
+# Start of Service: Synonyms
+##############################################################################
+# region
+
+
+class TestListSynonyms:
+ """
+ Test Class for list_synonyms
+ """
+
+ @responses.activate
+ def test_list_synonyms_all_params(self):
+ """
+ list_synonyms()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms')
+ mock_response = '{"synonyms": [{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+ page_limit = 100
+ include_count = False
+ sort = 'synonym'
+ cursor = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.list_synonyms(
+ workspace_id,
+ entity,
+ value,
+ page_limit=page_limit,
+ include_count=include_count,
+ sort=sort,
+ cursor=cursor,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'page_limit={}'.format(page_limit) in query_string
+ assert 'include_count={}'.format('true' if include_count else 'false') in query_string
+ assert 'sort={}'.format(sort) in query_string
+ assert 'cursor={}'.format(cursor) in query_string
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_list_synonyms_all_params_with_retries(self):
+ # Enable retries and run test_list_synonyms_all_params.
+ _service.enable_retries()
+ self.test_list_synonyms_all_params()
+
+ # Disable retries and run test_list_synonyms_all_params.
+ _service.disable_retries()
+ self.test_list_synonyms_all_params()
+
+ @responses.activate
+ def test_list_synonyms_required_params(self):
+ """
+ test_list_synonyms_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms')
+ mock_response = '{"synonyms": [{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+
+ # Invoke method
+ response = _service.list_synonyms(
+ workspace_id,
+ entity,
+ value,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_synonyms_required_params_with_retries(self):
+ # Enable retries and run test_list_synonyms_required_params.
+ _service.enable_retries()
+ self.test_list_synonyms_required_params()
+
+ # Disable retries and run test_list_synonyms_required_params.
+ _service.disable_retries()
+ self.test_list_synonyms_required_params()
+
+ @responses.activate
+ def test_list_synonyms_value_error(self):
+ """
+ test_list_synonyms_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms')
+ mock_response = '{"synonyms": [{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "entity": entity,
+ "value": value,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.list_synonyms(**req_copy)
+
+ def test_list_synonyms_value_error_with_retries(self):
+ # Enable retries and run test_list_synonyms_value_error.
+ _service.enable_retries()
+ self.test_list_synonyms_value_error()
+
+ # Disable retries and run test_list_synonyms_value_error.
+ _service.disable_retries()
+ self.test_list_synonyms_value_error()
+
+
+class TestCreateSynonym:
+ """
+ Test Class for create_synonym
+ """
+
+ @responses.activate
+ def test_create_synonym_all_params(self):
+ """
+ create_synonym()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms')
+ mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+ synonym = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.create_synonym(
+ workspace_id,
+ entity,
+ value,
+ synonym,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['synonym'] == 'testString'
+
+ def test_create_synonym_all_params_with_retries(self):
+ # Enable retries and run test_create_synonym_all_params.
+ _service.enable_retries()
+ self.test_create_synonym_all_params()
+
+ # Disable retries and run test_create_synonym_all_params.
+ _service.disable_retries()
+ self.test_create_synonym_all_params()
+
+ @responses.activate
+ def test_create_synonym_required_params(self):
+ """
+ test_create_synonym_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms')
+ mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+ synonym = 'testString'
+
+ # Invoke method
+ response = _service.create_synonym(
+ workspace_id,
+ entity,
+ value,
+ synonym,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['synonym'] == 'testString'
+
+ def test_create_synonym_required_params_with_retries(self):
+ # Enable retries and run test_create_synonym_required_params.
+ _service.enable_retries()
+ self.test_create_synonym_required_params()
+
+ # Disable retries and run test_create_synonym_required_params.
+ _service.disable_retries()
+ self.test_create_synonym_required_params()
+
+ @responses.activate
+ def test_create_synonym_value_error(self):
+ """
+ test_create_synonym_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms')
+ mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+ synonym = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "entity": entity,
+ "value": value,
+ "synonym": synonym,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.create_synonym(**req_copy)
+
+ def test_create_synonym_value_error_with_retries(self):
+ # Enable retries and run test_create_synonym_value_error.
+ _service.enable_retries()
+ self.test_create_synonym_value_error()
+
+ # Disable retries and run test_create_synonym_value_error.
+ _service.disable_retries()
+ self.test_create_synonym_value_error()
+
+
+class TestGetSynonym:
+ """
+ Test Class for get_synonym
+ """
+
+ @responses.activate
+ def test_get_synonym_all_params(self):
+ """
+ get_synonym()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString')
+ mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+ synonym = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.get_synonym(
+ workspace_id,
+ entity,
+ value,
+ synonym,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_get_synonym_all_params_with_retries(self):
+ # Enable retries and run test_get_synonym_all_params.
+ _service.enable_retries()
+ self.test_get_synonym_all_params()
+
+ # Disable retries and run test_get_synonym_all_params.
+ _service.disable_retries()
+ self.test_get_synonym_all_params()
+
+ @responses.activate
+ def test_get_synonym_required_params(self):
+ """
+ test_get_synonym_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString')
+ mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+ synonym = 'testString'
+
+ # Invoke method
+ response = _service.get_synonym(
+ workspace_id,
+ entity,
+ value,
+ synonym,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_get_synonym_required_params_with_retries(self):
+ # Enable retries and run test_get_synonym_required_params.
+ _service.enable_retries()
+ self.test_get_synonym_required_params()
+
+ # Disable retries and run test_get_synonym_required_params.
+ _service.disable_retries()
+ self.test_get_synonym_required_params()
+
+ @responses.activate
+ def test_get_synonym_value_error(self):
+ """
+ test_get_synonym_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString')
+ mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ entity = 'testString'
+ value = 'testString'
+ synonym = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "entity": entity,
+ "value": value,
+ "synonym": synonym,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.get_synonym(**req_copy)
+
+    def test_get_synonym_value_error_with_retries(self):
+        # Re-run test_get_synonym_value_error with request retries enabled.
+        _service.enable_retries()
+        self.test_get_synonym_value_error()
+
+        # Re-run test_get_synonym_value_error with request retries disabled.
+        _service.disable_retries()
+        self.test_get_synonym_value_error()
+
+
+class TestUpdateSynonym:
+    """
+    Unit tests for the update_synonym operation (POST synonym endpoint).
+    """
+
+    @responses.activate
+    def test_update_synonym_all_params(self):
+        """
+        update_synonym() succeeds when all optional params are supplied.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString')
+        mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        workspace_id = 'testString'
+        entity = 'testString'
+        value = 'testString'
+        synonym = 'testString'
+        new_synonym = 'testString'
+        include_audit = False
+
+        # Invoke method
+        response = _service.update_synonym(
+            workspace_id,
+            entity,
+            value,
+            synonym,
+            new_synonym=new_synonym,
+            include_audit=include_audit,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)
+        assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+        assert req_body['synonym'] == 'testString'
+
+    def test_update_synonym_all_params_with_retries(self):
+        # Re-run test_update_synonym_all_params with request retries enabled.
+        _service.enable_retries()
+        self.test_update_synonym_all_params()
+
+        # Re-run test_update_synonym_all_params with request retries disabled.
+        _service.disable_retries()
+        self.test_update_synonym_all_params()
+
+    @responses.activate
+    def test_update_synonym_required_params(self):
+        """
+        update_synonym() succeeds with only the required params supplied.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString')
+        mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        workspace_id = 'testString'
+        entity = 'testString'
+        value = 'testString'
+        synonym = 'testString'
+        new_synonym = 'testString'
+
+        # Invoke method
+        response = _service.update_synonym(
+            workspace_id,
+            entity,
+            value,
+            synonym,
+            new_synonym=new_synonym,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+        assert req_body['synonym'] == 'testString'
+
+    def test_update_synonym_required_params_with_retries(self):
+        # Re-run test_update_synonym_required_params with request retries enabled.
+        _service.enable_retries()
+        self.test_update_synonym_required_params()
+
+        # Re-run test_update_synonym_required_params with request retries disabled.
+        _service.disable_retries()
+        self.test_update_synonym_required_params()
+
+    @responses.activate
+    def test_update_synonym_value_error(self):
+        """
+        update_synonym() raises ValueError when any required param is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString')
+        mock_response = '{"synonym": "synonym", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        workspace_id = 'testString'
+        entity = 'testString'
+        value = 'testString'
+        synonym = 'testString'
+        new_synonym = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "workspace_id": workspace_id,
+            "entity": entity,
+            "value": value,
+            "synonym": synonym,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.update_synonym(**req_copy)
+
+    def test_update_synonym_value_error_with_retries(self):
+        # Re-run test_update_synonym_value_error with request retries enabled.
+        _service.enable_retries()
+        self.test_update_synonym_value_error()
+
+        # Re-run test_update_synonym_value_error with request retries disabled.
+        _service.disable_retries()
+        self.test_update_synonym_value_error()
+
+
+class TestDeleteSynonym:
+    """
+    Unit tests for the delete_synonym operation (DELETE synonym endpoint).
+    """
+
+    @responses.activate
+    def test_delete_synonym_all_params(self):
+        """
+        delete_synonym() succeeds with all params supplied.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        workspace_id = 'testString'
+        entity = 'testString'
+        value = 'testString'
+        synonym = 'testString'
+
+        # Invoke method
+        response = _service.delete_synonym(
+            workspace_id,
+            entity,
+            value,
+            synonym,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_delete_synonym_all_params_with_retries(self):
+        # Re-run test_delete_synonym_all_params with request retries enabled.
+        _service.enable_retries()
+        self.test_delete_synonym_all_params()
+
+        # Re-run test_delete_synonym_all_params with request retries disabled.
+        _service.disable_retries()
+        self.test_delete_synonym_all_params()
+
+    @responses.activate
+    def test_delete_synonym_value_error(self):
+        """
+        delete_synonym() raises ValueError when any required param is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/workspaces/testString/entities/testString/values/testString/synonyms/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        workspace_id = 'testString'
+        entity = 'testString'
+        value = 'testString'
+        synonym = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "workspace_id": workspace_id,
+            "entity": entity,
+            "value": value,
+            "synonym": synonym,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.delete_synonym(**req_copy)
+
+    def test_delete_synonym_value_error_with_retries(self):
+        # Re-run test_delete_synonym_value_error with request retries enabled.
+        _service.enable_retries()
+        self.test_delete_synonym_value_error()
+
+        # Re-run test_delete_synonym_value_error with request retries disabled.
+        _service.disable_retries()
+        self.test_delete_synonym_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Synonyms
+##############################################################################
+
+##############################################################################
+# Start of Service: DialogNodes
+##############################################################################
+# region
+
+
+class TestListDialogNodes:
+    """
+    Unit tests for the list_dialog_nodes operation (GET dialog_nodes endpoint).
+    """
+
+    @responses.activate
+    def test_list_dialog_nodes_all_params(self):
+        """
+        list_dialog_nodes() succeeds when all optional params are supplied.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/workspaces/testString/dialog_nodes')
+        mock_response = '{"dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        workspace_id = 'testString'
+        page_limit = 100
+        include_count = False
+        sort = 'dialog_node'
+        cursor = 'testString'
+        include_audit = False
+
+        # Invoke method
+        response = _service.list_dialog_nodes(
+            workspace_id,
+            page_limit=page_limit,
+            include_count=include_count,
+            sort=sort,
+            cursor=cursor,
+            include_audit=include_audit,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)
+        assert 'page_limit={}'.format(page_limit) in query_string
+        assert 'include_count={}'.format('true' if include_count else 'false') in query_string
+        assert 'sort={}'.format(sort) in query_string
+        assert 'cursor={}'.format(cursor) in query_string
+        assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+    def test_list_dialog_nodes_all_params_with_retries(self):
+        # Re-run test_list_dialog_nodes_all_params with request retries enabled.
+        _service.enable_retries()
+        self.test_list_dialog_nodes_all_params()
+
+        # Re-run test_list_dialog_nodes_all_params with request retries disabled.
+        _service.disable_retries()
+        self.test_list_dialog_nodes_all_params()
+
+    @responses.activate
+    def test_list_dialog_nodes_required_params(self):
+        """
+        list_dialog_nodes() succeeds with only the required params supplied.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/workspaces/testString/dialog_nodes')
+        mock_response = '{"dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        workspace_id = 'testString'
+
+        # Invoke method
+        response = _service.list_dialog_nodes(
+            workspace_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_list_dialog_nodes_required_params_with_retries(self):
+        # Re-run test_list_dialog_nodes_required_params with request retries enabled.
+        _service.enable_retries()
+        self.test_list_dialog_nodes_required_params()
+
+        # Re-run test_list_dialog_nodes_required_params with request retries disabled.
+        _service.disable_retries()
+        self.test_list_dialog_nodes_required_params()
+
+    @responses.activate
+    def test_list_dialog_nodes_value_error(self):
+        """
+        list_dialog_nodes() raises ValueError when any required param is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/workspaces/testString/dialog_nodes')
+        mock_response = '{"dialog_nodes": [{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        workspace_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "workspace_id": workspace_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.list_dialog_nodes(**req_copy)
+
+    def test_list_dialog_nodes_value_error_with_retries(self):
+        # Re-run test_list_dialog_nodes_value_error with request retries enabled.
+        _service.enable_retries()
+        self.test_list_dialog_nodes_value_error()
+
+        # Re-run test_list_dialog_nodes_value_error with request retries disabled.
+        _service.disable_retries()
+        self.test_list_dialog_nodes_value_error()
+
+
+class TestCreateDialogNode:
+    """
+    Unit tests for the create_dialog_node operation (POST dialog_nodes endpoint).
+    """
+
+    @responses.activate
+    def test_create_dialog_node_all_params(self):
+        """
+        create_dialog_node() succeeds when all optional params are supplied.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/workspaces/testString/dialog_nodes')
+        mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Construct a dict representation of a DialogNodeOutputTextValuesElement model
+        dialog_node_output_text_values_element_model = {}
+        dialog_node_output_text_values_element_model['text'] = 'testString'
+
+        # Construct a dict representation of a ResponseGenericChannel model
+        response_generic_channel_model = {}
+        response_generic_channel_model['channel'] = 'chat'
+
+        # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model
+        dialog_node_output_generic_model = {}
+        dialog_node_output_generic_model['response_type'] = 'text'
+        dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model]
+        dialog_node_output_generic_model['selection_policy'] = 'sequential'
+        dialog_node_output_generic_model['delimiter'] = '\\n'
+        dialog_node_output_generic_model['channels'] = [response_generic_channel_model]
+
+        # Construct a dict representation of a DialogNodeOutputModifiers model
+        dialog_node_output_modifiers_model = {}
+        dialog_node_output_modifiers_model['overwrite'] = True
+
+        # Construct a dict representation of a DialogNodeOutput model
+        dialog_node_output_model = {}
+        dialog_node_output_model['generic'] = [dialog_node_output_generic_model]
+        dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+        dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model
+        dialog_node_output_model['foo'] = 'testString'
+
+        # Construct a dict representation of a DialogNodeContext model
+        dialog_node_context_model = {}
+        dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+        dialog_node_context_model['foo'] = 'testString'
+
+        # Construct a dict representation of a DialogNodeNextStep model
+        dialog_node_next_step_model = {}
+        dialog_node_next_step_model['behavior'] = 'get_user_input'
+        dialog_node_next_step_model['dialog_node'] = 'testString'
+        dialog_node_next_step_model['selector'] = 'condition'
+
+        # Construct a dict representation of a DialogNodeAction model
+        dialog_node_action_model = {}
+        dialog_node_action_model['name'] = 'testString'
+        dialog_node_action_model['type'] = 'client'
+        dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+        dialog_node_action_model['result_variable'] = 'testString'
+        dialog_node_action_model['credentials'] = 'testString'
+
+        # Set up parameter values
+        workspace_id = 'testString'
+        dialog_node = 'testString'
+        description = 'testString'
+        conditions = 'testString'
+        parent = 'testString'
+        previous_sibling = 'testString'
+        output = dialog_node_output_model
+        context = dialog_node_context_model
+        metadata = {'anyKey': 'anyValue'}
+        next_step = dialog_node_next_step_model
+        title = 'testString'
+        type = 'standard'
+        event_name = 'focus'
+        variable = 'testString'
+        actions = [dialog_node_action_model]
+        digress_in = 'not_available'
+        digress_out = 'allow_returning'
+        digress_out_slots = 'not_allowed'
+        user_label = 'testString'
+        disambiguation_opt_out = False
+        include_audit = False
+
+        # Invoke method
+        response = _service.create_dialog_node(
+            workspace_id,
+            dialog_node,
+            description=description,
+            conditions=conditions,
+            parent=parent,
+            previous_sibling=previous_sibling,
+            output=output,
+            context=context,
+            metadata=metadata,
+            next_step=next_step,
+            title=title,
+            type=type,
+            event_name=event_name,
+            variable=variable,
+            actions=actions,
+            digress_in=digress_in,
+            digress_out=digress_out,
+            digress_out_slots=digress_out_slots,
+            user_label=user_label,
+            disambiguation_opt_out=disambiguation_opt_out,
+            include_audit=include_audit,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)
+        assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+        assert req_body['dialog_node'] == 'testString'
+        assert req_body['description'] == 'testString'
+        assert req_body['conditions'] == 'testString'
+        assert req_body['parent'] == 'testString'
+        assert req_body['previous_sibling'] == 'testString'
+        assert req_body['output'] == dialog_node_output_model
+        assert req_body['context'] == dialog_node_context_model
+        assert req_body['metadata'] == {'anyKey': 'anyValue'}
+        assert req_body['next_step'] == dialog_node_next_step_model
+        assert req_body['title'] == 'testString'
+        assert req_body['type'] == 'standard'
+        assert req_body['event_name'] == 'focus'
+        assert req_body['variable'] == 'testString'
+        assert req_body['actions'] == [dialog_node_action_model]
+        assert req_body['digress_in'] == 'not_available'
+        assert req_body['digress_out'] == 'allow_returning'
+        assert req_body['digress_out_slots'] == 'not_allowed'
+        assert req_body['user_label'] == 'testString'
+        assert req_body['disambiguation_opt_out'] == False
+
+    def test_create_dialog_node_all_params_with_retries(self):
+        # Re-run test_create_dialog_node_all_params with request retries enabled.
+        _service.enable_retries()
+        self.test_create_dialog_node_all_params()
+
+        # Re-run test_create_dialog_node_all_params with request retries disabled.
+        _service.disable_retries()
+        self.test_create_dialog_node_all_params()
+
+    @responses.activate
+    def test_create_dialog_node_required_params(self):
+        """
+        create_dialog_node() succeeds with only the required params supplied.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/workspaces/testString/dialog_nodes')
+        mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Construct a dict representation of a DialogNodeOutputTextValuesElement model
+        dialog_node_output_text_values_element_model = {}
+        dialog_node_output_text_values_element_model['text'] = 'testString'
+
+        # Construct a dict representation of a ResponseGenericChannel model
+        response_generic_channel_model = {}
+        response_generic_channel_model['channel'] = 'chat'
+
+        # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model
+        dialog_node_output_generic_model = {}
+        dialog_node_output_generic_model['response_type'] = 'text'
+        dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model]
+        dialog_node_output_generic_model['selection_policy'] = 'sequential'
+        dialog_node_output_generic_model['delimiter'] = '\\n'
+        dialog_node_output_generic_model['channels'] = [response_generic_channel_model]
+
+        # Construct a dict representation of a DialogNodeOutputModifiers model
+        dialog_node_output_modifiers_model = {}
+        dialog_node_output_modifiers_model['overwrite'] = True
+
+        # Construct a dict representation of a DialogNodeOutput model
+        dialog_node_output_model = {}
+        dialog_node_output_model['generic'] = [dialog_node_output_generic_model]
+        dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+        dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model
+        dialog_node_output_model['foo'] = 'testString'
+
+        # Construct a dict representation of a DialogNodeContext model
+        dialog_node_context_model = {}
+        dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+        dialog_node_context_model['foo'] = 'testString'
+
+        # Construct a dict representation of a DialogNodeNextStep model
+        dialog_node_next_step_model = {}
+        dialog_node_next_step_model['behavior'] = 'get_user_input'
+        dialog_node_next_step_model['dialog_node'] = 'testString'
+        dialog_node_next_step_model['selector'] = 'condition'
+
+        # Construct a dict representation of a DialogNodeAction model
+        dialog_node_action_model = {}
+        dialog_node_action_model['name'] = 'testString'
+        dialog_node_action_model['type'] = 'client'
+        dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+        dialog_node_action_model['result_variable'] = 'testString'
+        dialog_node_action_model['credentials'] = 'testString'
+
+        # Set up parameter values
+        workspace_id = 'testString'
+        dialog_node = 'testString'
+        description = 'testString'
+        conditions = 'testString'
+        parent = 'testString'
+        previous_sibling = 'testString'
+        output = dialog_node_output_model
+        context = dialog_node_context_model
+        metadata = {'anyKey': 'anyValue'}
+        next_step = dialog_node_next_step_model
+        title = 'testString'
+        type = 'standard'
+        event_name = 'focus'
+        variable = 'testString'
+        actions = [dialog_node_action_model]
+        digress_in = 'not_available'
+        digress_out = 'allow_returning'
+        digress_out_slots = 'not_allowed'
+        user_label = 'testString'
+        disambiguation_opt_out = False
+
+        # Invoke method
+        response = _service.create_dialog_node(
+            workspace_id,
+            dialog_node,
+            description=description,
+            conditions=conditions,
+            parent=parent,
+            previous_sibling=previous_sibling,
+            output=output,
+            context=context,
+            metadata=metadata,
+            next_step=next_step,
+            title=title,
+            type=type,
+            event_name=event_name,
+            variable=variable,
+            actions=actions,
+            digress_in=digress_in,
+            digress_out=digress_out,
+            digress_out_slots=digress_out_slots,
+            user_label=user_label,
+            disambiguation_opt_out=disambiguation_opt_out,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+        assert req_body['dialog_node'] == 'testString'
+        assert req_body['description'] == 'testString'
+        assert req_body['conditions'] == 'testString'
+        assert req_body['parent'] == 'testString'
+        assert req_body['previous_sibling'] == 'testString'
+        assert req_body['output'] == dialog_node_output_model
+        assert req_body['context'] == dialog_node_context_model
+        assert req_body['metadata'] == {'anyKey': 'anyValue'}
+        assert req_body['next_step'] == dialog_node_next_step_model
+        assert req_body['title'] == 'testString'
+        assert req_body['type'] == 'standard'
+        assert req_body['event_name'] == 'focus'
+        assert req_body['variable'] == 'testString'
+        assert req_body['actions'] == [dialog_node_action_model]
+        assert req_body['digress_in'] == 'not_available'
+        assert req_body['digress_out'] == 'allow_returning'
+        assert req_body['digress_out_slots'] == 'not_allowed'
+        assert req_body['user_label'] == 'testString'
+        assert req_body['disambiguation_opt_out'] == False
+
+    def test_create_dialog_node_required_params_with_retries(self):
+        # Re-run test_create_dialog_node_required_params with request retries enabled.
+        _service.enable_retries()
+        self.test_create_dialog_node_required_params()
+
+        # Re-run test_create_dialog_node_required_params with request retries disabled.
+        _service.disable_retries()
+        self.test_create_dialog_node_required_params()
+
+    @responses.activate
+    def test_create_dialog_node_value_error(self):
+        """
+        create_dialog_node() raises ValueError when any required param is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/workspaces/testString/dialog_nodes')
+        mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Construct a dict representation of a DialogNodeOutputTextValuesElement model
+        dialog_node_output_text_values_element_model = {}
+        dialog_node_output_text_values_element_model['text'] = 'testString'
+
+        # Construct a dict representation of a ResponseGenericChannel model
+        response_generic_channel_model = {}
+        response_generic_channel_model['channel'] = 'chat'
+
+        # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model
+        dialog_node_output_generic_model = {}
+        dialog_node_output_generic_model['response_type'] = 'text'
+        dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model]
+        dialog_node_output_generic_model['selection_policy'] = 'sequential'
+        dialog_node_output_generic_model['delimiter'] = '\\n'
+        dialog_node_output_generic_model['channels'] = [response_generic_channel_model]
+
+        # Construct a dict representation of a DialogNodeOutputModifiers model
+        dialog_node_output_modifiers_model = {}
+        dialog_node_output_modifiers_model['overwrite'] = True
+
+        # Construct a dict representation of a DialogNodeOutput model
+        dialog_node_output_model = {}
+        dialog_node_output_model['generic'] = [dialog_node_output_generic_model]
+        dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+        dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model
+        dialog_node_output_model['foo'] = 'testString'
+
+        # Construct a dict representation of a DialogNodeContext model
+        dialog_node_context_model = {}
+        dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+        dialog_node_context_model['foo'] = 'testString'
+
+        # Construct a dict representation of a DialogNodeNextStep model
+        dialog_node_next_step_model = {}
+        dialog_node_next_step_model['behavior'] = 'get_user_input'
+        dialog_node_next_step_model['dialog_node'] = 'testString'
+        dialog_node_next_step_model['selector'] = 'condition'
+
+        # Construct a dict representation of a DialogNodeAction model
+        dialog_node_action_model = {}
+        dialog_node_action_model['name'] = 'testString'
+        dialog_node_action_model['type'] = 'client'
+        dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+        dialog_node_action_model['result_variable'] = 'testString'
+        dialog_node_action_model['credentials'] = 'testString'
+
+        # Set up parameter values
+        workspace_id = 'testString'
+        dialog_node = 'testString'
+        description = 'testString'
+        conditions = 'testString'
+        parent = 'testString'
+        previous_sibling = 'testString'
+        output = dialog_node_output_model
+        context = dialog_node_context_model
+        metadata = {'anyKey': 'anyValue'}
+        next_step = dialog_node_next_step_model
+        title = 'testString'
+        type = 'standard'
+        event_name = 'focus'
+        variable = 'testString'
+        actions = [dialog_node_action_model]
+        digress_in = 'not_available'
+        digress_out = 'allow_returning'
+        digress_out_slots = 'not_allowed'
+        user_label = 'testString'
+        disambiguation_opt_out = False
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "workspace_id": workspace_id,
+            "dialog_node": dialog_node,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.create_dialog_node(**req_copy)
+
+    def test_create_dialog_node_value_error_with_retries(self):
+        # Re-run test_create_dialog_node_value_error with request retries enabled.
+        _service.enable_retries()
+        self.test_create_dialog_node_value_error()
+
+        # Re-run test_create_dialog_node_value_error with request retries disabled.
+        _service.disable_retries()
+        self.test_create_dialog_node_value_error()
+
+
+class TestGetDialogNode:
+ """
+ Test Class for get_dialog_node
+ """
+
+ @responses.activate
+ def test_get_dialog_node_all_params(self):
+ """
+ get_dialog_node()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString')
+ mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ dialog_node = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.get_dialog_node(
+ workspace_id,
+ dialog_node,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_get_dialog_node_all_params_with_retries(self):
+ # Enable retries and run test_get_dialog_node_all_params.
+ _service.enable_retries()
+ self.test_get_dialog_node_all_params()
+
+ # Disable retries and run test_get_dialog_node_all_params.
+ _service.disable_retries()
+ self.test_get_dialog_node_all_params()
+
+ @responses.activate
+ def test_get_dialog_node_required_params(self):
+ """
+ test_get_dialog_node_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString')
+ mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ dialog_node = 'testString'
+
+ # Invoke method
+ response = _service.get_dialog_node(
+ workspace_id,
+ dialog_node,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_get_dialog_node_required_params_with_retries(self):
+ # Enable retries and run test_get_dialog_node_required_params.
+ _service.enable_retries()
+ self.test_get_dialog_node_required_params()
+
+ # Disable retries and run test_get_dialog_node_required_params.
+ _service.disable_retries()
+ self.test_get_dialog_node_required_params()
+
+ @responses.activate
+ def test_get_dialog_node_value_error(self):
+ """
+ test_get_dialog_node_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString')
+ mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ dialog_node = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "dialog_node": dialog_node,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.get_dialog_node(**req_copy)
+
+ def test_get_dialog_node_value_error_with_retries(self):
+ # Enable retries and run test_get_dialog_node_value_error.
+ _service.enable_retries()
+ self.test_get_dialog_node_value_error()
+
+ # Disable retries and run test_get_dialog_node_value_error.
+ _service.disable_retries()
+ self.test_get_dialog_node_value_error()
+
+
+class TestUpdateDialogNode:
+ """
+ Test Class for update_dialog_node
+ """
+
+ @responses.activate
+ def test_update_dialog_node_all_params(self):
+ """
+ update_dialog_node()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString')
+ mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a DialogNodeOutputTextValuesElement model
+ dialog_node_output_text_values_element_model = {}
+ dialog_node_output_text_values_element_model['text'] = 'testString'
+
+ # Construct a dict representation of a ResponseGenericChannel model
+ response_generic_channel_model = {}
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model
+ dialog_node_output_generic_model = {}
+ dialog_node_output_generic_model['response_type'] = 'text'
+ dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model]
+ dialog_node_output_generic_model['selection_policy'] = 'sequential'
+ dialog_node_output_generic_model['delimiter'] = '\\n'
+ dialog_node_output_generic_model['channels'] = [response_generic_channel_model]
+
+ # Construct a dict representation of a DialogNodeOutputModifiers model
+ dialog_node_output_modifiers_model = {}
+ dialog_node_output_modifiers_model['overwrite'] = True
+
+ # Construct a dict representation of a DialogNodeOutput model
+ dialog_node_output_model = {}
+ dialog_node_output_model['generic'] = [dialog_node_output_generic_model]
+ dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model
+ dialog_node_output_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeContext model
+ dialog_node_context_model = {}
+ dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_context_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeNextStep model
+ dialog_node_next_step_model = {}
+ dialog_node_next_step_model['behavior'] = 'get_user_input'
+ dialog_node_next_step_model['dialog_node'] = 'testString'
+ dialog_node_next_step_model['selector'] = 'condition'
+
+ # Construct a dict representation of a DialogNodeAction model
+ dialog_node_action_model = {}
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ dialog_node = 'testString'
+ new_dialog_node = 'testString'
+ new_description = 'testString'
+ new_conditions = 'testString'
+ new_parent = 'testString'
+ new_previous_sibling = 'testString'
+ new_output = dialog_node_output_model
+ new_context = dialog_node_context_model
+ new_metadata = {'anyKey': 'anyValue'}
+ new_next_step = dialog_node_next_step_model
+ new_title = 'testString'
+ new_type = 'standard'
+ new_event_name = 'focus'
+ new_variable = 'testString'
+ new_actions = [dialog_node_action_model]
+ new_digress_in = 'not_available'
+ new_digress_out = 'allow_returning'
+ new_digress_out_slots = 'not_allowed'
+ new_user_label = 'testString'
+ new_disambiguation_opt_out = False
+ include_audit = False
+
+ # Invoke method
+ response = _service.update_dialog_node(
+ workspace_id,
+ dialog_node,
+ new_dialog_node=new_dialog_node,
+ new_description=new_description,
+ new_conditions=new_conditions,
+ new_parent=new_parent,
+ new_previous_sibling=new_previous_sibling,
+ new_output=new_output,
+ new_context=new_context,
+ new_metadata=new_metadata,
+ new_next_step=new_next_step,
+ new_title=new_title,
+ new_type=new_type,
+ new_event_name=new_event_name,
+ new_variable=new_variable,
+ new_actions=new_actions,
+ new_digress_in=new_digress_in,
+ new_digress_out=new_digress_out,
+ new_digress_out_slots=new_digress_out_slots,
+ new_user_label=new_user_label,
+ new_disambiguation_opt_out=new_disambiguation_opt_out,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['dialog_node'] == 'testString'
+ assert req_body['description'] == 'testString'
+ assert req_body['conditions'] == 'testString'
+ assert req_body['parent'] == 'testString'
+ assert req_body['previous_sibling'] == 'testString'
+ assert req_body['output'] == dialog_node_output_model
+ assert req_body['context'] == dialog_node_context_model
+ assert req_body['metadata'] == {'anyKey': 'anyValue'}
+ assert req_body['next_step'] == dialog_node_next_step_model
+ assert req_body['title'] == 'testString'
+ assert req_body['type'] == 'standard'
+ assert req_body['event_name'] == 'focus'
+ assert req_body['variable'] == 'testString'
+ assert req_body['actions'] == [dialog_node_action_model]
+ assert req_body['digress_in'] == 'not_available'
+ assert req_body['digress_out'] == 'allow_returning'
+ assert req_body['digress_out_slots'] == 'not_allowed'
+ assert req_body['user_label'] == 'testString'
+ assert req_body['disambiguation_opt_out'] == False
+
+ def test_update_dialog_node_all_params_with_retries(self):
+ # Enable retries and run test_update_dialog_node_all_params.
+ _service.enable_retries()
+ self.test_update_dialog_node_all_params()
+
+ # Disable retries and run test_update_dialog_node_all_params.
+ _service.disable_retries()
+ self.test_update_dialog_node_all_params()
+
+ @responses.activate
+ def test_update_dialog_node_required_params(self):
+ """
+ test_update_dialog_node_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString')
+ mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a DialogNodeOutputTextValuesElement model
+ dialog_node_output_text_values_element_model = {}
+ dialog_node_output_text_values_element_model['text'] = 'testString'
+
+ # Construct a dict representation of a ResponseGenericChannel model
+ response_generic_channel_model = {}
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model
+ dialog_node_output_generic_model = {}
+ dialog_node_output_generic_model['response_type'] = 'text'
+ dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model]
+ dialog_node_output_generic_model['selection_policy'] = 'sequential'
+ dialog_node_output_generic_model['delimiter'] = '\\n'
+ dialog_node_output_generic_model['channels'] = [response_generic_channel_model]
+
+ # Construct a dict representation of a DialogNodeOutputModifiers model
+ dialog_node_output_modifiers_model = {}
+ dialog_node_output_modifiers_model['overwrite'] = True
+
+ # Construct a dict representation of a DialogNodeOutput model
+ dialog_node_output_model = {}
+ dialog_node_output_model['generic'] = [dialog_node_output_generic_model]
+ dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model
+ dialog_node_output_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeContext model
+ dialog_node_context_model = {}
+ dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_context_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeNextStep model
+ dialog_node_next_step_model = {}
+ dialog_node_next_step_model['behavior'] = 'get_user_input'
+ dialog_node_next_step_model['dialog_node'] = 'testString'
+ dialog_node_next_step_model['selector'] = 'condition'
+
+ # Construct a dict representation of a DialogNodeAction model
+ dialog_node_action_model = {}
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ dialog_node = 'testString'
+ new_dialog_node = 'testString'
+ new_description = 'testString'
+ new_conditions = 'testString'
+ new_parent = 'testString'
+ new_previous_sibling = 'testString'
+ new_output = dialog_node_output_model
+ new_context = dialog_node_context_model
+ new_metadata = {'anyKey': 'anyValue'}
+ new_next_step = dialog_node_next_step_model
+ new_title = 'testString'
+ new_type = 'standard'
+ new_event_name = 'focus'
+ new_variable = 'testString'
+ new_actions = [dialog_node_action_model]
+ new_digress_in = 'not_available'
+ new_digress_out = 'allow_returning'
+ new_digress_out_slots = 'not_allowed'
+ new_user_label = 'testString'
+ new_disambiguation_opt_out = False
+
+ # Invoke method
+ response = _service.update_dialog_node(
+ workspace_id,
+ dialog_node,
+ new_dialog_node=new_dialog_node,
+ new_description=new_description,
+ new_conditions=new_conditions,
+ new_parent=new_parent,
+ new_previous_sibling=new_previous_sibling,
+ new_output=new_output,
+ new_context=new_context,
+ new_metadata=new_metadata,
+ new_next_step=new_next_step,
+ new_title=new_title,
+ new_type=new_type,
+ new_event_name=new_event_name,
+ new_variable=new_variable,
+ new_actions=new_actions,
+ new_digress_in=new_digress_in,
+ new_digress_out=new_digress_out,
+ new_digress_out_slots=new_digress_out_slots,
+ new_user_label=new_user_label,
+ new_disambiguation_opt_out=new_disambiguation_opt_out,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['dialog_node'] == 'testString'
+ assert req_body['description'] == 'testString'
+ assert req_body['conditions'] == 'testString'
+ assert req_body['parent'] == 'testString'
+ assert req_body['previous_sibling'] == 'testString'
+ assert req_body['output'] == dialog_node_output_model
+ assert req_body['context'] == dialog_node_context_model
+ assert req_body['metadata'] == {'anyKey': 'anyValue'}
+ assert req_body['next_step'] == dialog_node_next_step_model
+ assert req_body['title'] == 'testString'
+ assert req_body['type'] == 'standard'
+ assert req_body['event_name'] == 'focus'
+ assert req_body['variable'] == 'testString'
+ assert req_body['actions'] == [dialog_node_action_model]
+ assert req_body['digress_in'] == 'not_available'
+ assert req_body['digress_out'] == 'allow_returning'
+ assert req_body['digress_out_slots'] == 'not_allowed'
+ assert req_body['user_label'] == 'testString'
+ assert req_body['disambiguation_opt_out'] == False
+
+ def test_update_dialog_node_required_params_with_retries(self):
+ # Enable retries and run test_update_dialog_node_required_params.
+ _service.enable_retries()
+ self.test_update_dialog_node_required_params()
+
+ # Disable retries and run test_update_dialog_node_required_params.
+ _service.disable_retries()
+ self.test_update_dialog_node_required_params()
+
+ @responses.activate
+ def test_update_dialog_node_value_error(self):
+ """
+ test_update_dialog_node_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString')
+ mock_response = '{"dialog_node": "dialog_node", "description": "description", "conditions": "conditions", "parent": "parent", "previous_sibling": "previous_sibling", "output": {"generic": [{"response_type": "text", "values": [{"text": "text"}], "selection_policy": "sequential", "delimiter": "\n", "channels": [{"channel": "chat"}]}], "integrations": {"mapKey": {"anyKey": "anyValue"}}, "modifiers": {"overwrite": true}}, "context": {"integrations": {"mapKey": {"anyKey": "anyValue"}}}, "metadata": {"anyKey": "anyValue"}, "next_step": {"behavior": "get_user_input", "dialog_node": "dialog_node", "selector": "condition"}, "title": "title", "type": "standard", "event_name": "focus", "variable": "variable", "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "digress_in": "not_available", "digress_out": "allow_returning", "digress_out_slots": "not_allowed", "user_label": "user_label", "disambiguation_opt_out": false, "disabled": true, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a DialogNodeOutputTextValuesElement model
+ dialog_node_output_text_values_element_model = {}
+ dialog_node_output_text_values_element_model['text'] = 'testString'
+
+ # Construct a dict representation of a ResponseGenericChannel model
+ response_generic_channel_model = {}
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a dict representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model
+ dialog_node_output_generic_model = {}
+ dialog_node_output_generic_model['response_type'] = 'text'
+ dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model]
+ dialog_node_output_generic_model['selection_policy'] = 'sequential'
+ dialog_node_output_generic_model['delimiter'] = '\\n'
+ dialog_node_output_generic_model['channels'] = [response_generic_channel_model]
+
+ # Construct a dict representation of a DialogNodeOutputModifiers model
+ dialog_node_output_modifiers_model = {}
+ dialog_node_output_modifiers_model['overwrite'] = True
+
+ # Construct a dict representation of a DialogNodeOutput model
+ dialog_node_output_model = {}
+ dialog_node_output_model['generic'] = [dialog_node_output_generic_model]
+ dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model
+ dialog_node_output_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeContext model
+ dialog_node_context_model = {}
+ dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_context_model['foo'] = 'testString'
+
+ # Construct a dict representation of a DialogNodeNextStep model
+ dialog_node_next_step_model = {}
+ dialog_node_next_step_model['behavior'] = 'get_user_input'
+ dialog_node_next_step_model['dialog_node'] = 'testString'
+ dialog_node_next_step_model['selector'] = 'condition'
+
+ # Construct a dict representation of a DialogNodeAction model
+ dialog_node_action_model = {}
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ dialog_node = 'testString'
+ new_dialog_node = 'testString'
+ new_description = 'testString'
+ new_conditions = 'testString'
+ new_parent = 'testString'
+ new_previous_sibling = 'testString'
+ new_output = dialog_node_output_model
+ new_context = dialog_node_context_model
+ new_metadata = {'anyKey': 'anyValue'}
+ new_next_step = dialog_node_next_step_model
+ new_title = 'testString'
+ new_type = 'standard'
+ new_event_name = 'focus'
+ new_variable = 'testString'
+ new_actions = [dialog_node_action_model]
+ new_digress_in = 'not_available'
+ new_digress_out = 'allow_returning'
+ new_digress_out_slots = 'not_allowed'
+ new_user_label = 'testString'
+ new_disambiguation_opt_out = False
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "dialog_node": dialog_node,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.update_dialog_node(**req_copy)
+
+ def test_update_dialog_node_value_error_with_retries(self):
+ # Enable retries and run test_update_dialog_node_value_error.
+ _service.enable_retries()
+ self.test_update_dialog_node_value_error()
+
+ # Disable retries and run test_update_dialog_node_value_error.
+ _service.disable_retries()
+ self.test_update_dialog_node_value_error()
+
+
+class TestDeleteDialogNode:
+ """
+ Test Class for delete_dialog_node
+ """
+
+ @responses.activate
+ def test_delete_dialog_node_all_params(self):
+ """
+ delete_dialog_node()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ dialog_node = 'testString'
+
+ # Invoke method
+ response = _service.delete_dialog_node(
+ workspace_id,
+ dialog_node,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_delete_dialog_node_all_params_with_retries(self):
+ # Enable retries and run test_delete_dialog_node_all_params.
+ _service.enable_retries()
+ self.test_delete_dialog_node_all_params()
+
+ # Disable retries and run test_delete_dialog_node_all_params.
+ _service.disable_retries()
+ self.test_delete_dialog_node_all_params()
+
+ @responses.activate
+ def test_delete_dialog_node_value_error(self):
+ """
+ test_delete_dialog_node_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/dialog_nodes/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ dialog_node = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ "dialog_node": dialog_node,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_dialog_node(**req_copy)
+
+ def test_delete_dialog_node_value_error_with_retries(self):
+ # Enable retries and run test_delete_dialog_node_value_error.
+ _service.enable_retries()
+ self.test_delete_dialog_node_value_error()
+
+ # Disable retries and run test_delete_dialog_node_value_error.
+ _service.disable_retries()
+ self.test_delete_dialog_node_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: DialogNodes
+##############################################################################
+
+##############################################################################
+# Start of Service: Logs
+##############################################################################
+# region
+
+
+class TestListLogs:
+ """
+ Test Class for list_logs
+ """
+
+ @responses.activate
+ def test_list_logs_all_params(self):
+ """
+ list_logs()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/logs')
+ mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+ sort = 'testString'
+ filter = 'testString'
+ page_limit = 100
+ cursor = 'testString'
+
+ # Invoke method
+ response = _service.list_logs(
+ workspace_id,
+ sort=sort,
+ filter=filter,
+ page_limit=page_limit,
+ cursor=cursor,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'sort={}'.format(sort) in query_string
+ assert 'filter={}'.format(filter) in query_string
+ assert 'page_limit={}'.format(page_limit) in query_string
+ assert 'cursor={}'.format(cursor) in query_string
+
+ def test_list_logs_all_params_with_retries(self):
+ # Enable retries and run test_list_logs_all_params.
+ _service.enable_retries()
+ self.test_list_logs_all_params()
+
+ # Disable retries and run test_list_logs_all_params.
+ _service.disable_retries()
+ self.test_list_logs_all_params()
+
+ @responses.activate
+ def test_list_logs_required_params(self):
+ """
+ test_list_logs_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/logs')
+ mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, 
"suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Invoke method
+ response = _service.list_logs(
+ workspace_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_logs_required_params_with_retries(self):
+ # Enable retries and run test_list_logs_required_params.
+ _service.enable_retries()
+ self.test_list_logs_required_params()
+
+ # Disable retries and run test_list_logs_required_params.
+ _service.disable_retries()
+ self.test_list_logs_required_params()
+
+ @responses.activate
+ def test_list_logs_value_error(self):
+ """
+ test_list_logs_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/workspaces/testString/logs')
+ mock_response = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, 
"suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ workspace_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "workspace_id": workspace_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.list_logs(**req_copy)
+
+ def test_list_logs_value_error_with_retries(self):
+ # Enable retries and run test_list_logs_value_error.
+ _service.enable_retries()
+ self.test_list_logs_value_error()
+
+ # Disable retries and run test_list_logs_value_error.
+ _service.disable_retries()
+ self.test_list_logs_value_error()
+
+
class TestListAllLogs:
    """
    Test Class for list_all_logs
    """

    # Canned /v1/logs response shared by every test in this class; hoisted
    # to a class attribute so the large JSON literal is defined only once.
    _MOCK_LOGS_RESPONSE = '{"logs": [{"request": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "response": {"input": {"text": "text", "spelling_suggestions": false, "spelling_auto_correct": false, "suggested_text": "suggested_text", "original_text": "original_text"}, "intents": [{"intent": "intent", "confidence": 10}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}}], "alternate_intents": false, "context": {"conversation_id": "conversation_id", "system": {"anyKey": "anyValue"}, "metadata": {"deployment": "deployment", "user_id": "user_id"}}, "output": {"nodes_visited": ["nodes_visited"], "nodes_visited_details": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "msg": "msg", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "generic": [{"response_type": "text", "text": "text", "channels": [{"channel": "chat"}]}]}, "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "user_id": "user_id"}, "log_id": "log_id", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "workspace_id": "workspace_id", "language": "language"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}'

    @responses.activate
    def test_list_all_logs_all_params(self):
        """
        list_all_logs()

        Invoke list_all_logs with every supported parameter and verify each
        one is carried on the request as a query parameter.
        """
        # Set up mock
        url = preprocess_url('/v1/logs')
        responses.add(
            responses.GET,
            url,
            body=self._MOCK_LOGS_RESPONSE,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        filter = 'testString'
        sort = 'testString'
        page_limit = 100
        cursor = 'testString'

        # Invoke method
        response = _service.list_all_logs(
            filter,
            sort=sort,
            page_limit=page_limit,
            cursor=cursor,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (unquote first so the substring checks are
        # independent of URL encoding).
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'filter={}'.format(filter) in query_string
        assert 'sort={}'.format(sort) in query_string
        assert 'page_limit={}'.format(page_limit) in query_string
        assert 'cursor={}'.format(cursor) in query_string

    def test_list_all_logs_all_params_with_retries(self):
        """Run test_list_all_logs_all_params with retries enabled, then disabled."""
        # Enable retries and run test_list_all_logs_all_params.
        _service.enable_retries()
        self.test_list_all_logs_all_params()

        # Disable retries and run test_list_all_logs_all_params.
        _service.disable_retries()
        self.test_list_all_logs_all_params()

    @responses.activate
    def test_list_all_logs_required_params(self):
        """
        test_list_all_logs_required_params()

        Invoke list_all_logs with only the required `filter` parameter.
        """
        # Set up mock
        url = preprocess_url('/v1/logs')
        responses.add(
            responses.GET,
            url,
            body=self._MOCK_LOGS_RESPONSE,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        filter = 'testString'

        # Invoke method
        response = _service.list_all_logs(
            filter,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'filter={}'.format(filter) in query_string

    def test_list_all_logs_required_params_with_retries(self):
        """Run test_list_all_logs_required_params with retries enabled, then disabled."""
        # Enable retries and run test_list_all_logs_required_params.
        _service.enable_retries()
        self.test_list_all_logs_required_params()

        # Disable retries and run test_list_all_logs_required_params.
        _service.disable_retries()
        self.test_list_all_logs_required_params()

    @responses.activate
    def test_list_all_logs_value_error(self):
        """
        test_list_all_logs_value_error()

        Verify that list_all_logs raises ValueError when a required
        parameter is passed as None.
        """
        # Set up mock
        url = preprocess_url('/v1/logs')
        responses.add(
            responses.GET,
            url,
            body=self._MOCK_LOGS_RESPONSE,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        filter = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "filter": filter,
        }
        for param in req_param_dict.keys():
            # Compare keys with `!=` (value equality) rather than `is not`
            # (identity), which only worked by accident of string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.list_all_logs(**req_copy)

    def test_list_all_logs_value_error_with_retries(self):
        """Run test_list_all_logs_value_error with retries enabled, then disabled."""
        # Enable retries and run test_list_all_logs_value_error.
        _service.enable_retries()
        self.test_list_all_logs_value_error()

        # Disable retries and run test_list_all_logs_value_error.
        _service.disable_retries()
        self.test_list_all_logs_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Logs
+##############################################################################
+
+##############################################################################
+# Start of Service: UserData
+##############################################################################
+# region
+
+
class TestDeleteUserData:
    """
    Test Class for delete_user_data
    """

    @responses.activate
    def test_delete_user_data_all_params(self):
        """
        delete_user_data()

        Invoke delete_user_data and verify the customer_id is sent as a
        query parameter and the 202 response is surfaced.
        """
        # Set up mock
        url = preprocess_url('/v1/user_data')
        responses.add(
            responses.DELETE,
            url,
            status=202,
        )

        # Set up parameter values
        customer_id = 'testString'

        # Invoke method
        response = _service.delete_user_data(
            customer_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202
        # Validate query params (unquote first so the substring check is
        # independent of URL encoding).
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'customer_id={}'.format(customer_id) in query_string

    def test_delete_user_data_all_params_with_retries(self):
        """Run test_delete_user_data_all_params with retries enabled, then disabled."""
        # Enable retries and run test_delete_user_data_all_params.
        _service.enable_retries()
        self.test_delete_user_data_all_params()

        # Disable retries and run test_delete_user_data_all_params.
        _service.disable_retries()
        self.test_delete_user_data_all_params()

    @responses.activate
    def test_delete_user_data_value_error(self):
        """
        test_delete_user_data_value_error()

        Verify that delete_user_data raises ValueError when a required
        parameter is passed as None.
        """
        # Set up mock
        url = preprocess_url('/v1/user_data')
        responses.add(
            responses.DELETE,
            url,
            status=202,
        )

        # Set up parameter values
        customer_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "customer_id": customer_id,
        }
        for param in req_param_dict.keys():
            # Compare keys with `!=` (value equality) rather than `is not`
            # (identity), which only worked by accident of string interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_user_data(**req_copy)

    def test_delete_user_data_value_error_with_retries(self):
        """Run test_delete_user_data_value_error with retries enabled, then disabled."""
        # Enable retries and run test_delete_user_data_value_error.
        _service.enable_retries()
        self.test_delete_user_data_value_error()

        # Disable retries and run test_delete_user_data_value_error.
        _service.disable_retries()
        self.test_delete_user_data_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: UserData
+##############################################################################
+
+
+##############################################################################
+# Start of Model Tests
+##############################################################################
+# region
+
+
class TestModel_AgentAvailabilityMessage:
    """
    Test Class for AgentAvailabilityMessage
    """

    def test_agent_availability_message_serialization(self):
        """
        Test serialization/deserialization for AgentAvailabilityMessage
        """

        # JSON representation of an AgentAvailabilityMessage model.
        src_json = {}
        src_json['message'] = 'testString'

        # Deserialize the JSON into a model instance via from_dict.
        model = AgentAvailabilityMessage.from_dict(src_json)
        assert model != False

        # Build a second instance through the constructor, using the
        # attribute dict of a freshly deserialized model.
        model2 = AgentAvailabilityMessage(**AgentAvailabilityMessage.from_dict(src_json).__dict__)

        # Both construction paths must yield equal models.
        assert model == model2

        # Serializing back to a dict must round-trip without data loss.
        assert model.to_dict() == src_json
+
+
+class TestModel_BulkClassifyOutput:
+    """
+    Test Class for BulkClassifyOutput
+    """
+
+    def test_bulk_classify_output_serialization(self):
+        """
+        Test serialization/deserialization for BulkClassifyOutput
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        bulk_classify_utterance_model = {} # BulkClassifyUtterance
+        bulk_classify_utterance_model['text'] = 'testString'
+
+        capture_group_model = {} # CaptureGroup
+        capture_group_model['group'] = 'testString'
+        capture_group_model['location'] = [38]
+
+        runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+        runtime_entity_interpretation_model['calendar_type'] = 'testString'
+        runtime_entity_interpretation_model['datetime_link'] = 'testString'
+        runtime_entity_interpretation_model['festival'] = 'testString'
+        runtime_entity_interpretation_model['granularity'] = 'day'
+        runtime_entity_interpretation_model['range_link'] = 'testString'
+        runtime_entity_interpretation_model['range_modifier'] = 'testString'
+        runtime_entity_interpretation_model['relative_day'] = 72.5
+        runtime_entity_interpretation_model['relative_month'] = 72.5
+        runtime_entity_interpretation_model['relative_week'] = 72.5
+        runtime_entity_interpretation_model['relative_weekend'] = 72.5
+        runtime_entity_interpretation_model['relative_year'] = 72.5
+        runtime_entity_interpretation_model['specific_day'] = 72.5
+        runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+        runtime_entity_interpretation_model['specific_month'] = 72.5
+        runtime_entity_interpretation_model['specific_quarter'] = 72.5
+        runtime_entity_interpretation_model['specific_year'] = 72.5
+        runtime_entity_interpretation_model['numeric_value'] = 72.5
+        runtime_entity_interpretation_model['subtype'] = 'testString'
+        runtime_entity_interpretation_model['part_of_day'] = 'testString'
+        runtime_entity_interpretation_model['relative_hour'] = 72.5
+        runtime_entity_interpretation_model['relative_minute'] = 72.5
+        runtime_entity_interpretation_model['relative_second'] = 72.5
+        runtime_entity_interpretation_model['specific_hour'] = 72.5
+        runtime_entity_interpretation_model['specific_minute'] = 72.5
+        runtime_entity_interpretation_model['specific_second'] = 72.5
+        runtime_entity_interpretation_model['timezone'] = 'testString'
+
+        runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+        runtime_entity_alternative_model['value'] = 'testString'
+        runtime_entity_alternative_model['confidence'] = 72.5
+
+        runtime_entity_role_model = {} # RuntimeEntityRole
+        runtime_entity_role_model['type'] = 'date_from'
+
+        runtime_entity_model = {} # RuntimeEntity
+        runtime_entity_model['entity'] = 'testString'
+        runtime_entity_model['location'] = [38]
+        runtime_entity_model['value'] = 'testString'
+        runtime_entity_model['confidence'] = 72.5
+        runtime_entity_model['groups'] = [capture_group_model]
+        runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+        runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+        runtime_entity_model['role'] = runtime_entity_role_model
+
+        runtime_intent_model = {} # RuntimeIntent
+        runtime_intent_model['intent'] = 'testString'
+        runtime_intent_model['confidence'] = 72.5
+
+        # Construct a json representation of a BulkClassifyOutput model
+        bulk_classify_output_model_json = {}
+        bulk_classify_output_model_json['input'] = bulk_classify_utterance_model
+        bulk_classify_output_model_json['entities'] = [runtime_entity_model]
+        bulk_classify_output_model_json['intents'] = [runtime_intent_model]
+
+        # Construct a model instance of BulkClassifyOutput by calling from_dict on the json representation
+        bulk_classify_output_model = BulkClassifyOutput.from_dict(bulk_classify_output_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert bulk_classify_output_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        bulk_classify_output_model_dict = BulkClassifyOutput.from_dict(bulk_classify_output_model_json).__dict__
+        bulk_classify_output_model2 = BulkClassifyOutput(**bulk_classify_output_model_dict)
+
+        # Verify the model instances are equivalent
+        assert bulk_classify_output_model == bulk_classify_output_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        bulk_classify_output_model_json2 = bulk_classify_output_model.to_dict()
+        assert bulk_classify_output_model_json2 == bulk_classify_output_model_json
+
+
+class TestModel_BulkClassifyResponse:
+    """
+    Test Class for BulkClassifyResponse
+    """
+
+    def test_bulk_classify_response_serialization(self):
+        """
+        Test serialization/deserialization for BulkClassifyResponse
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        bulk_classify_utterance_model = {} # BulkClassifyUtterance
+        bulk_classify_utterance_model['text'] = 'testString'
+
+        capture_group_model = {} # CaptureGroup
+        capture_group_model['group'] = 'testString'
+        capture_group_model['location'] = [38]
+
+        runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+        runtime_entity_interpretation_model['calendar_type'] = 'testString'
+        runtime_entity_interpretation_model['datetime_link'] = 'testString'
+        runtime_entity_interpretation_model['festival'] = 'testString'
+        runtime_entity_interpretation_model['granularity'] = 'day'
+        runtime_entity_interpretation_model['range_link'] = 'testString'
+        runtime_entity_interpretation_model['range_modifier'] = 'testString'
+        runtime_entity_interpretation_model['relative_day'] = 72.5
+        runtime_entity_interpretation_model['relative_month'] = 72.5
+        runtime_entity_interpretation_model['relative_week'] = 72.5
+        runtime_entity_interpretation_model['relative_weekend'] = 72.5
+        runtime_entity_interpretation_model['relative_year'] = 72.5
+        runtime_entity_interpretation_model['specific_day'] = 72.5
+        runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+        runtime_entity_interpretation_model['specific_month'] = 72.5
+        runtime_entity_interpretation_model['specific_quarter'] = 72.5
+        runtime_entity_interpretation_model['specific_year'] = 72.5
+        runtime_entity_interpretation_model['numeric_value'] = 72.5
+        runtime_entity_interpretation_model['subtype'] = 'testString'
+        runtime_entity_interpretation_model['part_of_day'] = 'testString'
+        runtime_entity_interpretation_model['relative_hour'] = 72.5
+        runtime_entity_interpretation_model['relative_minute'] = 72.5
+        runtime_entity_interpretation_model['relative_second'] = 72.5
+        runtime_entity_interpretation_model['specific_hour'] = 72.5
+        runtime_entity_interpretation_model['specific_minute'] = 72.5
+        runtime_entity_interpretation_model['specific_second'] = 72.5
+        runtime_entity_interpretation_model['timezone'] = 'testString'
+
+        runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+        runtime_entity_alternative_model['value'] = 'testString'
+        runtime_entity_alternative_model['confidence'] = 72.5
+
+        runtime_entity_role_model = {} # RuntimeEntityRole
+        runtime_entity_role_model['type'] = 'date_from'
+
+        runtime_entity_model = {} # RuntimeEntity
+        runtime_entity_model['entity'] = 'testString'
+        runtime_entity_model['location'] = [38]
+        runtime_entity_model['value'] = 'testString'
+        runtime_entity_model['confidence'] = 72.5
+        runtime_entity_model['groups'] = [capture_group_model]
+        runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+        runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+        runtime_entity_model['role'] = runtime_entity_role_model
+
+        runtime_intent_model = {} # RuntimeIntent
+        runtime_intent_model['intent'] = 'testString'
+        runtime_intent_model['confidence'] = 72.5
+
+        bulk_classify_output_model = {} # BulkClassifyOutput
+        bulk_classify_output_model['input'] = bulk_classify_utterance_model
+        bulk_classify_output_model['entities'] = [runtime_entity_model]
+        bulk_classify_output_model['intents'] = [runtime_intent_model]
+
+        # Construct a json representation of a BulkClassifyResponse model
+        bulk_classify_response_model_json = {}
+        bulk_classify_response_model_json['output'] = [bulk_classify_output_model]
+
+        # Construct a model instance of BulkClassifyResponse by calling from_dict on the json representation
+        bulk_classify_response_model = BulkClassifyResponse.from_dict(bulk_classify_response_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert bulk_classify_response_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        bulk_classify_response_model_dict = BulkClassifyResponse.from_dict(bulk_classify_response_model_json).__dict__
+        bulk_classify_response_model2 = BulkClassifyResponse(**bulk_classify_response_model_dict)
+
+        # Verify the model instances are equivalent
+        assert bulk_classify_response_model == bulk_classify_response_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        bulk_classify_response_model_json2 = bulk_classify_response_model.to_dict()
+        assert bulk_classify_response_model_json2 == bulk_classify_response_model_json
+
+
+class TestModel_BulkClassifyUtterance:
+    """
+    Test Class for BulkClassifyUtterance
+    """
+
+    def test_bulk_classify_utterance_serialization(self):
+        """
+        Test serialization/deserialization for BulkClassifyUtterance
+        """
+
+        # Construct a json representation of a BulkClassifyUtterance model
+        bulk_classify_utterance_model_json = {}
+        bulk_classify_utterance_model_json['text'] = 'testString'
+
+        # Construct a model instance of BulkClassifyUtterance by calling from_dict on the json representation
+        bulk_classify_utterance_model = BulkClassifyUtterance.from_dict(bulk_classify_utterance_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert bulk_classify_utterance_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        bulk_classify_utterance_model_dict = BulkClassifyUtterance.from_dict(bulk_classify_utterance_model_json).__dict__
+        bulk_classify_utterance_model2 = BulkClassifyUtterance(**bulk_classify_utterance_model_dict)
+
+        # Verify the model instances are equivalent
+        assert bulk_classify_utterance_model == bulk_classify_utterance_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        bulk_classify_utterance_model_json2 = bulk_classify_utterance_model.to_dict()
+        assert bulk_classify_utterance_model_json2 == bulk_classify_utterance_model_json
+
+
+class TestModel_CaptureGroup:
+    """
+    Test Class for CaptureGroup
+    """
+
+    def test_capture_group_serialization(self):
+        """
+        Test serialization/deserialization for CaptureGroup
+        """
+
+        # Construct a json representation of a CaptureGroup model
+        capture_group_model_json = {}
+        capture_group_model_json['group'] = 'testString'
+        capture_group_model_json['location'] = [38]
+
+        # Construct a model instance of CaptureGroup by calling from_dict on the json representation
+        capture_group_model = CaptureGroup.from_dict(capture_group_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert capture_group_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        capture_group_model_dict = CaptureGroup.from_dict(capture_group_model_json).__dict__
+        capture_group_model2 = CaptureGroup(**capture_group_model_dict)
+
+        # Verify the model instances are equivalent
+        assert capture_group_model == capture_group_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        capture_group_model_json2 = capture_group_model.to_dict()
+        assert capture_group_model_json2 == capture_group_model_json
+
+
+class TestModel_ChannelTransferInfo:
+    """
+    Test Class for ChannelTransferInfo
+    """
+
+    def test_channel_transfer_info_serialization(self):
+        """
+        Test serialization/deserialization for ChannelTransferInfo
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        channel_transfer_target_chat_model = {} # ChannelTransferTargetChat
+        channel_transfer_target_chat_model['url'] = 'testString'
+
+        channel_transfer_target_model = {} # ChannelTransferTarget
+        channel_transfer_target_model['chat'] = channel_transfer_target_chat_model
+
+        # Construct a json representation of a ChannelTransferInfo model
+        channel_transfer_info_model_json = {}
+        channel_transfer_info_model_json['target'] = channel_transfer_target_model
+
+        # Construct a model instance of ChannelTransferInfo by calling from_dict on the json representation
+        channel_transfer_info_model = ChannelTransferInfo.from_dict(channel_transfer_info_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert channel_transfer_info_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        channel_transfer_info_model_dict = ChannelTransferInfo.from_dict(channel_transfer_info_model_json).__dict__
+        channel_transfer_info_model2 = ChannelTransferInfo(**channel_transfer_info_model_dict)
+
+        # Verify the model instances are equivalent
+        assert channel_transfer_info_model == channel_transfer_info_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        channel_transfer_info_model_json2 = channel_transfer_info_model.to_dict()
+        assert channel_transfer_info_model_json2 == channel_transfer_info_model_json
+
+
+class TestModel_ChannelTransferTarget:
+    """
+    Test Class for ChannelTransferTarget
+    """
+
+    def test_channel_transfer_target_serialization(self):
+        """
+        Test serialization/deserialization for ChannelTransferTarget
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        channel_transfer_target_chat_model = {} # ChannelTransferTargetChat
+        channel_transfer_target_chat_model['url'] = 'testString'
+
+        # Construct a json representation of a ChannelTransferTarget model
+        channel_transfer_target_model_json = {}
+        channel_transfer_target_model_json['chat'] = channel_transfer_target_chat_model
+
+        # Construct a model instance of ChannelTransferTarget by calling from_dict on the json representation
+        channel_transfer_target_model = ChannelTransferTarget.from_dict(channel_transfer_target_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert channel_transfer_target_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        channel_transfer_target_model_dict = ChannelTransferTarget.from_dict(channel_transfer_target_model_json).__dict__
+        channel_transfer_target_model2 = ChannelTransferTarget(**channel_transfer_target_model_dict)
+
+        # Verify the model instances are equivalent
+        assert channel_transfer_target_model == channel_transfer_target_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        channel_transfer_target_model_json2 = channel_transfer_target_model.to_dict()
+        assert channel_transfer_target_model_json2 == channel_transfer_target_model_json
+
+
+class TestModel_ChannelTransferTargetChat:
+    """
+    Test Class for ChannelTransferTargetChat
+    """
+
+    def test_channel_transfer_target_chat_serialization(self):
+        """
+        Test serialization/deserialization for ChannelTransferTargetChat
+        """
+
+        # Construct a json representation of a ChannelTransferTargetChat model
+        channel_transfer_target_chat_model_json = {}
+        channel_transfer_target_chat_model_json['url'] = 'testString'
+
+        # Construct a model instance of ChannelTransferTargetChat by calling from_dict on the json representation
+        channel_transfer_target_chat_model = ChannelTransferTargetChat.from_dict(channel_transfer_target_chat_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert channel_transfer_target_chat_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        channel_transfer_target_chat_model_dict = ChannelTransferTargetChat.from_dict(channel_transfer_target_chat_model_json).__dict__
+        channel_transfer_target_chat_model2 = ChannelTransferTargetChat(**channel_transfer_target_chat_model_dict)
+
+        # Verify the model instances are equivalent
+        assert channel_transfer_target_chat_model == channel_transfer_target_chat_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        channel_transfer_target_chat_model_json2 = channel_transfer_target_chat_model.to_dict()
+        assert channel_transfer_target_chat_model_json2 == channel_transfer_target_chat_model_json
+
+
+class TestModel_Context:
+    """
+    Test Class for Context
+    """
+
+    def test_context_serialization(self):
+        """
+        Test serialization/deserialization for Context
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        message_context_metadata_model = {} # MessageContextMetadata
+        message_context_metadata_model['deployment'] = 'testString'
+        message_context_metadata_model['user_id'] = 'testString'
+
+        # Construct a json representation of a Context model
+        context_model_json = {}
+        context_model_json['conversation_id'] = 'testString'
+        context_model_json['system'] = {'anyKey': 'anyValue'}
+        context_model_json['metadata'] = message_context_metadata_model
+        context_model_json['foo'] = 'testString'
+
+        # Construct a model instance of Context by calling from_dict on the json representation
+        context_model = Context.from_dict(context_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert context_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        context_model_dict = Context.from_dict(context_model_json).__dict__
+        context_model2 = Context(**context_model_dict)
+
+        # Verify the model instances are equivalent
+        assert context_model == context_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        context_model_json2 = context_model.to_dict()
+        assert context_model_json2 == context_model_json
+
+        # Test get_properties and set_properties methods.
+        # Setting an empty dict should clear the dynamic (arbitrary) properties.
+        context_model.set_properties({})
+        actual_dict = context_model.get_properties()
+        assert actual_dict == {}
+
+        # Setting a dict should expose exactly those dynamic property keys.
+        expected_dict = {'foo': 'testString'}
+        context_model.set_properties(expected_dict)
+        actual_dict = context_model.get_properties()
+        assert actual_dict.keys() == expected_dict.keys()
+
+
+class TestModel_Counterexample:
+    """
+    Test Class for Counterexample
+    """
+
+    def test_counterexample_serialization(self):
+        """
+        Test serialization/deserialization for Counterexample
+        """
+
+        # Construct a json representation of a Counterexample model
+        counterexample_model_json = {}
+        counterexample_model_json['text'] = 'testString'
+
+        # Construct a model instance of Counterexample by calling from_dict on the json representation
+        counterexample_model = Counterexample.from_dict(counterexample_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert counterexample_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        counterexample_model_dict = Counterexample.from_dict(counterexample_model_json).__dict__
+        counterexample_model2 = Counterexample(**counterexample_model_dict)
+
+        # Verify the model instances are equivalent
+        assert counterexample_model == counterexample_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        counterexample_model_json2 = counterexample_model.to_dict()
+        assert counterexample_model_json2 == counterexample_model_json
+
+
+class TestModel_CounterexampleCollection:
+    """
+    Test Class for CounterexampleCollection
+    """
+
+    def test_counterexample_collection_serialization(self):
+        """
+        Test serialization/deserialization for CounterexampleCollection
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        counterexample_model = {} # Counterexample
+        counterexample_model['text'] = 'testString'
+
+        pagination_model = {} # Pagination
+        pagination_model['refresh_url'] = 'testString'
+        pagination_model['next_url'] = 'testString'
+        pagination_model['total'] = 38
+        pagination_model['matched'] = 38
+        pagination_model['refresh_cursor'] = 'testString'
+        pagination_model['next_cursor'] = 'testString'
+
+        # Construct a json representation of a CounterexampleCollection model
+        counterexample_collection_model_json = {}
+        counterexample_collection_model_json['counterexamples'] = [counterexample_model]
+        counterexample_collection_model_json['pagination'] = pagination_model
+
+        # Construct a model instance of CounterexampleCollection by calling from_dict on the json representation
+        counterexample_collection_model = CounterexampleCollection.from_dict(counterexample_collection_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert counterexample_collection_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        counterexample_collection_model_dict = CounterexampleCollection.from_dict(counterexample_collection_model_json).__dict__
+        counterexample_collection_model2 = CounterexampleCollection(**counterexample_collection_model_dict)
+
+        # Verify the model instances are equivalent
+        assert counterexample_collection_model == counterexample_collection_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        counterexample_collection_model_json2 = counterexample_collection_model.to_dict()
+        assert counterexample_collection_model_json2 == counterexample_collection_model_json
+
+
+class TestModel_CreateEntity:
+    """
+    Test Class for CreateEntity
+    """
+
+    def test_create_entity_serialization(self):
+        """
+        Test serialization/deserialization for CreateEntity
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        create_value_model = {} # CreateValue
+        create_value_model['value'] = 'testString'
+        create_value_model['metadata'] = {'anyKey': 'anyValue'}
+        create_value_model['type'] = 'synonyms'
+        create_value_model['synonyms'] = ['testString']
+        create_value_model['patterns'] = ['testString']
+
+        # Construct a json representation of a CreateEntity model
+        create_entity_model_json = {}
+        create_entity_model_json['entity'] = 'testString'
+        create_entity_model_json['description'] = 'testString'
+        create_entity_model_json['metadata'] = {'anyKey': 'anyValue'}
+        create_entity_model_json['fuzzy_match'] = True
+        create_entity_model_json['values'] = [create_value_model]
+
+        # Construct a model instance of CreateEntity by calling from_dict on the json representation
+        create_entity_model = CreateEntity.from_dict(create_entity_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert create_entity_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        create_entity_model_dict = CreateEntity.from_dict(create_entity_model_json).__dict__
+        create_entity_model2 = CreateEntity(**create_entity_model_dict)
+
+        # Verify the model instances are equivalent
+        assert create_entity_model == create_entity_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        create_entity_model_json2 = create_entity_model.to_dict()
+        assert create_entity_model_json2 == create_entity_model_json
+
+
+class TestModel_CreateIntent:
+    """
+    Test Class for CreateIntent
+    """
+
+    def test_create_intent_serialization(self):
+        """
+        Test serialization/deserialization for CreateIntent
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        mention_model = {} # Mention
+        mention_model['entity'] = 'testString'
+        mention_model['location'] = [38]
+
+        example_model = {} # Example
+        example_model['text'] = 'testString'
+        example_model['mentions'] = [mention_model]
+
+        # Construct a json representation of a CreateIntent model
+        create_intent_model_json = {}
+        create_intent_model_json['intent'] = 'testString'
+        create_intent_model_json['description'] = 'testString'
+        create_intent_model_json['examples'] = [example_model]
+
+        # Construct a model instance of CreateIntent by calling from_dict on the json representation
+        create_intent_model = CreateIntent.from_dict(create_intent_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert create_intent_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        create_intent_model_dict = CreateIntent.from_dict(create_intent_model_json).__dict__
+        create_intent_model2 = CreateIntent(**create_intent_model_dict)
+
+        # Verify the model instances are equivalent
+        assert create_intent_model == create_intent_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        create_intent_model_json2 = create_intent_model.to_dict()
+        assert create_intent_model_json2 == create_intent_model_json
+
+
+class TestModel_CreateValue:
+    """
+    Test Class for CreateValue
+    """
+
+    def test_create_value_serialization(self):
+        """
+        Test serialization/deserialization for CreateValue
+        """
+
+        # Construct a json representation of a CreateValue model
+        create_value_model_json = {}
+        create_value_model_json['value'] = 'testString'
+        create_value_model_json['metadata'] = {'anyKey': 'anyValue'}
+        create_value_model_json['type'] = 'synonyms'
+        create_value_model_json['synonyms'] = ['testString']
+        create_value_model_json['patterns'] = ['testString']
+
+        # Construct a model instance of CreateValue by calling from_dict on the json representation
+        create_value_model = CreateValue.from_dict(create_value_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert create_value_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        create_value_model_dict = CreateValue.from_dict(create_value_model_json).__dict__
+        create_value_model2 = CreateValue(**create_value_model_dict)
+
+        # Verify the model instances are equivalent
+        assert create_value_model == create_value_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        create_value_model_json2 = create_value_model.to_dict()
+        assert create_value_model_json2 == create_value_model_json
+
+
+class TestModel_DialogNode:
+    """
+    Test Class for DialogNode
+    """
+
+    def test_dialog_node_serialization(self):
+        """
+        Test serialization/deserialization for DialogNode
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        dialog_node_output_text_values_element_model = {} # DialogNodeOutputTextValuesElement
+        dialog_node_output_text_values_element_model['text'] = 'testString'
+
+        response_generic_channel_model = {} # ResponseGenericChannel
+        response_generic_channel_model['channel'] = 'chat'
+
+        dialog_node_output_generic_model = {} # DialogNodeOutputGenericDialogNodeOutputResponseTypeText
+        dialog_node_output_generic_model['response_type'] = 'text'
+        dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model]
+        dialog_node_output_generic_model['selection_policy'] = 'sequential'
+        dialog_node_output_generic_model['delimiter'] = '\\n'
+        dialog_node_output_generic_model['channels'] = [response_generic_channel_model]
+
+        dialog_node_output_modifiers_model = {} # DialogNodeOutputModifiers
+        dialog_node_output_modifiers_model['overwrite'] = True
+
+        dialog_node_output_model = {} # DialogNodeOutput
+        dialog_node_output_model['generic'] = [dialog_node_output_generic_model]
+        dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+        dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model
+        dialog_node_output_model['foo'] = 'testString'
+
+        dialog_node_context_model = {} # DialogNodeContext
+        dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+        dialog_node_context_model['foo'] = 'testString'
+
+        dialog_node_next_step_model = {} # DialogNodeNextStep
+        dialog_node_next_step_model['behavior'] = 'get_user_input'
+        dialog_node_next_step_model['dialog_node'] = 'testString'
+        dialog_node_next_step_model['selector'] = 'condition'
+
+        dialog_node_action_model = {} # DialogNodeAction
+        dialog_node_action_model['name'] = 'testString'
+        dialog_node_action_model['type'] = 'client'
+        dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+        dialog_node_action_model['result_variable'] = 'testString'
+        dialog_node_action_model['credentials'] = 'testString'
+
+        # Construct a json representation of a DialogNode model
+        dialog_node_model_json = {}
+        dialog_node_model_json['dialog_node'] = 'testString'
+        dialog_node_model_json['description'] = 'testString'
+        dialog_node_model_json['conditions'] = 'testString'
+        dialog_node_model_json['parent'] = 'testString'
+        dialog_node_model_json['previous_sibling'] = 'testString'
+        dialog_node_model_json['output'] = dialog_node_output_model
+        dialog_node_model_json['context'] = dialog_node_context_model
+        dialog_node_model_json['metadata'] = {'anyKey': 'anyValue'}
+        dialog_node_model_json['next_step'] = dialog_node_next_step_model
+        dialog_node_model_json['title'] = 'testString'
+        dialog_node_model_json['type'] = 'standard'
+        dialog_node_model_json['event_name'] = 'focus'
+        dialog_node_model_json['variable'] = 'testString'
+        dialog_node_model_json['actions'] = [dialog_node_action_model]
+        dialog_node_model_json['digress_in'] = 'not_available'
+        dialog_node_model_json['digress_out'] = 'allow_returning'
+        dialog_node_model_json['digress_out_slots'] = 'not_allowed'
+        dialog_node_model_json['user_label'] = 'testString'
+        dialog_node_model_json['disambiguation_opt_out'] = False
+
+        # Construct a model instance of DialogNode by calling from_dict on the json representation
+        dialog_node_model = DialogNode.from_dict(dialog_node_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert dialog_node_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        dialog_node_model_dict = DialogNode.from_dict(dialog_node_model_json).__dict__
+        dialog_node_model2 = DialogNode(**dialog_node_model_dict)
+
+        # Verify the model instances are equivalent
+        assert dialog_node_model == dialog_node_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        dialog_node_model_json2 = dialog_node_model.to_dict()
+        assert dialog_node_model_json2 == dialog_node_model_json
+
+
+class TestModel_DialogNodeAction:
+    """
+    Test Class for DialogNodeAction
+    """
+
+    def test_dialog_node_action_serialization(self):
+        """
+        Test serialization/deserialization for DialogNodeAction
+        """
+
+        # Construct a json representation of a DialogNodeAction model
+        dialog_node_action_model_json = {}
+        dialog_node_action_model_json['name'] = 'testString'
+        dialog_node_action_model_json['type'] = 'client'
+        dialog_node_action_model_json['parameters'] = {'anyKey': 'anyValue'}
+        dialog_node_action_model_json['result_variable'] = 'testString'
+        dialog_node_action_model_json['credentials'] = 'testString'
+
+        # Construct a model instance of DialogNodeAction by calling from_dict on the json representation
+        dialog_node_action_model = DialogNodeAction.from_dict(dialog_node_action_model_json)
+        # NOTE(review): '!= False' is a weak truthiness check; 'is not None' would be stricter — confirm generator convention.
+        assert dialog_node_action_model != False
+
+        # Construct a second instance from the first instance's attribute dict (round-trips through the constructor)
+        dialog_node_action_model_dict = DialogNodeAction.from_dict(dialog_node_action_model_json).__dict__
+        dialog_node_action_model2 = DialogNodeAction(**dialog_node_action_model_dict)
+
+        # Verify the model instances are equivalent
+        assert dialog_node_action_model == dialog_node_action_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        dialog_node_action_model_json2 = dialog_node_action_model.to_dict()
+        assert dialog_node_action_model_json2 == dialog_node_action_model_json
+
+
+class TestModel_DialogNodeCollection:
+ """
+ Test Class for DialogNodeCollection
+ """
+
+ def test_dialog_node_collection_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeCollection
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ dialog_node_output_text_values_element_model = {} # DialogNodeOutputTextValuesElement
+ dialog_node_output_text_values_element_model['text'] = 'testString'
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ dialog_node_output_generic_model = {} # DialogNodeOutputGenericDialogNodeOutputResponseTypeText
+ dialog_node_output_generic_model['response_type'] = 'text'
+ dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model]
+ dialog_node_output_generic_model['selection_policy'] = 'sequential'
+ dialog_node_output_generic_model['delimiter'] = '\\n'
+ dialog_node_output_generic_model['channels'] = [response_generic_channel_model]
+
+ dialog_node_output_modifiers_model = {} # DialogNodeOutputModifiers
+ dialog_node_output_modifiers_model['overwrite'] = True
+
+ dialog_node_output_model = {} # DialogNodeOutput
+ dialog_node_output_model['generic'] = [dialog_node_output_generic_model]
+ dialog_node_output_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_output_model['modifiers'] = dialog_node_output_modifiers_model
+ dialog_node_output_model['foo'] = 'testString'
+
+ dialog_node_context_model = {} # DialogNodeContext
+ dialog_node_context_model['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_context_model['foo'] = 'testString'
+
+ dialog_node_next_step_model = {} # DialogNodeNextStep
+ dialog_node_next_step_model['behavior'] = 'get_user_input'
+ dialog_node_next_step_model['dialog_node'] = 'testString'
+ dialog_node_next_step_model['selector'] = 'condition'
+
+ dialog_node_action_model = {} # DialogNodeAction
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ dialog_node_model = {} # DialogNode
+ dialog_node_model['dialog_node'] = 'testString'
+ dialog_node_model['description'] = 'testString'
+ dialog_node_model['conditions'] = 'testString'
+ dialog_node_model['parent'] = 'testString'
+ dialog_node_model['previous_sibling'] = 'testString'
+ dialog_node_model['output'] = dialog_node_output_model
+ dialog_node_model['context'] = dialog_node_context_model
+ dialog_node_model['metadata'] = {'anyKey': 'anyValue'}
+ dialog_node_model['next_step'] = dialog_node_next_step_model
+ dialog_node_model['title'] = 'testString'
+ dialog_node_model['type'] = 'standard'
+ dialog_node_model['event_name'] = 'focus'
+ dialog_node_model['variable'] = 'testString'
+ dialog_node_model['actions'] = [dialog_node_action_model]
+ dialog_node_model['digress_in'] = 'not_available'
+ dialog_node_model['digress_out'] = 'allow_returning'
+ dialog_node_model['digress_out_slots'] = 'not_allowed'
+ dialog_node_model['user_label'] = 'testString'
+ dialog_node_model['disambiguation_opt_out'] = False
+
+ pagination_model = {} # Pagination
+ pagination_model['refresh_url'] = 'testString'
+ pagination_model['next_url'] = 'testString'
+ pagination_model['total'] = 38
+ pagination_model['matched'] = 38
+ pagination_model['refresh_cursor'] = 'testString'
+ pagination_model['next_cursor'] = 'testString'
+
+ # Construct a json representation of a DialogNodeCollection model
+ dialog_node_collection_model_json = {}
+ dialog_node_collection_model_json['dialog_nodes'] = [dialog_node_model]
+ dialog_node_collection_model_json['pagination'] = pagination_model
+
+ # Construct a model instance of DialogNodeCollection by calling from_dict on the json representation
+ dialog_node_collection_model = DialogNodeCollection.from_dict(dialog_node_collection_model_json)
+ assert dialog_node_collection_model != False
+
+ # Construct a model instance of DialogNodeCollection by calling from_dict on the json representation
+ dialog_node_collection_model_dict = DialogNodeCollection.from_dict(dialog_node_collection_model_json).__dict__
+ dialog_node_collection_model2 = DialogNodeCollection(**dialog_node_collection_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_collection_model == dialog_node_collection_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_collection_model_json2 = dialog_node_collection_model.to_dict()
+ assert dialog_node_collection_model_json2 == dialog_node_collection_model_json
+
+
+class TestModel_DialogNodeContext:
+ """
+ Test Class for DialogNodeContext
+ """
+
+ def test_dialog_node_context_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeContext
+ """
+
+ # Construct a json representation of a DialogNodeContext model
+ dialog_node_context_model_json = {}
+ dialog_node_context_model_json['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_context_model_json['foo'] = 'testString'
+
+ # Construct a model instance of DialogNodeContext by calling from_dict on the json representation
+ dialog_node_context_model = DialogNodeContext.from_dict(dialog_node_context_model_json)
+ assert dialog_node_context_model != False
+
+ # Construct a model instance of DialogNodeContext by calling from_dict on the json representation
+ dialog_node_context_model_dict = DialogNodeContext.from_dict(dialog_node_context_model_json).__dict__
+ dialog_node_context_model2 = DialogNodeContext(**dialog_node_context_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_context_model == dialog_node_context_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_context_model_json2 = dialog_node_context_model.to_dict()
+ assert dialog_node_context_model_json2 == dialog_node_context_model_json
+
+ # Test get_properties and set_properties methods.
+ dialog_node_context_model.set_properties({})
+ actual_dict = dialog_node_context_model.get_properties()
+ assert actual_dict == {}
+
+ expected_dict = {'foo': 'testString'}
+ dialog_node_context_model.set_properties(expected_dict)
+ actual_dict = dialog_node_context_model.get_properties()
+ assert actual_dict.keys() == expected_dict.keys()
+
+
+class TestModel_DialogNodeNextStep:
+ """
+ Test Class for DialogNodeNextStep
+ """
+
+ def test_dialog_node_next_step_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeNextStep
+ """
+
+ # Construct a json representation of a DialogNodeNextStep model
+ dialog_node_next_step_model_json = {}
+ dialog_node_next_step_model_json['behavior'] = 'get_user_input'
+ dialog_node_next_step_model_json['dialog_node'] = 'testString'
+ dialog_node_next_step_model_json['selector'] = 'condition'
+
+ # Construct a model instance of DialogNodeNextStep by calling from_dict on the json representation
+ dialog_node_next_step_model = DialogNodeNextStep.from_dict(dialog_node_next_step_model_json)
+ assert dialog_node_next_step_model != False
+
+ # Construct a model instance of DialogNodeNextStep by calling from_dict on the json representation
+ dialog_node_next_step_model_dict = DialogNodeNextStep.from_dict(dialog_node_next_step_model_json).__dict__
+ dialog_node_next_step_model2 = DialogNodeNextStep(**dialog_node_next_step_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_next_step_model == dialog_node_next_step_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_next_step_model_json2 = dialog_node_next_step_model.to_dict()
+ assert dialog_node_next_step_model_json2 == dialog_node_next_step_model_json
+
+
+class TestModel_DialogNodeOutput:
+ """
+ Test Class for DialogNodeOutput
+ """
+
+ def test_dialog_node_output_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeOutput
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ dialog_node_output_text_values_element_model = {} # DialogNodeOutputTextValuesElement
+ dialog_node_output_text_values_element_model['text'] = 'testString'
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ dialog_node_output_generic_model = {} # DialogNodeOutputGenericDialogNodeOutputResponseTypeText
+ dialog_node_output_generic_model['response_type'] = 'text'
+ dialog_node_output_generic_model['values'] = [dialog_node_output_text_values_element_model]
+ dialog_node_output_generic_model['selection_policy'] = 'sequential'
+ dialog_node_output_generic_model['delimiter'] = '\\n'
+ dialog_node_output_generic_model['channels'] = [response_generic_channel_model]
+
+ dialog_node_output_modifiers_model = {} # DialogNodeOutputModifiers
+ dialog_node_output_modifiers_model['overwrite'] = True
+
+ # Construct a json representation of a DialogNodeOutput model
+ dialog_node_output_model_json = {}
+ dialog_node_output_model_json['generic'] = [dialog_node_output_generic_model]
+ dialog_node_output_model_json['integrations'] = {'key1': {'anyKey': 'anyValue'}}
+ dialog_node_output_model_json['modifiers'] = dialog_node_output_modifiers_model
+ dialog_node_output_model_json['foo'] = 'testString'
+
+ # Construct a model instance of DialogNodeOutput by calling from_dict on the json representation
+ dialog_node_output_model = DialogNodeOutput.from_dict(dialog_node_output_model_json)
+ assert dialog_node_output_model != False
+
+ # Construct a model instance of DialogNodeOutput by calling from_dict on the json representation
+ dialog_node_output_model_dict = DialogNodeOutput.from_dict(dialog_node_output_model_json).__dict__
+ dialog_node_output_model2 = DialogNodeOutput(**dialog_node_output_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_output_model == dialog_node_output_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_output_model_json2 = dialog_node_output_model.to_dict()
+ assert dialog_node_output_model_json2 == dialog_node_output_model_json
+
+ # Test get_properties and set_properties methods.
+ dialog_node_output_model.set_properties({})
+ actual_dict = dialog_node_output_model.get_properties()
+ assert actual_dict == {}
+
+ expected_dict = {'foo': 'testString'}
+ dialog_node_output_model.set_properties(expected_dict)
+ actual_dict = dialog_node_output_model.get_properties()
+ assert actual_dict.keys() == expected_dict.keys()
+
+
+class TestModel_DialogNodeOutputConnectToAgentTransferInfo:
+ """
+ Test Class for DialogNodeOutputConnectToAgentTransferInfo
+ """
+
+ def test_dialog_node_output_connect_to_agent_transfer_info_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeOutputConnectToAgentTransferInfo
+ """
+
+ # Construct a json representation of a DialogNodeOutputConnectToAgentTransferInfo model
+ dialog_node_output_connect_to_agent_transfer_info_model_json = {}
+ dialog_node_output_connect_to_agent_transfer_info_model_json['target'] = {'key1': {'anyKey': 'anyValue'}}
+
+ # Construct a model instance of DialogNodeOutputConnectToAgentTransferInfo by calling from_dict on the json representation
+ dialog_node_output_connect_to_agent_transfer_info_model = DialogNodeOutputConnectToAgentTransferInfo.from_dict(dialog_node_output_connect_to_agent_transfer_info_model_json)
+ assert dialog_node_output_connect_to_agent_transfer_info_model != False
+
+ # Construct a model instance of DialogNodeOutputConnectToAgentTransferInfo by calling from_dict on the json representation
+ dialog_node_output_connect_to_agent_transfer_info_model_dict = DialogNodeOutputConnectToAgentTransferInfo.from_dict(dialog_node_output_connect_to_agent_transfer_info_model_json).__dict__
+ dialog_node_output_connect_to_agent_transfer_info_model2 = DialogNodeOutputConnectToAgentTransferInfo(**dialog_node_output_connect_to_agent_transfer_info_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_output_connect_to_agent_transfer_info_model == dialog_node_output_connect_to_agent_transfer_info_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_output_connect_to_agent_transfer_info_model_json2 = dialog_node_output_connect_to_agent_transfer_info_model.to_dict()
+ assert dialog_node_output_connect_to_agent_transfer_info_model_json2 == dialog_node_output_connect_to_agent_transfer_info_model_json
+
+
+class TestModel_DialogNodeOutputModifiers:
+ """
+ Test Class for DialogNodeOutputModifiers
+ """
+
+ def test_dialog_node_output_modifiers_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeOutputModifiers
+ """
+
+ # Construct a json representation of a DialogNodeOutputModifiers model
+ dialog_node_output_modifiers_model_json = {}
+ dialog_node_output_modifiers_model_json['overwrite'] = True
+
+ # Construct a model instance of DialogNodeOutputModifiers by calling from_dict on the json representation
+ dialog_node_output_modifiers_model = DialogNodeOutputModifiers.from_dict(dialog_node_output_modifiers_model_json)
+ assert dialog_node_output_modifiers_model != False
+
+ # Construct a model instance of DialogNodeOutputModifiers by calling from_dict on the json representation
+ dialog_node_output_modifiers_model_dict = DialogNodeOutputModifiers.from_dict(dialog_node_output_modifiers_model_json).__dict__
+ dialog_node_output_modifiers_model2 = DialogNodeOutputModifiers(**dialog_node_output_modifiers_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_output_modifiers_model == dialog_node_output_modifiers_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_output_modifiers_model_json2 = dialog_node_output_modifiers_model.to_dict()
+ assert dialog_node_output_modifiers_model_json2 == dialog_node_output_modifiers_model_json
+
+
+class TestModel_DialogNodeOutputOptionsElement:
+ """
+ Test Class for DialogNodeOutputOptionsElement
+ """
+
+ def test_dialog_node_output_options_element_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeOutputOptionsElement
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ message_input_model = {} # MessageInput
+ message_input_model['text'] = 'testString'
+ message_input_model['spelling_suggestions'] = False
+ message_input_model['spelling_auto_correct'] = False
+ message_input_model['foo'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+
+ dialog_node_output_options_element_value_model = {} # DialogNodeOutputOptionsElementValue
+ dialog_node_output_options_element_value_model['input'] = message_input_model
+ dialog_node_output_options_element_value_model['intents'] = [runtime_intent_model]
+ dialog_node_output_options_element_value_model['entities'] = [runtime_entity_model]
+
+ # Construct a json representation of a DialogNodeOutputOptionsElement model
+ dialog_node_output_options_element_model_json = {}
+ dialog_node_output_options_element_model_json['label'] = 'testString'
+ dialog_node_output_options_element_model_json['value'] = dialog_node_output_options_element_value_model
+
+ # Construct a model instance of DialogNodeOutputOptionsElement by calling from_dict on the json representation
+ dialog_node_output_options_element_model = DialogNodeOutputOptionsElement.from_dict(dialog_node_output_options_element_model_json)
+ assert dialog_node_output_options_element_model != False
+
+ # Construct a model instance of DialogNodeOutputOptionsElement by calling from_dict on the json representation
+ dialog_node_output_options_element_model_dict = DialogNodeOutputOptionsElement.from_dict(dialog_node_output_options_element_model_json).__dict__
+ dialog_node_output_options_element_model2 = DialogNodeOutputOptionsElement(**dialog_node_output_options_element_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_output_options_element_model == dialog_node_output_options_element_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_output_options_element_model_json2 = dialog_node_output_options_element_model.to_dict()
+ assert dialog_node_output_options_element_model_json2 == dialog_node_output_options_element_model_json
+
+
+class TestModel_DialogNodeOutputOptionsElementValue:
+ """
+ Test Class for DialogNodeOutputOptionsElementValue
+ """
+
+ def test_dialog_node_output_options_element_value_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeOutputOptionsElementValue
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ message_input_model = {} # MessageInput
+ message_input_model['text'] = 'testString'
+ message_input_model['spelling_suggestions'] = False
+ message_input_model['spelling_auto_correct'] = False
+ message_input_model['foo'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+
+ # Construct a json representation of a DialogNodeOutputOptionsElementValue model
+ dialog_node_output_options_element_value_model_json = {}
+ dialog_node_output_options_element_value_model_json['input'] = message_input_model
+ dialog_node_output_options_element_value_model_json['intents'] = [runtime_intent_model]
+ dialog_node_output_options_element_value_model_json['entities'] = [runtime_entity_model]
+
+ # Construct a model instance of DialogNodeOutputOptionsElementValue by calling from_dict on the json representation
+ dialog_node_output_options_element_value_model = DialogNodeOutputOptionsElementValue.from_dict(dialog_node_output_options_element_value_model_json)
+ assert dialog_node_output_options_element_value_model != False
+
+ # Construct a model instance of DialogNodeOutputOptionsElementValue by calling from_dict on the json representation
+ dialog_node_output_options_element_value_model_dict = DialogNodeOutputOptionsElementValue.from_dict(dialog_node_output_options_element_value_model_json).__dict__
+ dialog_node_output_options_element_value_model2 = DialogNodeOutputOptionsElementValue(**dialog_node_output_options_element_value_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_output_options_element_value_model == dialog_node_output_options_element_value_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_output_options_element_value_model_json2 = dialog_node_output_options_element_value_model.to_dict()
+ assert dialog_node_output_options_element_value_model_json2 == dialog_node_output_options_element_value_model_json
+
+
+class TestModel_DialogNodeOutputTextValuesElement:
+ """
+ Test Class for DialogNodeOutputTextValuesElement
+ """
+
+ def test_dialog_node_output_text_values_element_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeOutputTextValuesElement
+ """
+
+ # Construct a json representation of a DialogNodeOutputTextValuesElement model
+ dialog_node_output_text_values_element_model_json = {}
+ dialog_node_output_text_values_element_model_json['text'] = 'testString'
+
+ # Construct a model instance of DialogNodeOutputTextValuesElement by calling from_dict on the json representation
+ dialog_node_output_text_values_element_model = DialogNodeOutputTextValuesElement.from_dict(dialog_node_output_text_values_element_model_json)
+ assert dialog_node_output_text_values_element_model != False
+
+ # Construct a model instance of DialogNodeOutputTextValuesElement by calling from_dict on the json representation
+ dialog_node_output_text_values_element_model_dict = DialogNodeOutputTextValuesElement.from_dict(dialog_node_output_text_values_element_model_json).__dict__
+ dialog_node_output_text_values_element_model2 = DialogNodeOutputTextValuesElement(**dialog_node_output_text_values_element_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_output_text_values_element_model == dialog_node_output_text_values_element_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_output_text_values_element_model_json2 = dialog_node_output_text_values_element_model.to_dict()
+ assert dialog_node_output_text_values_element_model_json2 == dialog_node_output_text_values_element_model_json
+
+
+class TestModel_DialogNodeVisitedDetails:
+ """
+ Test Class for DialogNodeVisitedDetails
+ """
+
+ def test_dialog_node_visited_details_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeVisitedDetails
+ """
+
+ # Construct a json representation of a DialogNodeVisitedDetails model
+ dialog_node_visited_details_model_json = {}
+ dialog_node_visited_details_model_json['dialog_node'] = 'testString'
+ dialog_node_visited_details_model_json['title'] = 'testString'
+ dialog_node_visited_details_model_json['conditions'] = 'testString'
+
+ # Construct a model instance of DialogNodeVisitedDetails by calling from_dict on the json representation
+ dialog_node_visited_details_model = DialogNodeVisitedDetails.from_dict(dialog_node_visited_details_model_json)
+ assert dialog_node_visited_details_model != False
+
+ # Construct a model instance of DialogNodeVisitedDetails by calling from_dict on the json representation
+ dialog_node_visited_details_model_dict = DialogNodeVisitedDetails.from_dict(dialog_node_visited_details_model_json).__dict__
+ dialog_node_visited_details_model2 = DialogNodeVisitedDetails(**dialog_node_visited_details_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_visited_details_model == dialog_node_visited_details_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_visited_details_model_json2 = dialog_node_visited_details_model.to_dict()
+ assert dialog_node_visited_details_model_json2 == dialog_node_visited_details_model_json
+
+
+class TestModel_DialogSuggestion:
+ """
+ Test Class for DialogSuggestion
+ """
+
+ def test_dialog_suggestion_serialization(self):
+ """
+ Test serialization/deserialization for DialogSuggestion
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ message_input_model = {} # MessageInput
+ message_input_model['text'] = 'testString'
+ message_input_model['spelling_suggestions'] = False
+ message_input_model['spelling_auto_correct'] = False
+ message_input_model['foo'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+
+ dialog_suggestion_value_model = {} # DialogSuggestionValue
+ dialog_suggestion_value_model['input'] = message_input_model
+ dialog_suggestion_value_model['intents'] = [runtime_intent_model]
+ dialog_suggestion_value_model['entities'] = [runtime_entity_model]
+
+ # Construct a json representation of a DialogSuggestion model
+ dialog_suggestion_model_json = {}
+ dialog_suggestion_model_json['label'] = 'testString'
+ dialog_suggestion_model_json['value'] = dialog_suggestion_value_model
+ dialog_suggestion_model_json['output'] = {'anyKey': 'anyValue'}
+ dialog_suggestion_model_json['dialog_node'] = 'testString'
+
+ # Construct a model instance of DialogSuggestion by calling from_dict on the json representation
+ dialog_suggestion_model = DialogSuggestion.from_dict(dialog_suggestion_model_json)
+ assert dialog_suggestion_model != False
+
+ # Construct a model instance of DialogSuggestion by calling from_dict on the json representation
+ dialog_suggestion_model_dict = DialogSuggestion.from_dict(dialog_suggestion_model_json).__dict__
+ dialog_suggestion_model2 = DialogSuggestion(**dialog_suggestion_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_suggestion_model == dialog_suggestion_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_suggestion_model_json2 = dialog_suggestion_model.to_dict()
+ assert dialog_suggestion_model_json2 == dialog_suggestion_model_json
+
+
class TestModel_DialogSuggestionValue:
    """
    Test Class for DialogSuggestionValue
    """

    def test_dialog_suggestion_value_serialization(self):
        """
        Test serialization/deserialization for DialogSuggestionValue
        """

        # Nested model dicts required to assemble the top-level model.
        message_input = {  # MessageInput
            'text': 'testString',
            'spelling_suggestions': False,
            'spelling_auto_correct': False,
            'foo': 'testString',
        }

        runtime_intent = {  # RuntimeIntent
            'intent': 'testString',
            'confidence': 72.5,
        }

        capture_group = {  # CaptureGroup
            'group': 'testString',
            'location': [38],
        }

        interpretation = {  # RuntimeEntityInterpretation
            'calendar_type': 'testString',
            'datetime_link': 'testString',
            'festival': 'testString',
            'granularity': 'day',
            'range_link': 'testString',
            'range_modifier': 'testString',
            'relative_day': 72.5,
            'relative_month': 72.5,
            'relative_week': 72.5,
            'relative_weekend': 72.5,
            'relative_year': 72.5,
            'specific_day': 72.5,
            'specific_day_of_week': 'testString',
            'specific_month': 72.5,
            'specific_quarter': 72.5,
            'specific_year': 72.5,
            'numeric_value': 72.5,
            'subtype': 'testString',
            'part_of_day': 'testString',
            'relative_hour': 72.5,
            'relative_minute': 72.5,
            'relative_second': 72.5,
            'specific_hour': 72.5,
            'specific_minute': 72.5,
            'specific_second': 72.5,
            'timezone': 'testString',
        }

        entity_alternative = {  # RuntimeEntityAlternative
            'value': 'testString',
            'confidence': 72.5,
        }

        entity_role = {  # RuntimeEntityRole
            'type': 'date_from',
        }

        runtime_entity = {  # RuntimeEntity
            'entity': 'testString',
            'location': [38],
            'value': 'testString',
            'confidence': 72.5,
            'groups': [capture_group],
            'interpretation': interpretation,
            'alternatives': [entity_alternative],
            'role': entity_role,
        }

        # JSON form of the DialogSuggestionValue model under test.
        model_json = {
            'input': message_input,
            'intents': [runtime_intent],
            'entities': [runtime_entity],
        }

        # json -> model instance via from_dict.
        model = DialogSuggestionValue.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = DialogSuggestionValue(**DialogSuggestionValue.from_dict(model_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # model -> dict round-trip must be lossless.
        assert model.to_dict() == model_json
+
+
class TestModel_Entity:
    """
    Test Class for Entity
    """

    def test_entity_serialization(self):
        """
        Test serialization/deserialization for Entity
        """

        # Nested model dict required to assemble the top-level model.
        value_dict = {  # Value
            'value': 'testString',
            'metadata': {'anyKey': 'anyValue'},
            'type': 'synonyms',
            'synonyms': ['testString'],
            'patterns': ['testString'],
        }

        # JSON form of the Entity model under test.
        model_json = {
            'entity': 'testString',
            'description': 'testString',
            'metadata': {'anyKey': 'anyValue'},
            'fuzzy_match': True,
            'values': [value_dict],
        }

        # json -> model instance via from_dict.
        model = Entity.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = Entity(**Entity.from_dict(model_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # model -> dict round-trip must be lossless.
        assert model.to_dict() == model_json
+
+
class TestModel_EntityCollection:
    """
    Test Class for EntityCollection
    """

    def test_entity_collection_serialization(self):
        """
        Test serialization/deserialization for EntityCollection
        """

        # Nested model dicts required to assemble the top-level model.
        value_dict = {  # Value
            'value': 'testString',
            'metadata': {'anyKey': 'anyValue'},
            'type': 'synonyms',
            'synonyms': ['testString'],
            'patterns': ['testString'],
        }

        entity_dict = {  # Entity
            'entity': 'testString',
            'description': 'testString',
            'metadata': {'anyKey': 'anyValue'},
            'fuzzy_match': True,
            'values': [value_dict],
        }

        pagination_dict = {  # Pagination
            'refresh_url': 'testString',
            'next_url': 'testString',
            'total': 38,
            'matched': 38,
            'refresh_cursor': 'testString',
            'next_cursor': 'testString',
        }

        # JSON form of the EntityCollection model under test.
        model_json = {
            'entities': [entity_dict],
            'pagination': pagination_dict,
        }

        # json -> model instance via from_dict.
        model = EntityCollection.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = EntityCollection(**EntityCollection.from_dict(model_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # model -> dict round-trip must be lossless.
        assert model.to_dict() == model_json
+
+
class TestModel_EntityMention:
    """
    Test Class for EntityMention
    """

    def test_entity_mention_serialization(self):
        """
        Test serialization/deserialization for EntityMention
        """

        # JSON form of the EntityMention model under test.
        model_json = {
            'text': 'testString',
            'intent': 'testString',
            'location': [38],
        }

        # json -> model instance via from_dict.
        model = EntityMention.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = EntityMention(**EntityMention.from_dict(model_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # model -> dict round-trip must be lossless.
        assert model.to_dict() == model_json
+
+
class TestModel_EntityMentionCollection:
    """
    Test Class for EntityMentionCollection
    """

    def test_entity_mention_collection_serialization(self):
        """
        Test serialization/deserialization for EntityMentionCollection
        """

        # Nested model dicts required to assemble the top-level model.
        entity_mention = {  # EntityMention
            'text': 'testString',
            'intent': 'testString',
            'location': [38],
        }

        pagination_dict = {  # Pagination
            'refresh_url': 'testString',
            'next_url': 'testString',
            'total': 38,
            'matched': 38,
            'refresh_cursor': 'testString',
            'next_cursor': 'testString',
        }

        # JSON form of the EntityMentionCollection model under test.
        model_json = {
            'examples': [entity_mention],
            'pagination': pagination_dict,
        }

        # json -> model instance via from_dict.
        model = EntityMentionCollection.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = EntityMentionCollection(**EntityMentionCollection.from_dict(model_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # model -> dict round-trip must be lossless.
        assert model.to_dict() == model_json
+
+
class TestModel_Example:
    """
    Test Class for Example
    """

    def test_example_serialization(self):
        """
        Test serialization/deserialization for Example
        """

        # Nested model dict required to assemble the top-level model.
        mention = {  # Mention
            'entity': 'testString',
            'location': [38],
        }

        # JSON form of the Example model under test.
        model_json = {
            'text': 'testString',
            'mentions': [mention],
        }

        # json -> model instance via from_dict.
        model = Example.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = Example(**Example.from_dict(model_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # model -> dict round-trip must be lossless.
        assert model.to_dict() == model_json
+
+
class TestModel_ExampleCollection:
    """
    Test Class for ExampleCollection
    """

    def test_example_collection_serialization(self):
        """
        Test serialization/deserialization for ExampleCollection
        """

        # Nested model dicts required to assemble the top-level model.
        mention = {  # Mention
            'entity': 'testString',
            'location': [38],
        }

        example_dict = {  # Example
            'text': 'testString',
            'mentions': [mention],
        }

        pagination_dict = {  # Pagination
            'refresh_url': 'testString',
            'next_url': 'testString',
            'total': 38,
            'matched': 38,
            'refresh_cursor': 'testString',
            'next_cursor': 'testString',
        }

        # JSON form of the ExampleCollection model under test.
        model_json = {
            'examples': [example_dict],
            'pagination': pagination_dict,
        }

        # json -> model instance via from_dict.
        model = ExampleCollection.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = ExampleCollection(**ExampleCollection.from_dict(model_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # model -> dict round-trip must be lossless.
        assert model.to_dict() == model_json
+
+
class TestModel_Intent:
    """
    Test Class for Intent
    """

    def test_intent_serialization(self):
        """
        Test serialization/deserialization for Intent
        """

        # Nested model dicts required to assemble the top-level model.
        mention = {  # Mention
            'entity': 'testString',
            'location': [38],
        }

        example_dict = {  # Example
            'text': 'testString',
            'mentions': [mention],
        }

        # JSON form of the Intent model under test.
        model_json = {
            'intent': 'testString',
            'description': 'testString',
            'examples': [example_dict],
        }

        # json -> model instance via from_dict.
        model = Intent.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = Intent(**Intent.from_dict(model_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # model -> dict round-trip must be lossless.
        assert model.to_dict() == model_json
+
+
class TestModel_IntentCollection:
    """
    Test Class for IntentCollection
    """

    def test_intent_collection_serialization(self):
        """
        Test serialization/deserialization for IntentCollection
        """

        # Nested model dicts required to assemble the top-level model.
        mention = {  # Mention
            'entity': 'testString',
            'location': [38],
        }

        example_dict = {  # Example
            'text': 'testString',
            'mentions': [mention],
        }

        intent_dict = {  # Intent
            'intent': 'testString',
            'description': 'testString',
            'examples': [example_dict],
        }

        pagination_dict = {  # Pagination
            'refresh_url': 'testString',
            'next_url': 'testString',
            'total': 38,
            'matched': 38,
            'refresh_cursor': 'testString',
            'next_cursor': 'testString',
        }

        # JSON form of the IntentCollection model under test.
        model_json = {
            'intents': [intent_dict],
            'pagination': pagination_dict,
        }

        # json -> model instance via from_dict.
        model = IntentCollection.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = IntentCollection(**IntentCollection.from_dict(model_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # model -> dict round-trip must be lossless.
        assert model.to_dict() == model_json
+
+
class TestModel_Log:
    """
    Test Class for Log
    """

    def test_log_serialization(self):
        """
        Test serialization/deserialization for Log
        """

        # Nested model dicts required to assemble the top-level model.
        message_input = {  # MessageInput
            'text': 'testString',
            'spelling_suggestions': False,
            'spelling_auto_correct': False,
            'foo': 'testString',
        }

        runtime_intent = {  # RuntimeIntent
            'intent': 'testString',
            'confidence': 72.5,
        }

        capture_group = {  # CaptureGroup
            'group': 'testString',
            'location': [38],
        }

        interpretation = {  # RuntimeEntityInterpretation
            'calendar_type': 'testString',
            'datetime_link': 'testString',
            'festival': 'testString',
            'granularity': 'day',
            'range_link': 'testString',
            'range_modifier': 'testString',
            'relative_day': 72.5,
            'relative_month': 72.5,
            'relative_week': 72.5,
            'relative_weekend': 72.5,
            'relative_year': 72.5,
            'specific_day': 72.5,
            'specific_day_of_week': 'testString',
            'specific_month': 72.5,
            'specific_quarter': 72.5,
            'specific_year': 72.5,
            'numeric_value': 72.5,
            'subtype': 'testString',
            'part_of_day': 'testString',
            'relative_hour': 72.5,
            'relative_minute': 72.5,
            'relative_second': 72.5,
            'specific_hour': 72.5,
            'specific_minute': 72.5,
            'specific_second': 72.5,
            'timezone': 'testString',
        }

        entity_alternative = {  # RuntimeEntityAlternative
            'value': 'testString',
            'confidence': 72.5,
        }

        entity_role = {  # RuntimeEntityRole
            'type': 'date_from',
        }

        runtime_entity = {  # RuntimeEntity
            'entity': 'testString',
            'location': [38],
            'value': 'testString',
            'confidence': 72.5,
            'groups': [capture_group],
            'interpretation': interpretation,
            'alternatives': [entity_alternative],
            'role': entity_role,
        }

        context_metadata = {  # MessageContextMetadata
            'deployment': 'testString',
            'user_id': 'testString',
        }

        context = {  # Context
            'conversation_id': 'testString',
            'system': {'anyKey': 'anyValue'},
            'metadata': context_metadata,
            'foo': 'testString',
        }

        node_visited_details = {  # DialogNodeVisitedDetails
            'dialog_node': 'testString',
            'title': 'testString',
            'conditions': 'testString',
        }

        log_message_source = {  # LogMessageSource
            'type': 'dialog_node',
            'dialog_node': 'testString',
        }

        log_message = {  # LogMessage
            'level': 'info',
            'msg': 'testString',
            'code': 'testString',
            'source': log_message_source,
        }

        generic_channel = {  # ResponseGenericChannel
            'channel': 'chat',
        }

        response_generic = {  # RuntimeResponseGenericRuntimeResponseTypeText
            'response_type': 'text',
            'text': 'testString',
            'channels': [generic_channel],
        }

        output_data = {  # OutputData
            'nodes_visited': ['testString'],
            'nodes_visited_details': [node_visited_details],
            'log_messages': [log_message],
            'generic': [response_generic],
            'foo': 'testString',
        }

        message_request = {  # MessageRequest
            'input': message_input,
            'intents': [runtime_intent],
            'entities': [runtime_entity],
            'alternate_intents': False,
            'context': context,
            'output': output_data,
            'user_id': 'testString',
        }

        message_response = {  # MessageResponse
            'input': message_input,
            'intents': [runtime_intent],
            'entities': [runtime_entity],
            'alternate_intents': False,
            'context': context,
            'output': output_data,
            'user_id': 'testString',
        }

        # JSON form of the Log model under test.
        model_json = {
            'request': message_request,
            'response': message_response,
            'log_id': 'testString',
            'request_timestamp': 'testString',
            'response_timestamp': 'testString',
            'workspace_id': 'testString',
            'language': 'testString',
        }

        # json -> model instance via from_dict.
        model = Log.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = Log(**Log.from_dict(model_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # model -> dict round-trip must be lossless.
        assert model.to_dict() == model_json
+
+
class TestModel_LogCollection:
    """
    Test Class for LogCollection
    """

    def test_log_collection_serialization(self):
        """
        Test serialization/deserialization for LogCollection
        """

        # Nested model dicts required to assemble the top-level model.
        message_input = {  # MessageInput
            'text': 'testString',
            'spelling_suggestions': False,
            'spelling_auto_correct': False,
            'foo': 'testString',
        }

        runtime_intent = {  # RuntimeIntent
            'intent': 'testString',
            'confidence': 72.5,
        }

        capture_group = {  # CaptureGroup
            'group': 'testString',
            'location': [38],
        }

        interpretation = {  # RuntimeEntityInterpretation
            'calendar_type': 'testString',
            'datetime_link': 'testString',
            'festival': 'testString',
            'granularity': 'day',
            'range_link': 'testString',
            'range_modifier': 'testString',
            'relative_day': 72.5,
            'relative_month': 72.5,
            'relative_week': 72.5,
            'relative_weekend': 72.5,
            'relative_year': 72.5,
            'specific_day': 72.5,
            'specific_day_of_week': 'testString',
            'specific_month': 72.5,
            'specific_quarter': 72.5,
            'specific_year': 72.5,
            'numeric_value': 72.5,
            'subtype': 'testString',
            'part_of_day': 'testString',
            'relative_hour': 72.5,
            'relative_minute': 72.5,
            'relative_second': 72.5,
            'specific_hour': 72.5,
            'specific_minute': 72.5,
            'specific_second': 72.5,
            'timezone': 'testString',
        }

        entity_alternative = {  # RuntimeEntityAlternative
            'value': 'testString',
            'confidence': 72.5,
        }

        entity_role = {  # RuntimeEntityRole
            'type': 'date_from',
        }

        runtime_entity = {  # RuntimeEntity
            'entity': 'testString',
            'location': [38],
            'value': 'testString',
            'confidence': 72.5,
            'groups': [capture_group],
            'interpretation': interpretation,
            'alternatives': [entity_alternative],
            'role': entity_role,
        }

        context_metadata = {  # MessageContextMetadata
            'deployment': 'testString',
            'user_id': 'testString',
        }

        context = {  # Context
            'conversation_id': 'testString',
            'system': {'anyKey': 'anyValue'},
            'metadata': context_metadata,
            'foo': 'testString',
        }

        node_visited_details = {  # DialogNodeVisitedDetails
            'dialog_node': 'testString',
            'title': 'testString',
            'conditions': 'testString',
        }

        log_message_source = {  # LogMessageSource
            'type': 'dialog_node',
            'dialog_node': 'testString',
        }

        log_message = {  # LogMessage
            'level': 'info',
            'msg': 'testString',
            'code': 'testString',
            'source': log_message_source,
        }

        generic_channel = {  # ResponseGenericChannel
            'channel': 'chat',
        }

        response_generic = {  # RuntimeResponseGenericRuntimeResponseTypeText
            'response_type': 'text',
            'text': 'testString',
            'channels': [generic_channel],
        }

        output_data = {  # OutputData
            'nodes_visited': ['testString'],
            'nodes_visited_details': [node_visited_details],
            'log_messages': [log_message],
            'generic': [response_generic],
            'foo': 'testString',
        }

        message_request = {  # MessageRequest
            'input': message_input,
            'intents': [runtime_intent],
            'entities': [runtime_entity],
            'alternate_intents': False,
            'context': context,
            'output': output_data,
            'user_id': 'testString',
        }

        message_response = {  # MessageResponse
            'input': message_input,
            'intents': [runtime_intent],
            'entities': [runtime_entity],
            'alternate_intents': False,
            'context': context,
            'output': output_data,
            'user_id': 'testString',
        }

        log_dict = {  # Log
            'request': message_request,
            'response': message_response,
            'log_id': 'testString',
            'request_timestamp': 'testString',
            'response_timestamp': 'testString',
            'workspace_id': 'testString',
            'language': 'testString',
        }

        log_pagination = {  # LogPagination
            'next_url': 'testString',
            'matched': 38,
            'next_cursor': 'testString',
        }

        # JSON form of the LogCollection model under test.
        model_json = {
            'logs': [log_dict],
            'pagination': log_pagination,
        }

        # json -> model instance via from_dict.
        model = LogCollection.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = LogCollection(**LogCollection.from_dict(model_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # model -> dict round-trip must be lossless.
        assert model.to_dict() == model_json
+
+
class TestModel_LogMessage:
    """
    Test Class for LogMessage
    """

    def test_log_message_serialization(self):
        """
        Test serialization/deserialization for LogMessage
        """

        # Child model fixture: LogMessageSource
        log_message_source_model = {
            'type': 'dialog_node',
            'dialog_node': 'testString',
        }

        # JSON representation of a LogMessage model
        log_message_model_json = {
            'level': 'info',
            'msg': 'testString',
            'code': 'testString',
            'source': log_message_source_model,
        }

        # Deserialize; use `is not None` rather than `!= False`, which is
        # vacuous (a None return from from_dict also satisfies `!= False`).
        log_message_model = LogMessage.from_dict(log_message_model_json)
        assert log_message_model is not None

        # Rebuild a second instance from the first one's attributes to verify
        # the constructor accepts the deserialized fields.
        log_message_model2 = LogMessage(**LogMessage.from_dict(log_message_model_json).__dict__)

        # The two instances must compare equal.
        assert log_message_model == log_message_model2

        # Round-trip back to a dict with no loss of data.
        assert log_message_model.to_dict() == log_message_model_json
+
+
class TestModel_LogMessageSource:
    """
    Test Class for LogMessageSource
    """

    def test_log_message_source_serialization(self):
        """
        Test serialization/deserialization for LogMessageSource
        """

        # JSON representation of a LogMessageSource model
        log_message_source_model_json = {
            'type': 'dialog_node',
            'dialog_node': 'testString',
        }

        # Deserialize; use `is not None` rather than `!= False`, which is
        # vacuous (a None return from from_dict also satisfies `!= False`).
        log_message_source_model = LogMessageSource.from_dict(log_message_source_model_json)
        assert log_message_source_model is not None

        # Rebuild a second instance from the first one's attributes.
        log_message_source_model2 = LogMessageSource(**LogMessageSource.from_dict(log_message_source_model_json).__dict__)

        # The two instances must compare equal.
        assert log_message_source_model == log_message_source_model2

        # Round-trip back to a dict with no loss of data.
        assert log_message_source_model.to_dict() == log_message_source_model_json
+
+
class TestModel_LogPagination:
    """
    Test Class for LogPagination
    """

    def test_log_pagination_serialization(self):
        """
        Test serialization/deserialization for LogPagination
        """

        # JSON representation of a LogPagination model
        log_pagination_model_json = {
            'next_url': 'testString',
            'matched': 38,
            'next_cursor': 'testString',
        }

        # Deserialize; use `is not None` rather than `!= False`, which is
        # vacuous (a None return from from_dict also satisfies `!= False`).
        log_pagination_model = LogPagination.from_dict(log_pagination_model_json)
        assert log_pagination_model is not None

        # Rebuild a second instance from the first one's attributes.
        log_pagination_model2 = LogPagination(**LogPagination.from_dict(log_pagination_model_json).__dict__)

        # The two instances must compare equal.
        assert log_pagination_model == log_pagination_model2

        # Round-trip back to a dict with no loss of data.
        assert log_pagination_model.to_dict() == log_pagination_model_json
+
+
class TestModel_Mention:
    """
    Test Class for Mention
    """

    def test_mention_serialization(self):
        """
        Test serialization/deserialization for Mention
        """

        # JSON representation of a Mention model
        mention_model_json = {
            'entity': 'testString',
            'location': [38],
        }

        # Deserialize; use `is not None` rather than `!= False`, which is
        # vacuous (a None return from from_dict also satisfies `!= False`).
        mention_model = Mention.from_dict(mention_model_json)
        assert mention_model is not None

        # Rebuild a second instance from the first one's attributes.
        mention_model2 = Mention(**Mention.from_dict(mention_model_json).__dict__)

        # The two instances must compare equal.
        assert mention_model == mention_model2

        # Round-trip back to a dict with no loss of data.
        assert mention_model.to_dict() == mention_model_json
+
+
class TestModel_MessageContextMetadata:
    """
    Test Class for MessageContextMetadata
    """

    def test_message_context_metadata_serialization(self):
        """
        Test serialization/deserialization for MessageContextMetadata
        """

        # JSON representation of a MessageContextMetadata model
        message_context_metadata_model_json = {
            'deployment': 'testString',
            'user_id': 'testString',
        }

        # Deserialize; use `is not None` rather than `!= False`, which is
        # vacuous (a None return from from_dict also satisfies `!= False`).
        message_context_metadata_model = MessageContextMetadata.from_dict(message_context_metadata_model_json)
        assert message_context_metadata_model is not None

        # Rebuild a second instance from the first one's attributes.
        message_context_metadata_model2 = MessageContextMetadata(**MessageContextMetadata.from_dict(message_context_metadata_model_json).__dict__)

        # The two instances must compare equal.
        assert message_context_metadata_model == message_context_metadata_model2

        # Round-trip back to a dict with no loss of data.
        assert message_context_metadata_model.to_dict() == message_context_metadata_model_json
+
+
class TestModel_MessageInput:
    """
    Test Class for MessageInput
    """

    def test_message_input_serialization(self):
        """
        Test serialization/deserialization for MessageInput
        """

        # JSON representation of a MessageInput model; 'foo' exercises the
        # model's support for arbitrary additional properties.
        message_input_model_json = {
            'text': 'testString',
            'spelling_suggestions': False,
            'spelling_auto_correct': False,
            'foo': 'testString',
        }

        # Deserialize; use `is not None` rather than `!= False`, which is
        # vacuous (a None return from from_dict also satisfies `!= False`).
        message_input_model = MessageInput.from_dict(message_input_model_json)
        assert message_input_model is not None

        # Rebuild a second instance from the first one's attributes.
        message_input_model2 = MessageInput(**MessageInput.from_dict(message_input_model_json).__dict__)

        # The two instances must compare equal.
        assert message_input_model == message_input_model2

        # Round-trip back to a dict with no loss of data.
        assert message_input_model.to_dict() == message_input_model_json

        # Test get_properties and set_properties methods.
        message_input_model.set_properties({})
        actual_dict = message_input_model.get_properties()
        assert actual_dict == {}

        expected_dict = {'foo': 'testString'}
        message_input_model.set_properties(expected_dict)
        actual_dict = message_input_model.get_properties()
        assert actual_dict.keys() == expected_dict.keys()
+
+
class TestModel_MessageRequest:
    """
    Test Class for MessageRequest
    """

    def test_message_request_serialization(self):
        """
        Test serialization/deserialization for MessageRequest
        """

        # --- Child model fixtures needed to build this model ----------

        message_input_model = {
            'text': 'testString',
            'spelling_suggestions': False,
            'spelling_auto_correct': False,
            'foo': 'testString',
        }

        runtime_intent_model = {
            'intent': 'testString',
            'confidence': 72.5,
        }

        capture_group_model = {
            'group': 'testString',
            'location': [38],
        }

        runtime_entity_interpretation_model = {
            'calendar_type': 'testString',
            'datetime_link': 'testString',
            'festival': 'testString',
            'granularity': 'day',
            'range_link': 'testString',
            'range_modifier': 'testString',
            'relative_day': 72.5,
            'relative_month': 72.5,
            'relative_week': 72.5,
            'relative_weekend': 72.5,
            'relative_year': 72.5,
            'specific_day': 72.5,
            'specific_day_of_week': 'testString',
            'specific_month': 72.5,
            'specific_quarter': 72.5,
            'specific_year': 72.5,
            'numeric_value': 72.5,
            'subtype': 'testString',
            'part_of_day': 'testString',
            'relative_hour': 72.5,
            'relative_minute': 72.5,
            'relative_second': 72.5,
            'specific_hour': 72.5,
            'specific_minute': 72.5,
            'specific_second': 72.5,
            'timezone': 'testString',
        }

        runtime_entity_alternative_model = {
            'value': 'testString',
            'confidence': 72.5,
        }

        runtime_entity_role_model = {
            'type': 'date_from',
        }

        runtime_entity_model = {
            'entity': 'testString',
            'location': [38],
            'value': 'testString',
            'confidence': 72.5,
            'groups': [capture_group_model],
            'interpretation': runtime_entity_interpretation_model,
            'alternatives': [runtime_entity_alternative_model],
            'role': runtime_entity_role_model,
        }

        message_context_metadata_model = {
            'deployment': 'testString',
            'user_id': 'testString',
        }

        context_model = {
            'conversation_id': 'testString',
            'system': {'anyKey': 'anyValue'},
            'metadata': message_context_metadata_model,
            'foo': 'testString',
        }

        dialog_node_visited_details_model = {
            'dialog_node': 'testString',
            'title': 'testString',
            'conditions': 'testString',
        }

        log_message_source_model = {
            'type': 'dialog_node',
            'dialog_node': 'testString',
        }

        log_message_model = {
            'level': 'info',
            'msg': 'testString',
            'code': 'testString',
            'source': log_message_source_model,
        }

        response_generic_channel_model = {
            'channel': 'chat',
        }

        # RuntimeResponseGenericRuntimeResponseTypeText
        runtime_response_generic_model = {
            'response_type': 'text',
            'text': 'testString',
            'channels': [response_generic_channel_model],
        }

        output_data_model = {
            'nodes_visited': ['testString'],
            'nodes_visited_details': [dialog_node_visited_details_model],
            'log_messages': [log_message_model],
            'generic': [runtime_response_generic_model],
            'foo': 'testString',
        }

        # --- JSON representation of a MessageRequest model ------------

        message_request_model_json = {
            'input': message_input_model,
            'intents': [runtime_intent_model],
            'entities': [runtime_entity_model],
            'alternate_intents': False,
            'context': context_model,
            'output': output_data_model,
            'user_id': 'testString',
        }

        # Deserialize; use `is not None` rather than `!= False`, which is
        # vacuous (a None return from from_dict also satisfies `!= False`).
        message_request_model = MessageRequest.from_dict(message_request_model_json)
        assert message_request_model is not None

        # Rebuild a second instance from the first one's attributes.
        message_request_model2 = MessageRequest(**MessageRequest.from_dict(message_request_model_json).__dict__)

        # The two instances must compare equal.
        assert message_request_model == message_request_model2

        # Round-trip back to a dict with no loss of data.
        assert message_request_model.to_dict() == message_request_model_json
+
+
class TestModel_MessageResponse:
    """
    Test Class for MessageResponse
    """

    def test_message_response_serialization(self):
        """
        Test serialization/deserialization for MessageResponse
        """

        # --- Child model fixtures needed to build this model ----------

        message_input_model = {
            'text': 'testString',
            'spelling_suggestions': False,
            'spelling_auto_correct': False,
            'foo': 'testString',
        }

        runtime_intent_model = {
            'intent': 'testString',
            'confidence': 72.5,
        }

        capture_group_model = {
            'group': 'testString',
            'location': [38],
        }

        runtime_entity_interpretation_model = {
            'calendar_type': 'testString',
            'datetime_link': 'testString',
            'festival': 'testString',
            'granularity': 'day',
            'range_link': 'testString',
            'range_modifier': 'testString',
            'relative_day': 72.5,
            'relative_month': 72.5,
            'relative_week': 72.5,
            'relative_weekend': 72.5,
            'relative_year': 72.5,
            'specific_day': 72.5,
            'specific_day_of_week': 'testString',
            'specific_month': 72.5,
            'specific_quarter': 72.5,
            'specific_year': 72.5,
            'numeric_value': 72.5,
            'subtype': 'testString',
            'part_of_day': 'testString',
            'relative_hour': 72.5,
            'relative_minute': 72.5,
            'relative_second': 72.5,
            'specific_hour': 72.5,
            'specific_minute': 72.5,
            'specific_second': 72.5,
            'timezone': 'testString',
        }

        runtime_entity_alternative_model = {
            'value': 'testString',
            'confidence': 72.5,
        }

        runtime_entity_role_model = {
            'type': 'date_from',
        }

        runtime_entity_model = {
            'entity': 'testString',
            'location': [38],
            'value': 'testString',
            'confidence': 72.5,
            'groups': [capture_group_model],
            'interpretation': runtime_entity_interpretation_model,
            'alternatives': [runtime_entity_alternative_model],
            'role': runtime_entity_role_model,
        }

        message_context_metadata_model = {
            'deployment': 'testString',
            'user_id': 'testString',
        }

        context_model = {
            'conversation_id': 'testString',
            'system': {'anyKey': 'anyValue'},
            'metadata': message_context_metadata_model,
            'foo': 'testString',
        }

        dialog_node_visited_details_model = {
            'dialog_node': 'testString',
            'title': 'testString',
            'conditions': 'testString',
        }

        log_message_source_model = {
            'type': 'dialog_node',
            'dialog_node': 'testString',
        }

        log_message_model = {
            'level': 'info',
            'msg': 'testString',
            'code': 'testString',
            'source': log_message_source_model,
        }

        response_generic_channel_model = {
            'channel': 'chat',
        }

        # RuntimeResponseGenericRuntimeResponseTypeText
        runtime_response_generic_model = {
            'response_type': 'text',
            'text': 'testString',
            'channels': [response_generic_channel_model],
        }

        output_data_model = {
            'nodes_visited': ['testString'],
            'nodes_visited_details': [dialog_node_visited_details_model],
            'log_messages': [log_message_model],
            'generic': [runtime_response_generic_model],
            'foo': 'testString',
        }

        # --- JSON representation of a MessageResponse model -----------

        message_response_model_json = {
            'input': message_input_model,
            'intents': [runtime_intent_model],
            'entities': [runtime_entity_model],
            'alternate_intents': False,
            'context': context_model,
            'output': output_data_model,
            'user_id': 'testString',
        }

        # Deserialize; use `is not None` rather than `!= False`, which is
        # vacuous (a None return from from_dict also satisfies `!= False`).
        message_response_model = MessageResponse.from_dict(message_response_model_json)
        assert message_response_model is not None

        # Rebuild a second instance from the first one's attributes.
        message_response_model2 = MessageResponse(**MessageResponse.from_dict(message_response_model_json).__dict__)

        # The two instances must compare equal.
        assert message_response_model == message_response_model2

        # Round-trip back to a dict with no loss of data.
        assert message_response_model.to_dict() == message_response_model_json
+
+
class TestModel_OutputData:
    """
    Test Class for OutputData
    """

    def test_output_data_serialization(self):
        """
        Test serialization/deserialization for OutputData
        """

        # --- Child model fixtures needed to build this model ----------

        dialog_node_visited_details_model = {
            'dialog_node': 'testString',
            'title': 'testString',
            'conditions': 'testString',
        }

        log_message_source_model = {
            'type': 'dialog_node',
            'dialog_node': 'testString',
        }

        log_message_model = {
            'level': 'info',
            'msg': 'testString',
            'code': 'testString',
            'source': log_message_source_model,
        }

        response_generic_channel_model = {
            'channel': 'chat',
        }

        # RuntimeResponseGenericRuntimeResponseTypeText
        runtime_response_generic_model = {
            'response_type': 'text',
            'text': 'testString',
            'channels': [response_generic_channel_model],
        }

        # JSON representation of an OutputData model; 'foo' exercises the
        # model's support for arbitrary additional properties.
        output_data_model_json = {
            'nodes_visited': ['testString'],
            'nodes_visited_details': [dialog_node_visited_details_model],
            'log_messages': [log_message_model],
            'generic': [runtime_response_generic_model],
            'foo': 'testString',
        }

        # Deserialize; use `is not None` rather than `!= False`, which is
        # vacuous (a None return from from_dict also satisfies `!= False`).
        output_data_model = OutputData.from_dict(output_data_model_json)
        assert output_data_model is not None

        # Rebuild a second instance from the first one's attributes.
        output_data_model2 = OutputData(**OutputData.from_dict(output_data_model_json).__dict__)

        # The two instances must compare equal.
        assert output_data_model == output_data_model2

        # Round-trip back to a dict with no loss of data.
        assert output_data_model.to_dict() == output_data_model_json

        # Test get_properties and set_properties methods.
        output_data_model.set_properties({})
        actual_dict = output_data_model.get_properties()
        assert actual_dict == {}

        expected_dict = {'foo': 'testString'}
        output_data_model.set_properties(expected_dict)
        actual_dict = output_data_model.get_properties()
        assert actual_dict.keys() == expected_dict.keys()
+
+
class TestModel_Pagination:
    """
    Test Class for Pagination
    """

    def test_pagination_serialization(self):
        """
        Test serialization/deserialization for Pagination
        """

        # JSON representation of a Pagination model
        pagination_model_json = {
            'refresh_url': 'testString',
            'next_url': 'testString',
            'total': 38,
            'matched': 38,
            'refresh_cursor': 'testString',
            'next_cursor': 'testString',
        }

        # Deserialize; use `is not None` rather than `!= False`, which is
        # vacuous (a None return from from_dict also satisfies `!= False`).
        pagination_model = Pagination.from_dict(pagination_model_json)
        assert pagination_model is not None

        # Rebuild a second instance from the first one's attributes.
        pagination_model2 = Pagination(**Pagination.from_dict(pagination_model_json).__dict__)

        # The two instances must compare equal.
        assert pagination_model == pagination_model2

        # Round-trip back to a dict with no loss of data.
        assert pagination_model.to_dict() == pagination_model_json
+
+
class TestModel_ResponseGenericChannel:
    """
    Test Class for ResponseGenericChannel
    """

    def test_response_generic_channel_serialization(self):
        """
        Test serialization/deserialization for ResponseGenericChannel
        """

        # JSON representation of a ResponseGenericChannel model
        response_generic_channel_model_json = {
            'channel': 'chat',
        }

        # Deserialize; use `is not None` rather than `!= False`, which is
        # vacuous (a None return from from_dict also satisfies `!= False`).
        response_generic_channel_model = ResponseGenericChannel.from_dict(response_generic_channel_model_json)
        assert response_generic_channel_model is not None

        # Rebuild a second instance from the first one's attributes.
        response_generic_channel_model2 = ResponseGenericChannel(**ResponseGenericChannel.from_dict(response_generic_channel_model_json).__dict__)

        # The two instances must compare equal.
        assert response_generic_channel_model == response_generic_channel_model2

        # Round-trip back to a dict with no loss of data.
        assert response_generic_channel_model.to_dict() == response_generic_channel_model_json
+
+
class TestModel_RuntimeEntity:
    """
    Test Class for RuntimeEntity
    """

    def test_runtime_entity_serialization(self):
        """
        Test serialization/deserialization for RuntimeEntity
        """

        # --- Child model fixtures needed to build this model ----------

        capture_group_model = {
            'group': 'testString',
            'location': [38],
        }

        runtime_entity_interpretation_model = {
            'calendar_type': 'testString',
            'datetime_link': 'testString',
            'festival': 'testString',
            'granularity': 'day',
            'range_link': 'testString',
            'range_modifier': 'testString',
            'relative_day': 72.5,
            'relative_month': 72.5,
            'relative_week': 72.5,
            'relative_weekend': 72.5,
            'relative_year': 72.5,
            'specific_day': 72.5,
            'specific_day_of_week': 'testString',
            'specific_month': 72.5,
            'specific_quarter': 72.5,
            'specific_year': 72.5,
            'numeric_value': 72.5,
            'subtype': 'testString',
            'part_of_day': 'testString',
            'relative_hour': 72.5,
            'relative_minute': 72.5,
            'relative_second': 72.5,
            'specific_hour': 72.5,
            'specific_minute': 72.5,
            'specific_second': 72.5,
            'timezone': 'testString',
        }

        runtime_entity_alternative_model = {
            'value': 'testString',
            'confidence': 72.5,
        }

        runtime_entity_role_model = {
            'type': 'date_from',
        }

        # JSON representation of a RuntimeEntity model
        runtime_entity_model_json = {
            'entity': 'testString',
            'location': [38],
            'value': 'testString',
            'confidence': 72.5,
            'groups': [capture_group_model],
            'interpretation': runtime_entity_interpretation_model,
            'alternatives': [runtime_entity_alternative_model],
            'role': runtime_entity_role_model,
        }

        # Deserialize; use `is not None` rather than `!= False`, which is
        # vacuous (a None return from from_dict also satisfies `!= False`).
        runtime_entity_model = RuntimeEntity.from_dict(runtime_entity_model_json)
        assert runtime_entity_model is not None

        # Rebuild a second instance from the first one's attributes.
        runtime_entity_model2 = RuntimeEntity(**RuntimeEntity.from_dict(runtime_entity_model_json).__dict__)

        # The two instances must compare equal.
        assert runtime_entity_model == runtime_entity_model2

        # Round-trip back to a dict with no loss of data.
        assert runtime_entity_model.to_dict() == runtime_entity_model_json
+
+
class TestModel_RuntimeEntityAlternative:
    """
    Test Class for RuntimeEntityAlternative
    """

    def test_runtime_entity_alternative_serialization(self):
        """
        Test serialization/deserialization for RuntimeEntityAlternative
        """

        # JSON representation of a RuntimeEntityAlternative model
        runtime_entity_alternative_model_json = {
            'value': 'testString',
            'confidence': 72.5,
        }

        # Deserialize; use `is not None` rather than `!= False`, which is
        # vacuous (a None return from from_dict also satisfies `!= False`).
        runtime_entity_alternative_model = RuntimeEntityAlternative.from_dict(runtime_entity_alternative_model_json)
        assert runtime_entity_alternative_model is not None

        # Rebuild a second instance from the first one's attributes.
        runtime_entity_alternative_model2 = RuntimeEntityAlternative(**RuntimeEntityAlternative.from_dict(runtime_entity_alternative_model_json).__dict__)

        # The two instances must compare equal.
        assert runtime_entity_alternative_model == runtime_entity_alternative_model2

        # Round-trip back to a dict with no loss of data.
        assert runtime_entity_alternative_model.to_dict() == runtime_entity_alternative_model_json
+
+
class TestModel_RuntimeEntityInterpretation:
    """
    Test Class for RuntimeEntityInterpretation
    """

    def test_runtime_entity_interpretation_serialization(self):
        """
        Test serialization/deserialization for RuntimeEntityInterpretation
        """

        # JSON representation of a RuntimeEntityInterpretation, as a dict literal
        runtime_entity_interpretation_model_json = {
            'calendar_type': 'testString',
            'datetime_link': 'testString',
            'festival': 'testString',
            'granularity': 'day',
            'range_link': 'testString',
            'range_modifier': 'testString',
            'relative_day': 72.5,
            'relative_month': 72.5,
            'relative_week': 72.5,
            'relative_weekend': 72.5,
            'relative_year': 72.5,
            'specific_day': 72.5,
            'specific_day_of_week': 'testString',
            'specific_month': 72.5,
            'specific_quarter': 72.5,
            'specific_year': 72.5,
            'numeric_value': 72.5,
            'subtype': 'testString',
            'part_of_day': 'testString',
            'relative_hour': 72.5,
            'relative_minute': 72.5,
            'relative_second': 72.5,
            'specific_hour': 72.5,
            'specific_minute': 72.5,
            'specific_second': 72.5,
            'timezone': 'testString',
        }

        # Deserialize the JSON into a model instance
        runtime_entity_interpretation_model = RuntimeEntityInterpretation.from_dict(runtime_entity_interpretation_model_json)
        assert runtime_entity_interpretation_model != False

        # Rebuild a second instance from the first deserialization's attributes
        runtime_entity_interpretation_model2 = RuntimeEntityInterpretation(
            **RuntimeEntityInterpretation.from_dict(runtime_entity_interpretation_model_json).__dict__)

        # Both instances must compare equal
        assert runtime_entity_interpretation_model == runtime_entity_interpretation_model2

        # Serializing back must reproduce the original dict with no data loss
        assert runtime_entity_interpretation_model.to_dict() == runtime_entity_interpretation_model_json
+
+
class TestModel_RuntimeEntityRole:
    """
    Test Class for RuntimeEntityRole
    """

    def test_runtime_entity_role_serialization(self):
        """
        Test serialization/deserialization for RuntimeEntityRole
        """

        # JSON representation of a RuntimeEntityRole, as a dict literal
        runtime_entity_role_model_json = {
            'type': 'date_from',
        }

        # Deserialize the JSON into a model instance
        runtime_entity_role_model = RuntimeEntityRole.from_dict(runtime_entity_role_model_json)
        assert runtime_entity_role_model != False

        # Rebuild a second instance from the first deserialization's attributes
        runtime_entity_role_model2 = RuntimeEntityRole(
            **RuntimeEntityRole.from_dict(runtime_entity_role_model_json).__dict__)

        # Both instances must compare equal
        assert runtime_entity_role_model == runtime_entity_role_model2

        # Serializing back must reproduce the original dict with no data loss
        assert runtime_entity_role_model.to_dict() == runtime_entity_role_model_json
+
+
class TestModel_RuntimeIntent:
    """
    Test Class for RuntimeIntent
    """

    def test_runtime_intent_serialization(self):
        """
        Test serialization/deserialization for RuntimeIntent
        """

        # JSON representation of a RuntimeIntent, as a dict literal
        runtime_intent_model_json = {
            'intent': 'testString',
            'confidence': 72.5,
        }

        # Deserialize the JSON into a model instance
        runtime_intent_model = RuntimeIntent.from_dict(runtime_intent_model_json)
        assert runtime_intent_model != False

        # Rebuild a second instance from the first deserialization's attributes
        runtime_intent_model2 = RuntimeIntent(
            **RuntimeIntent.from_dict(runtime_intent_model_json).__dict__)

        # Both instances must compare equal
        assert runtime_intent_model == runtime_intent_model2

        # Serializing back must reproduce the original dict with no data loss
        assert runtime_intent_model.to_dict() == runtime_intent_model_json
+
+
class TestModel_StatusError:
    """
    Test Class for StatusError
    """

    def test_status_error_serialization(self):
        """
        Test serialization/deserialization for StatusError
        """

        # JSON representation of a StatusError, as a dict literal
        status_error_model_json = {
            'message': 'testString',
        }

        # Deserialize the JSON into a model instance
        status_error_model = StatusError.from_dict(status_error_model_json)
        assert status_error_model != False

        # Rebuild a second instance from the first deserialization's attributes
        status_error_model2 = StatusError(
            **StatusError.from_dict(status_error_model_json).__dict__)

        # Both instances must compare equal
        assert status_error_model == status_error_model2

        # Serializing back must reproduce the original dict with no data loss
        assert status_error_model.to_dict() == status_error_model_json
+
+
class TestModel_Synonym:
    """
    Test Class for Synonym
    """

    def test_synonym_serialization(self):
        """
        Test serialization/deserialization for Synonym
        """

        # JSON representation of a Synonym, as a dict literal
        synonym_model_json = {
            'synonym': 'testString',
        }

        # Deserialize the JSON into a model instance
        synonym_model = Synonym.from_dict(synonym_model_json)
        assert synonym_model != False

        # Rebuild a second instance from the first deserialization's attributes
        synonym_model2 = Synonym(**Synonym.from_dict(synonym_model_json).__dict__)

        # Both instances must compare equal
        assert synonym_model == synonym_model2

        # Serializing back must reproduce the original dict with no data loss
        assert synonym_model.to_dict() == synonym_model_json
+
+
class TestModel_SynonymCollection:
    """
    Test Class for SynonymCollection
    """

    def test_synonym_collection_serialization(self):
        """
        Test serialization/deserialization for SynonymCollection
        """

        # Dict forms of the nested model objects, written as literals.

        synonym_model = {'synonym': 'testString'}  # Synonym

        # Pagination
        pagination_model = {
            'refresh_url': 'testString',
            'next_url': 'testString',
            'total': 38,
            'matched': 38,
            'refresh_cursor': 'testString',
            'next_cursor': 'testString',
        }

        # JSON representation of a SynonymCollection
        synonym_collection_model_json = {
            'synonyms': [synonym_model],
            'pagination': pagination_model,
        }

        # Deserialize the JSON into a model instance
        synonym_collection_model = SynonymCollection.from_dict(synonym_collection_model_json)
        assert synonym_collection_model != False

        # Rebuild a second instance from the first deserialization's attributes
        synonym_collection_model2 = SynonymCollection(
            **SynonymCollection.from_dict(synonym_collection_model_json).__dict__)

        # Both instances must compare equal
        assert synonym_collection_model == synonym_collection_model2

        # Serializing back must reproduce the original dict with no data loss
        assert synonym_collection_model.to_dict() == synonym_collection_model_json
+
+
class TestModel_Value:
    """
    Test Class for Value
    """

    def test_value_serialization(self):
        """
        Test serialization/deserialization for Value
        """

        # JSON representation of a Value, as a dict literal
        value_model_json = {
            'value': 'testString',
            'metadata': {'anyKey': 'anyValue'},
            'type': 'synonyms',
            'synonyms': ['testString'],
            'patterns': ['testString'],
        }

        # Deserialize the JSON into a model instance
        value_model = Value.from_dict(value_model_json)
        assert value_model != False

        # Rebuild a second instance from the first deserialization's attributes
        value_model2 = Value(**Value.from_dict(value_model_json).__dict__)

        # Both instances must compare equal
        assert value_model == value_model2

        # Serializing back must reproduce the original dict with no data loss
        assert value_model.to_dict() == value_model_json
+
+
class TestModel_ValueCollection:
    """
    Test Class for ValueCollection
    """

    def test_value_collection_serialization(self):
        """
        Test serialization/deserialization for ValueCollection
        """

        # Dict forms of the nested model objects, written as literals.

        # Value
        value_model = {
            'value': 'testString',
            'metadata': {'anyKey': 'anyValue'},
            'type': 'synonyms',
            'synonyms': ['testString'],
            'patterns': ['testString'],
        }

        # Pagination
        pagination_model = {
            'refresh_url': 'testString',
            'next_url': 'testString',
            'total': 38,
            'matched': 38,
            'refresh_cursor': 'testString',
            'next_cursor': 'testString',
        }

        # JSON representation of a ValueCollection
        value_collection_model_json = {
            'values': [value_model],
            'pagination': pagination_model,
        }

        # Deserialize the JSON into a model instance
        value_collection_model = ValueCollection.from_dict(value_collection_model_json)
        assert value_collection_model != False

        # Rebuild a second instance from the first deserialization's attributes
        value_collection_model2 = ValueCollection(
            **ValueCollection.from_dict(value_collection_model_json).__dict__)

        # Both instances must compare equal
        assert value_collection_model == value_collection_model2

        # Serializing back must reproduce the original dict with no data loss
        assert value_collection_model.to_dict() == value_collection_model_json
+
+
class TestModel_Webhook:
    """
    Test Class for Webhook
    """

    def test_webhook_serialization(self):
        """
        Test serialization/deserialization for Webhook
        """

        # Dict forms of the nested model objects, written as literals.

        # WebhookHeader
        webhook_header_model = {
            'name': 'testString',
            'value': 'testString',
        }

        # JSON representation of a Webhook
        webhook_model_json = {
            'url': 'testString',
            'name': 'testString',
            'headers': [webhook_header_model],
        }

        # Deserialize the JSON into a model instance
        webhook_model = Webhook.from_dict(webhook_model_json)
        assert webhook_model != False

        # Rebuild a second instance from the first deserialization's attributes
        webhook_model2 = Webhook(**Webhook.from_dict(webhook_model_json).__dict__)

        # Both instances must compare equal
        assert webhook_model == webhook_model2

        # Serializing back must reproduce the original dict with no data loss
        assert webhook_model.to_dict() == webhook_model_json
+
+
class TestModel_WebhookHeader:
    """
    Test Class for WebhookHeader
    """

    def test_webhook_header_serialization(self):
        """
        Test serialization/deserialization for WebhookHeader
        """

        # JSON representation of a WebhookHeader, as a dict literal
        webhook_header_model_json = {
            'name': 'testString',
            'value': 'testString',
        }

        # Deserialize the JSON into a model instance
        webhook_header_model = WebhookHeader.from_dict(webhook_header_model_json)
        assert webhook_header_model != False

        # Rebuild a second instance from the first deserialization's attributes
        webhook_header_model2 = WebhookHeader(
            **WebhookHeader.from_dict(webhook_header_model_json).__dict__)

        # Both instances must compare equal
        assert webhook_header_model == webhook_header_model2

        # Serializing back must reproduce the original dict with no data loss
        assert webhook_header_model.to_dict() == webhook_header_model_json
+
+
class TestModel_Workspace:
    """
    Test Class for Workspace
    """

    def test_workspace_serialization(self):
        """
        Test serialization/deserialization for Workspace
        """

        # Dict forms of the nested model objects, written as literals.

        dialog_node_output_text_values_element_model = {'text': 'testString'}  # DialogNodeOutputTextValuesElement

        response_generic_channel_model = {'channel': 'chat'}  # ResponseGenericChannel

        # DialogNodeOutputGenericDialogNodeOutputResponseTypeText
        dialog_node_output_generic_model = {
            'response_type': 'text',
            'values': [dialog_node_output_text_values_element_model],
            'selection_policy': 'sequential',
            'delimiter': '\\n',
            'channels': [response_generic_channel_model],
        }

        dialog_node_output_modifiers_model = {'overwrite': True}  # DialogNodeOutputModifiers

        # DialogNodeOutput
        dialog_node_output_model = {
            'generic': [dialog_node_output_generic_model],
            'integrations': {'key1': {'anyKey': 'anyValue'}},
            'modifiers': dialog_node_output_modifiers_model,
            'foo': 'testString',
        }

        # DialogNodeContext
        dialog_node_context_model = {
            'integrations': {'key1': {'anyKey': 'anyValue'}},
            'foo': 'testString',
        }

        # DialogNodeNextStep
        dialog_node_next_step_model = {
            'behavior': 'get_user_input',
            'dialog_node': 'testString',
            'selector': 'condition',
        }

        # DialogNodeAction
        dialog_node_action_model = {
            'name': 'testString',
            'type': 'client',
            'parameters': {'anyKey': 'anyValue'},
            'result_variable': 'testString',
            'credentials': 'testString',
        }

        # DialogNode
        dialog_node_model = {
            'dialog_node': 'testString',
            'description': 'testString',
            'conditions': 'testString',
            'parent': 'testString',
            'previous_sibling': 'testString',
            'output': dialog_node_output_model,
            'context': dialog_node_context_model,
            'metadata': {'anyKey': 'anyValue'},
            'next_step': dialog_node_next_step_model,
            'title': 'testString',
            'type': 'standard',
            'event_name': 'focus',
            'variable': 'testString',
            'actions': [dialog_node_action_model],
            'digress_in': 'not_available',
            'digress_out': 'allow_returning',
            'digress_out_slots': 'not_allowed',
            'user_label': 'testString',
            'disambiguation_opt_out': False,
        }

        counterexample_model = {'text': 'testString'}  # Counterexample

        workspace_system_settings_tooling_model = {'store_generic_responses': True}  # WorkspaceSystemSettingsTooling

        # WorkspaceSystemSettingsDisambiguation
        workspace_system_settings_disambiguation_model = {
            'prompt': 'testString',
            'none_of_the_above_prompt': 'testString',
            'enabled': False,
            'sensitivity': 'auto',
            'randomize': True,
            'max_suggestions': 1,
            'suggestion_text_policy': 'testString',
        }

        workspace_system_settings_system_entities_model = {'enabled': False}  # WorkspaceSystemSettingsSystemEntities

        workspace_system_settings_off_topic_model = {'enabled': False}  # WorkspaceSystemSettingsOffTopic

        workspace_system_settings_nlp_model = {'model': 'testString'}  # WorkspaceSystemSettingsNlp

        # WorkspaceSystemSettings
        workspace_system_settings_model = {
            'tooling': workspace_system_settings_tooling_model,
            'disambiguation': workspace_system_settings_disambiguation_model,
            'human_agent_assist': {'anyKey': 'anyValue'},
            'spelling_suggestions': False,
            'spelling_auto_correct': False,
            'system_entities': workspace_system_settings_system_entities_model,
            'off_topic': workspace_system_settings_off_topic_model,
            'nlp': workspace_system_settings_nlp_model,
            'foo': 'testString',
        }

        # WebhookHeader
        webhook_header_model = {
            'name': 'testString',
            'value': 'testString',
        }

        # Webhook
        webhook_model = {
            'url': 'testString',
            'name': 'testString',
            'headers': [webhook_header_model],
        }

        # Mention
        mention_model = {
            'entity': 'testString',
            'location': [38],
        }

        # Example
        example_model = {
            'text': 'testString',
            'mentions': [mention_model],
        }

        # Intent
        intent_model = {
            'intent': 'testString',
            'description': 'testString',
            'examples': [example_model],
        }

        # Value
        value_model = {
            'value': 'testString',
            'metadata': {'anyKey': 'anyValue'},
            'type': 'synonyms',
            'synonyms': ['testString'],
            'patterns': ['testString'],
        }

        # Entity
        entity_model = {
            'entity': 'testString',
            'description': 'testString',
            'metadata': {'anyKey': 'anyValue'},
            'fuzzy_match': True,
            'values': [value_model],
        }

        # JSON representation of a Workspace
        workspace_model_json = {
            'name': 'testString',
            'description': 'testString',
            'language': 'testString',
            'dialog_nodes': [dialog_node_model],
            'counterexamples': [counterexample_model],
            'metadata': {'anyKey': 'anyValue'},
            'learning_opt_out': False,
            'system_settings': workspace_system_settings_model,
            'webhooks': [webhook_model],
            'intents': [intent_model],
            'entities': [entity_model],
        }

        # Deserialize the JSON into a model instance
        workspace_model = Workspace.from_dict(workspace_model_json)
        assert workspace_model != False

        # Rebuild a second instance from the first deserialization's attributes
        workspace_model2 = Workspace(**Workspace.from_dict(workspace_model_json).__dict__)

        # Both instances must compare equal
        assert workspace_model == workspace_model2

        # Serializing back must reproduce the original dict with no data loss
        assert workspace_model.to_dict() == workspace_model_json
+
+
class TestModel_WorkspaceCollection:
    """
    Test Class for WorkspaceCollection
    """

    def test_workspace_collection_serialization(self):
        """
        Test serialization/deserialization for WorkspaceCollection
        """

        # Dict forms of the nested model objects, written as literals.

        dialog_node_output_text_values_element_model = {'text': 'testString'}  # DialogNodeOutputTextValuesElement

        response_generic_channel_model = {'channel': 'chat'}  # ResponseGenericChannel

        # DialogNodeOutputGenericDialogNodeOutputResponseTypeText
        dialog_node_output_generic_model = {
            'response_type': 'text',
            'values': [dialog_node_output_text_values_element_model],
            'selection_policy': 'sequential',
            'delimiter': '\\n',
            'channels': [response_generic_channel_model],
        }

        dialog_node_output_modifiers_model = {'overwrite': True}  # DialogNodeOutputModifiers

        # DialogNodeOutput
        dialog_node_output_model = {
            'generic': [dialog_node_output_generic_model],
            'integrations': {'key1': {'anyKey': 'anyValue'}},
            'modifiers': dialog_node_output_modifiers_model,
            'foo': 'testString',
        }

        # DialogNodeContext
        dialog_node_context_model = {
            'integrations': {'key1': {'anyKey': 'anyValue'}},
            'foo': 'testString',
        }

        # DialogNodeNextStep
        dialog_node_next_step_model = {
            'behavior': 'get_user_input',
            'dialog_node': 'testString',
            'selector': 'condition',
        }

        # DialogNodeAction
        dialog_node_action_model = {
            'name': 'testString',
            'type': 'client',
            'parameters': {'anyKey': 'anyValue'},
            'result_variable': 'testString',
            'credentials': 'testString',
        }

        # DialogNode
        dialog_node_model = {
            'dialog_node': 'testString',
            'description': 'testString',
            'conditions': 'testString',
            'parent': 'testString',
            'previous_sibling': 'testString',
            'output': dialog_node_output_model,
            'context': dialog_node_context_model,
            'metadata': {'anyKey': 'anyValue'},
            'next_step': dialog_node_next_step_model,
            'title': 'testString',
            'type': 'standard',
            'event_name': 'focus',
            'variable': 'testString',
            'actions': [dialog_node_action_model],
            'digress_in': 'not_available',
            'digress_out': 'allow_returning',
            'digress_out_slots': 'not_allowed',
            'user_label': 'testString',
            'disambiguation_opt_out': False,
        }

        counterexample_model = {'text': 'testString'}  # Counterexample

        workspace_system_settings_tooling_model = {'store_generic_responses': True}  # WorkspaceSystemSettingsTooling

        # WorkspaceSystemSettingsDisambiguation
        workspace_system_settings_disambiguation_model = {
            'prompt': 'testString',
            'none_of_the_above_prompt': 'testString',
            'enabled': False,
            'sensitivity': 'auto',
            'randomize': True,
            'max_suggestions': 1,
            'suggestion_text_policy': 'testString',
        }

        workspace_system_settings_system_entities_model = {'enabled': False}  # WorkspaceSystemSettingsSystemEntities

        workspace_system_settings_off_topic_model = {'enabled': False}  # WorkspaceSystemSettingsOffTopic

        workspace_system_settings_nlp_model = {'model': 'testString'}  # WorkspaceSystemSettingsNlp

        # WorkspaceSystemSettings
        workspace_system_settings_model = {
            'tooling': workspace_system_settings_tooling_model,
            'disambiguation': workspace_system_settings_disambiguation_model,
            'human_agent_assist': {'anyKey': 'anyValue'},
            'spelling_suggestions': False,
            'spelling_auto_correct': False,
            'system_entities': workspace_system_settings_system_entities_model,
            'off_topic': workspace_system_settings_off_topic_model,
            'nlp': workspace_system_settings_nlp_model,
            'foo': 'testString',
        }

        # WebhookHeader
        webhook_header_model = {
            'name': 'testString',
            'value': 'testString',
        }

        # Webhook
        webhook_model = {
            'url': 'testString',
            'name': 'testString',
            'headers': [webhook_header_model],
        }

        # Mention
        mention_model = {
            'entity': 'testString',
            'location': [38],
        }

        # Example
        example_model = {
            'text': 'testString',
            'mentions': [mention_model],
        }

        # Intent
        intent_model = {
            'intent': 'testString',
            'description': 'testString',
            'examples': [example_model],
        }

        # Value
        value_model = {
            'value': 'testString',
            'metadata': {'anyKey': 'anyValue'},
            'type': 'synonyms',
            'synonyms': ['testString'],
            'patterns': ['testString'],
        }

        # Entity
        entity_model = {
            'entity': 'testString',
            'description': 'testString',
            'metadata': {'anyKey': 'anyValue'},
            'fuzzy_match': True,
            'values': [value_model],
        }

        # Workspace
        workspace_model = {
            'name': 'testString',
            'description': 'testString',
            'language': 'testString',
            'dialog_nodes': [dialog_node_model],
            'counterexamples': [counterexample_model],
            'metadata': {'anyKey': 'anyValue'},
            'learning_opt_out': False,
            'system_settings': workspace_system_settings_model,
            'webhooks': [webhook_model],
            'intents': [intent_model],
            'entities': [entity_model],
        }

        # Pagination
        pagination_model = {
            'refresh_url': 'testString',
            'next_url': 'testString',
            'total': 38,
            'matched': 38,
            'refresh_cursor': 'testString',
            'next_cursor': 'testString',
        }

        # JSON representation of a WorkspaceCollection
        workspace_collection_model_json = {
            'workspaces': [workspace_model],
            'pagination': pagination_model,
        }

        # Deserialize the JSON into a model instance
        workspace_collection_model = WorkspaceCollection.from_dict(workspace_collection_model_json)
        assert workspace_collection_model != False

        # Rebuild a second instance from the first deserialization's attributes
        workspace_collection_model2 = WorkspaceCollection(
            **WorkspaceCollection.from_dict(workspace_collection_model_json).__dict__)

        # Both instances must compare equal
        assert workspace_collection_model == workspace_collection_model2

        # Serializing back must reproduce the original dict with no data loss
        assert workspace_collection_model.to_dict() == workspace_collection_model_json
+
+
class TestModel_WorkspaceCounts:
    """
    Test Class for WorkspaceCounts
    """

    def test_workspace_counts_serialization(self):
        """
        Test serialization/deserialization for WorkspaceCounts
        """

        # JSON representation of a WorkspaceCounts, as a dict literal
        workspace_counts_model_json = {
            'intent': 38,
            'entity': 38,
            'node': 38,
        }

        # Deserialize the JSON into a model instance
        workspace_counts_model = WorkspaceCounts.from_dict(workspace_counts_model_json)
        assert workspace_counts_model != False

        # Rebuild a second instance from the first deserialization's attributes
        workspace_counts_model2 = WorkspaceCounts(
            **WorkspaceCounts.from_dict(workspace_counts_model_json).__dict__)

        # Both instances must compare equal
        assert workspace_counts_model == workspace_counts_model2

        # Serializing back must reproduce the original dict with no data loss
        assert workspace_counts_model.to_dict() == workspace_counts_model_json
+
+
class TestModel_WorkspaceSystemSettings:
    """Tests for the WorkspaceSystemSettings model."""

    def test_workspace_system_settings_serialization(self):
        """Round-trip WorkspaceSystemSettings and exercise its dynamic properties."""

        # Dict forms of the nested models this payload needs.
        tooling = {'store_generic_responses': True}  # WorkspaceSystemSettingsTooling

        disambiguation = {  # WorkspaceSystemSettingsDisambiguation
            'prompt': 'testString',
            'none_of_the_above_prompt': 'testString',
            'enabled': False,
            'sensitivity': 'auto',
            'randomize': True,
            'max_suggestions': 1,
            'suggestion_text_policy': 'testString',
        }

        system_entities = {'enabled': False}  # WorkspaceSystemSettingsSystemEntities
        off_topic = {'enabled': False}  # WorkspaceSystemSettingsOffTopic
        nlp = {'model': 'testString'}  # WorkspaceSystemSettingsNlp

        # JSON payload for a WorkspaceSystemSettings model.
        payload = {
            'tooling': tooling,
            'disambiguation': disambiguation,
            'human_agent_assist': {'anyKey': 'anyValue'},
            'spelling_suggestions': False,
            'spelling_auto_correct': False,
            'system_entities': system_entities,
            'off_topic': off_topic,
            'nlp': nlp,
            'foo': 'testString',
        }

        # Deserialize via the class factory.
        model = WorkspaceSystemSettings.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model_copy = WorkspaceSystemSettings(**WorkspaceSystemSettings.from_dict(payload).__dict__)

        # Both construction paths must agree.
        assert model == model_copy

        # Serializing back must reproduce the input payload without loss.
        assert model.to_dict() == payload

        # Exercise the get_properties/set_properties accessors.
        model.set_properties({})
        assert model.get_properties() == {}

        extra = {'foo': 'testString'}
        model.set_properties(extra)
        assert model.get_properties().keys() == extra.keys()
+
+
class TestModel_WorkspaceSystemSettingsDisambiguation:
    """Tests for the WorkspaceSystemSettingsDisambiguation model."""

    def test_workspace_system_settings_disambiguation_serialization(self):
        """Round-trip WorkspaceSystemSettingsDisambiguation through from_dict/to_dict."""

        # JSON payload for a WorkspaceSystemSettingsDisambiguation model.
        payload = {
            'prompt': 'testString',
            'none_of_the_above_prompt': 'testString',
            'enabled': False,
            'sensitivity': 'auto',
            'randomize': True,
            'max_suggestions': 1,
            'suggestion_text_policy': 'testString',
        }

        # Deserialize via the class factory.
        model = WorkspaceSystemSettingsDisambiguation.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model_copy = WorkspaceSystemSettingsDisambiguation(**WorkspaceSystemSettingsDisambiguation.from_dict(payload).__dict__)

        # Both construction paths must agree.
        assert model == model_copy

        # Serializing back must reproduce the input payload without loss.
        assert model.to_dict() == payload
+
+
class TestModel_WorkspaceSystemSettingsNlp:
    """Tests for the WorkspaceSystemSettingsNlp model."""

    def test_workspace_system_settings_nlp_serialization(self):
        """Round-trip WorkspaceSystemSettingsNlp through from_dict/to_dict."""

        # JSON payload for a WorkspaceSystemSettingsNlp model.
        payload = {'model': 'testString'}

        # Deserialize via the class factory.
        model = WorkspaceSystemSettingsNlp.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model_copy = WorkspaceSystemSettingsNlp(**WorkspaceSystemSettingsNlp.from_dict(payload).__dict__)

        # Both construction paths must agree.
        assert model == model_copy

        # Serializing back must reproduce the input payload without loss.
        assert model.to_dict() == payload
+
+
class TestModel_WorkspaceSystemSettingsOffTopic:
    """Tests for the WorkspaceSystemSettingsOffTopic model."""

    def test_workspace_system_settings_off_topic_serialization(self):
        """Round-trip WorkspaceSystemSettingsOffTopic through from_dict/to_dict."""

        # JSON payload for a WorkspaceSystemSettingsOffTopic model.
        payload = {'enabled': False}

        # Deserialize via the class factory.
        model = WorkspaceSystemSettingsOffTopic.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model_copy = WorkspaceSystemSettingsOffTopic(**WorkspaceSystemSettingsOffTopic.from_dict(payload).__dict__)

        # Both construction paths must agree.
        assert model == model_copy

        # Serializing back must reproduce the input payload without loss.
        assert model.to_dict() == payload
+
+
class TestModel_WorkspaceSystemSettingsSystemEntities:
    """Tests for the WorkspaceSystemSettingsSystemEntities model."""

    def test_workspace_system_settings_system_entities_serialization(self):
        """Round-trip WorkspaceSystemSettingsSystemEntities through from_dict/to_dict."""

        # JSON payload for a WorkspaceSystemSettingsSystemEntities model.
        payload = {'enabled': False}

        # Deserialize via the class factory.
        model = WorkspaceSystemSettingsSystemEntities.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model_copy = WorkspaceSystemSettingsSystemEntities(**WorkspaceSystemSettingsSystemEntities.from_dict(payload).__dict__)

        # Both construction paths must agree.
        assert model == model_copy

        # Serializing back must reproduce the input payload without loss.
        assert model.to_dict() == payload
+
+
class TestModel_WorkspaceSystemSettingsTooling:
    """Tests for the WorkspaceSystemSettingsTooling model."""

    def test_workspace_system_settings_tooling_serialization(self):
        """Round-trip WorkspaceSystemSettingsTooling through from_dict/to_dict."""

        # JSON payload for a WorkspaceSystemSettingsTooling model.
        payload = {'store_generic_responses': True}

        # Deserialize via the class factory.
        model = WorkspaceSystemSettingsTooling.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model_copy = WorkspaceSystemSettingsTooling(**WorkspaceSystemSettingsTooling.from_dict(payload).__dict__)

        # Both construction paths must agree.
        assert model == model_copy

        # Serializing back must reproduce the input payload without loss.
        assert model.to_dict() == payload
+
+
class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio:
    """Tests for the DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio model."""

    def test_dialog_node_output_generic_dialog_node_output_response_type_audio_serialization(self):
        """Round-trip the audio response-type model through from_dict/to_dict."""

        # Dict form of the nested channel model.
        channel = {'channel': 'chat'}  # ResponseGenericChannel

        # JSON payload for the audio response-type model.
        payload = {
            'response_type': 'audio',
            'source': 'testString',
            'title': 'testString',
            'description': 'testString',
            'channels': [channel],
            'channel_options': {'anyKey': 'anyValue'},
            'alt_text': 'testString',
        }

        # Deserialize via the class factory.
        model = DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model_copy = DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio(**DialogNodeOutputGenericDialogNodeOutputResponseTypeAudio.from_dict(payload).__dict__)

        # Both construction paths must agree.
        assert model == model_copy

        # Serializing back must reproduce the input payload without loss.
        assert model.to_dict() == payload
+
+
class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer:
    """Tests for the DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer model."""

    def test_dialog_node_output_generic_dialog_node_output_response_type_channel_transfer_serialization(self):
        """Round-trip the channel-transfer response-type model through from_dict/to_dict."""

        # Dict forms of the nested models this payload needs.
        target_chat = {'url': 'testString'}  # ChannelTransferTargetChat
        target = {'chat': target_chat}  # ChannelTransferTarget
        transfer_info = {'target': target}  # ChannelTransferInfo
        channel = {'channel': 'chat'}  # ResponseGenericChannel

        # JSON payload for the channel-transfer response-type model.
        payload = {
            'response_type': 'channel_transfer',
            'message_to_user': 'testString',
            'transfer_info': transfer_info,
            'channels': [channel],
        }

        # Deserialize via the class factory.
        model = DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model_copy = DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer(**DialogNodeOutputGenericDialogNodeOutputResponseTypeChannelTransfer.from_dict(payload).__dict__)

        # Both construction paths must agree.
        assert model == model_copy

        # Serializing back must reproduce the input payload without loss.
        assert model.to_dict() == payload
+
+
class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent:
    """Tests for the DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent model."""

    def test_dialog_node_output_generic_dialog_node_output_response_type_connect_to_agent_serialization(self):
        """Round-trip the connect-to-agent response-type model through from_dict/to_dict."""

        # Dict forms of the nested models this payload needs.
        availability = {'message': 'testString'}  # AgentAvailabilityMessage
        transfer_info = {  # DialogNodeOutputConnectToAgentTransferInfo
            'target': {'key1': {'anyKey': 'anyValue'}},
        }
        channel = {'channel': 'chat'}  # ResponseGenericChannel

        # JSON payload for the connect-to-agent response-type model.
        payload = {
            'response_type': 'connect_to_agent',
            'message_to_human_agent': 'testString',
            'agent_available': availability,
            'agent_unavailable': availability,
            'transfer_info': transfer_info,
            'channels': [channel],
        }

        # Deserialize via the class factory.
        model = DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model_copy = DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent(**DialogNodeOutputGenericDialogNodeOutputResponseTypeConnectToAgent.from_dict(payload).__dict__)

        # Both construction paths must agree.
        assert model == model_copy

        # Serializing back must reproduce the input payload without loss.
        assert model.to_dict() == payload
+
+
class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe:
    """Tests for the DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe model."""

    def test_dialog_node_output_generic_dialog_node_output_response_type_iframe_serialization(self):
        """Round-trip the iframe response-type model through from_dict/to_dict."""

        # Dict form of the nested channel model.
        channel = {'channel': 'chat'}  # ResponseGenericChannel

        # JSON payload for the iframe response-type model.
        payload = {
            'response_type': 'iframe',
            'source': 'testString',
            'title': 'testString',
            'description': 'testString',
            'image_url': 'testString',
            'channels': [channel],
        }

        # Deserialize via the class factory.
        model = DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model_copy = DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe(**DialogNodeOutputGenericDialogNodeOutputResponseTypeIframe.from_dict(payload).__dict__)

        # Both construction paths must agree.
        assert model == model_copy

        # Serializing back must reproduce the input payload without loss.
        assert model.to_dict() == payload
+
+
class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeImage:
    """Tests for the DialogNodeOutputGenericDialogNodeOutputResponseTypeImage model."""

    def test_dialog_node_output_generic_dialog_node_output_response_type_image_serialization(self):
        """Round-trip the image response-type model through from_dict/to_dict."""

        # Dict form of the nested channel model.
        channel = {'channel': 'chat'}  # ResponseGenericChannel

        # JSON payload for the image response-type model.
        payload = {
            'response_type': 'image',
            'source': 'testString',
            'title': 'testString',
            'description': 'testString',
            'channels': [channel],
            'alt_text': 'testString',
        }

        # Deserialize via the class factory.
        model = DialogNodeOutputGenericDialogNodeOutputResponseTypeImage.from_dict(payload)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model_copy = DialogNodeOutputGenericDialogNodeOutputResponseTypeImage(**DialogNodeOutputGenericDialogNodeOutputResponseTypeImage.from_dict(payload).__dict__)

        # Both construction paths must agree.
        assert model == model_copy

        # Serializing back must reproduce the input payload without loss.
        assert model.to_dict() == payload
+
+
+class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeOption:
+ """
+ Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeOption
+ """
+
+ def test_dialog_node_output_generic_dialog_node_output_response_type_option_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeOption
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ message_input_model = {} # MessageInput
+ message_input_model['text'] = 'testString'
+ message_input_model['spelling_suggestions'] = False
+ message_input_model['spelling_auto_correct'] = False
+ message_input_model['foo'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+
+ dialog_node_output_options_element_value_model = {} # DialogNodeOutputOptionsElementValue
+ dialog_node_output_options_element_value_model['input'] = message_input_model
+ dialog_node_output_options_element_value_model['intents'] = [runtime_intent_model]
+ dialog_node_output_options_element_value_model['entities'] = [runtime_entity_model]
+
+ dialog_node_output_options_element_model = {} # DialogNodeOutputOptionsElement
+ dialog_node_output_options_element_model['label'] = 'testString'
+ dialog_node_output_options_element_model['value'] = dialog_node_output_options_element_value_model
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeOption model
+ dialog_node_output_generic_dialog_node_output_response_type_option_model_json = {}
+ dialog_node_output_generic_dialog_node_output_response_type_option_model_json['response_type'] = 'option'
+ dialog_node_output_generic_dialog_node_output_response_type_option_model_json['title'] = 'testString'
+ dialog_node_output_generic_dialog_node_output_response_type_option_model_json['description'] = 'testString'
+ dialog_node_output_generic_dialog_node_output_response_type_option_model_json['preference'] = 'dropdown'
+ dialog_node_output_generic_dialog_node_output_response_type_option_model_json['options'] = [dialog_node_output_options_element_model]
+ dialog_node_output_generic_dialog_node_output_response_type_option_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeOption by calling from_dict on the json representation
+ dialog_node_output_generic_dialog_node_output_response_type_option_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeOption.from_dict(dialog_node_output_generic_dialog_node_output_response_type_option_model_json)
+ assert dialog_node_output_generic_dialog_node_output_response_type_option_model != False
+
+ # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeOption by calling from_dict on the json representation
+ dialog_node_output_generic_dialog_node_output_response_type_option_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeOption.from_dict(dialog_node_output_generic_dialog_node_output_response_type_option_model_json).__dict__
+ dialog_node_output_generic_dialog_node_output_response_type_option_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeOption(**dialog_node_output_generic_dialog_node_output_response_type_option_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_output_generic_dialog_node_output_response_type_option_model == dialog_node_output_generic_dialog_node_output_response_type_option_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_output_generic_dialog_node_output_response_type_option_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_option_model.to_dict()
+ assert dialog_node_output_generic_dialog_node_output_response_type_option_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_option_model_json
+
+
+class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypePause:
+ """
+ Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypePause
+ """
+
+ def test_dialog_node_output_generic_dialog_node_output_response_type_pause_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypePause
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypePause model
+ dialog_node_output_generic_dialog_node_output_response_type_pause_model_json = {}
+ dialog_node_output_generic_dialog_node_output_response_type_pause_model_json['response_type'] = 'pause'
+ dialog_node_output_generic_dialog_node_output_response_type_pause_model_json['time'] = 38
+ dialog_node_output_generic_dialog_node_output_response_type_pause_model_json['typing'] = True
+ dialog_node_output_generic_dialog_node_output_response_type_pause_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypePause by calling from_dict on the json representation
+ dialog_node_output_generic_dialog_node_output_response_type_pause_model = DialogNodeOutputGenericDialogNodeOutputResponseTypePause.from_dict(dialog_node_output_generic_dialog_node_output_response_type_pause_model_json)
+ assert dialog_node_output_generic_dialog_node_output_response_type_pause_model != False
+
+ # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypePause by calling from_dict on the json representation
+ dialog_node_output_generic_dialog_node_output_response_type_pause_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypePause.from_dict(dialog_node_output_generic_dialog_node_output_response_type_pause_model_json).__dict__
+ dialog_node_output_generic_dialog_node_output_response_type_pause_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypePause(**dialog_node_output_generic_dialog_node_output_response_type_pause_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_output_generic_dialog_node_output_response_type_pause_model == dialog_node_output_generic_dialog_node_output_response_type_pause_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_output_generic_dialog_node_output_response_type_pause_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_pause_model.to_dict()
+ assert dialog_node_output_generic_dialog_node_output_response_type_pause_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_pause_model_json
+
+
+class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill:
+ """
+ Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill
+ """
+
+ def test_dialog_node_output_generic_dialog_node_output_response_type_search_skill_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill model
+ dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json = {}
+ dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['response_type'] = 'search_skill'
+ dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['query'] = 'testString'
+ dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['query_type'] = 'natural_language'
+ dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['filter'] = 'testString'
+ dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['discovery_version'] = '2018-12-03'
+ dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill by calling from_dict on the json representation
+ dialog_node_output_generic_dialog_node_output_response_type_search_skill_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill.from_dict(dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json)
+ assert dialog_node_output_generic_dialog_node_output_response_type_search_skill_model != False
+
+ # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill by calling from_dict on the json representation
+ dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill.from_dict(dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json).__dict__
+ dialog_node_output_generic_dialog_node_output_response_type_search_skill_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeSearchSkill(**dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_output_generic_dialog_node_output_response_type_search_skill_model == dialog_node_output_generic_dialog_node_output_response_type_search_skill_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_search_skill_model.to_dict()
+ assert dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_search_skill_model_json
+
+
+class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeText:
+ """
+ Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeText
+ """
+
+ def test_dialog_node_output_generic_dialog_node_output_response_type_text_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeText
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ dialog_node_output_text_values_element_model = {} # DialogNodeOutputTextValuesElement
+ dialog_node_output_text_values_element_model['text'] = 'testString'
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeText model
+ dialog_node_output_generic_dialog_node_output_response_type_text_model_json = {}
+ dialog_node_output_generic_dialog_node_output_response_type_text_model_json['response_type'] = 'text'
+ dialog_node_output_generic_dialog_node_output_response_type_text_model_json['values'] = [dialog_node_output_text_values_element_model]
+ dialog_node_output_generic_dialog_node_output_response_type_text_model_json['selection_policy'] = 'sequential'
+ dialog_node_output_generic_dialog_node_output_response_type_text_model_json['delimiter'] = '\\n'
+ dialog_node_output_generic_dialog_node_output_response_type_text_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeText by calling from_dict on the json representation
+ dialog_node_output_generic_dialog_node_output_response_type_text_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeText.from_dict(dialog_node_output_generic_dialog_node_output_response_type_text_model_json)
+ assert dialog_node_output_generic_dialog_node_output_response_type_text_model != False
+
+ # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeText by calling from_dict on the json representation
+ dialog_node_output_generic_dialog_node_output_response_type_text_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeText.from_dict(dialog_node_output_generic_dialog_node_output_response_type_text_model_json).__dict__
+ dialog_node_output_generic_dialog_node_output_response_type_text_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeText(**dialog_node_output_generic_dialog_node_output_response_type_text_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_output_generic_dialog_node_output_response_type_text_model == dialog_node_output_generic_dialog_node_output_response_type_text_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_output_generic_dialog_node_output_response_type_text_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_text_model.to_dict()
+ assert dialog_node_output_generic_dialog_node_output_response_type_text_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_text_model_json
+
+
+class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined:
+ """
+ Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined
+ """
+
+ def test_dialog_node_output_generic_dialog_node_output_response_type_user_defined_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined model
+ dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json = {}
+ dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json['response_type'] = 'user_defined'
+ dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json['user_defined'] = {'anyKey': 'anyValue'}
+ dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined by calling from_dict on the json representation
+ dialog_node_output_generic_dialog_node_output_response_type_user_defined_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined.from_dict(dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json)
+ assert dialog_node_output_generic_dialog_node_output_response_type_user_defined_model != False
+
+ # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined by calling from_dict on the json representation
+ dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined.from_dict(dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json).__dict__
+ dialog_node_output_generic_dialog_node_output_response_type_user_defined_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined(**dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_output_generic_dialog_node_output_response_type_user_defined_model == dialog_node_output_generic_dialog_node_output_response_type_user_defined_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_user_defined_model.to_dict()
+ assert dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_user_defined_model_json
+
+
+class TestModel_DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo:
+ """
+ Test Class for DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo
+ """
+
+ def test_dialog_node_output_generic_dialog_node_output_response_type_video_serialization(self):
+ """
+ Test serialization/deserialization for DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo model
+ dialog_node_output_generic_dialog_node_output_response_type_video_model_json = {}
+ dialog_node_output_generic_dialog_node_output_response_type_video_model_json['response_type'] = 'video'
+ dialog_node_output_generic_dialog_node_output_response_type_video_model_json['source'] = 'testString'
+ dialog_node_output_generic_dialog_node_output_response_type_video_model_json['title'] = 'testString'
+ dialog_node_output_generic_dialog_node_output_response_type_video_model_json['description'] = 'testString'
+ dialog_node_output_generic_dialog_node_output_response_type_video_model_json['channels'] = [response_generic_channel_model]
+ dialog_node_output_generic_dialog_node_output_response_type_video_model_json['channel_options'] = {'anyKey': 'anyValue'}
+ dialog_node_output_generic_dialog_node_output_response_type_video_model_json['alt_text'] = 'testString'
+
+ # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo by calling from_dict on the json representation
+ dialog_node_output_generic_dialog_node_output_response_type_video_model = DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo.from_dict(dialog_node_output_generic_dialog_node_output_response_type_video_model_json)
+ assert dialog_node_output_generic_dialog_node_output_response_type_video_model != False
+
+ # Construct a model instance of DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo by calling from_dict on the json representation
+ dialog_node_output_generic_dialog_node_output_response_type_video_model_dict = DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo.from_dict(dialog_node_output_generic_dialog_node_output_response_type_video_model_json).__dict__
+ dialog_node_output_generic_dialog_node_output_response_type_video_model2 = DialogNodeOutputGenericDialogNodeOutputResponseTypeVideo(**dialog_node_output_generic_dialog_node_output_response_type_video_model_dict)
+
+ # Verify the model instances are equivalent
+ assert dialog_node_output_generic_dialog_node_output_response_type_video_model == dialog_node_output_generic_dialog_node_output_response_type_video_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ dialog_node_output_generic_dialog_node_output_response_type_video_model_json2 = dialog_node_output_generic_dialog_node_output_response_type_video_model.to_dict()
+ assert dialog_node_output_generic_dialog_node_output_response_type_video_model_json2 == dialog_node_output_generic_dialog_node_output_response_type_video_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeAudio:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeAudio
+ """
+
+ def test_runtime_response_generic_runtime_response_type_audio_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeAudio
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeAudio model
+ runtime_response_generic_runtime_response_type_audio_model_json = {}
+ runtime_response_generic_runtime_response_type_audio_model_json['response_type'] = 'audio'
+ runtime_response_generic_runtime_response_type_audio_model_json['source'] = 'testString'
+ runtime_response_generic_runtime_response_type_audio_model_json['title'] = 'testString'
+ runtime_response_generic_runtime_response_type_audio_model_json['description'] = 'testString'
+ runtime_response_generic_runtime_response_type_audio_model_json['channels'] = [response_generic_channel_model]
+ runtime_response_generic_runtime_response_type_audio_model_json['channel_options'] = {'anyKey': 'anyValue'}
+ runtime_response_generic_runtime_response_type_audio_model_json['alt_text'] = 'testString'
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeAudio by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_audio_model = RuntimeResponseGenericRuntimeResponseTypeAudio.from_dict(runtime_response_generic_runtime_response_type_audio_model_json)
+ assert runtime_response_generic_runtime_response_type_audio_model != False
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeAudio by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_audio_model_dict = RuntimeResponseGenericRuntimeResponseTypeAudio.from_dict(runtime_response_generic_runtime_response_type_audio_model_json).__dict__
+ runtime_response_generic_runtime_response_type_audio_model2 = RuntimeResponseGenericRuntimeResponseTypeAudio(**runtime_response_generic_runtime_response_type_audio_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_audio_model == runtime_response_generic_runtime_response_type_audio_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_audio_model_json2 = runtime_response_generic_runtime_response_type_audio_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_audio_model_json2 == runtime_response_generic_runtime_response_type_audio_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeChannelTransfer:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeChannelTransfer
+ """
+
+ def test_runtime_response_generic_runtime_response_type_channel_transfer_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeChannelTransfer
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ channel_transfer_target_chat_model = {} # ChannelTransferTargetChat
+ channel_transfer_target_chat_model['url'] = 'testString'
+
+ channel_transfer_target_model = {} # ChannelTransferTarget
+ channel_transfer_target_model['chat'] = channel_transfer_target_chat_model
+
+ channel_transfer_info_model = {} # ChannelTransferInfo
+ channel_transfer_info_model['target'] = channel_transfer_target_model
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeChannelTransfer model
+ runtime_response_generic_runtime_response_type_channel_transfer_model_json = {}
+ runtime_response_generic_runtime_response_type_channel_transfer_model_json['response_type'] = 'channel_transfer'
+ runtime_response_generic_runtime_response_type_channel_transfer_model_json['message_to_user'] = 'testString'
+ runtime_response_generic_runtime_response_type_channel_transfer_model_json['transfer_info'] = channel_transfer_info_model
+ runtime_response_generic_runtime_response_type_channel_transfer_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeChannelTransfer by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_channel_transfer_model = RuntimeResponseGenericRuntimeResponseTypeChannelTransfer.from_dict(runtime_response_generic_runtime_response_type_channel_transfer_model_json)
+ assert runtime_response_generic_runtime_response_type_channel_transfer_model != False
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeChannelTransfer by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_channel_transfer_model_dict = RuntimeResponseGenericRuntimeResponseTypeChannelTransfer.from_dict(runtime_response_generic_runtime_response_type_channel_transfer_model_json).__dict__
+ runtime_response_generic_runtime_response_type_channel_transfer_model2 = RuntimeResponseGenericRuntimeResponseTypeChannelTransfer(**runtime_response_generic_runtime_response_type_channel_transfer_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_channel_transfer_model == runtime_response_generic_runtime_response_type_channel_transfer_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_channel_transfer_model_json2 = runtime_response_generic_runtime_response_type_channel_transfer_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_channel_transfer_model_json2 == runtime_response_generic_runtime_response_type_channel_transfer_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeConnectToAgent:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeConnectToAgent
+ """
+
+ def test_runtime_response_generic_runtime_response_type_connect_to_agent_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeConnectToAgent
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ agent_availability_message_model = {} # AgentAvailabilityMessage
+ agent_availability_message_model['message'] = 'testString'
+
+ dialog_node_output_connect_to_agent_transfer_info_model = {} # DialogNodeOutputConnectToAgentTransferInfo
+ dialog_node_output_connect_to_agent_transfer_info_model['target'] = {'key1': {'anyKey': 'anyValue'}}
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeConnectToAgent model
+ runtime_response_generic_runtime_response_type_connect_to_agent_model_json = {}
+ runtime_response_generic_runtime_response_type_connect_to_agent_model_json['response_type'] = 'connect_to_agent'
+ runtime_response_generic_runtime_response_type_connect_to_agent_model_json['message_to_human_agent'] = 'testString'
+ runtime_response_generic_runtime_response_type_connect_to_agent_model_json['agent_available'] = agent_availability_message_model
+ runtime_response_generic_runtime_response_type_connect_to_agent_model_json['agent_unavailable'] = agent_availability_message_model
+ runtime_response_generic_runtime_response_type_connect_to_agent_model_json['transfer_info'] = dialog_node_output_connect_to_agent_transfer_info_model
+ runtime_response_generic_runtime_response_type_connect_to_agent_model_json['topic'] = 'testString'
+ runtime_response_generic_runtime_response_type_connect_to_agent_model_json['dialog_node'] = 'testString'
+ runtime_response_generic_runtime_response_type_connect_to_agent_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeConnectToAgent by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_connect_to_agent_model = RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.from_dict(runtime_response_generic_runtime_response_type_connect_to_agent_model_json)
+ assert runtime_response_generic_runtime_response_type_connect_to_agent_model != False
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeConnectToAgent by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_connect_to_agent_model_dict = RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.from_dict(runtime_response_generic_runtime_response_type_connect_to_agent_model_json).__dict__
+ runtime_response_generic_runtime_response_type_connect_to_agent_model2 = RuntimeResponseGenericRuntimeResponseTypeConnectToAgent(**runtime_response_generic_runtime_response_type_connect_to_agent_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_connect_to_agent_model == runtime_response_generic_runtime_response_type_connect_to_agent_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_connect_to_agent_model_json2 = runtime_response_generic_runtime_response_type_connect_to_agent_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_connect_to_agent_model_json2 == runtime_response_generic_runtime_response_type_connect_to_agent_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeIframe:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeIframe
+ """
+
+ def test_runtime_response_generic_runtime_response_type_iframe_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeIframe
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeIframe model
+ runtime_response_generic_runtime_response_type_iframe_model_json = {}
+ runtime_response_generic_runtime_response_type_iframe_model_json['response_type'] = 'iframe'
+ runtime_response_generic_runtime_response_type_iframe_model_json['source'] = 'testString'
+ runtime_response_generic_runtime_response_type_iframe_model_json['title'] = 'testString'
+ runtime_response_generic_runtime_response_type_iframe_model_json['description'] = 'testString'
+ runtime_response_generic_runtime_response_type_iframe_model_json['image_url'] = 'testString'
+ runtime_response_generic_runtime_response_type_iframe_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeIframe by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_iframe_model = RuntimeResponseGenericRuntimeResponseTypeIframe.from_dict(runtime_response_generic_runtime_response_type_iframe_model_json)
+ assert runtime_response_generic_runtime_response_type_iframe_model != False
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeIframe by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_iframe_model_dict = RuntimeResponseGenericRuntimeResponseTypeIframe.from_dict(runtime_response_generic_runtime_response_type_iframe_model_json).__dict__
+ runtime_response_generic_runtime_response_type_iframe_model2 = RuntimeResponseGenericRuntimeResponseTypeIframe(**runtime_response_generic_runtime_response_type_iframe_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_iframe_model == runtime_response_generic_runtime_response_type_iframe_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_iframe_model_json2 = runtime_response_generic_runtime_response_type_iframe_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_iframe_model_json2 == runtime_response_generic_runtime_response_type_iframe_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeImage:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeImage
+ """
+
+ def test_runtime_response_generic_runtime_response_type_image_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeImage
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeImage model
+ runtime_response_generic_runtime_response_type_image_model_json = {}
+ runtime_response_generic_runtime_response_type_image_model_json['response_type'] = 'image'
+ runtime_response_generic_runtime_response_type_image_model_json['source'] = 'testString'
+ runtime_response_generic_runtime_response_type_image_model_json['title'] = 'testString'
+ runtime_response_generic_runtime_response_type_image_model_json['description'] = 'testString'
+ runtime_response_generic_runtime_response_type_image_model_json['channels'] = [response_generic_channel_model]
+ runtime_response_generic_runtime_response_type_image_model_json['alt_text'] = 'testString'
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeImage by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_image_model = RuntimeResponseGenericRuntimeResponseTypeImage.from_dict(runtime_response_generic_runtime_response_type_image_model_json)
+ assert runtime_response_generic_runtime_response_type_image_model != False
+
+ # Construct a second model instance of RuntimeResponseGenericRuntimeResponseTypeImage by passing the first instance's dict form to the constructor
+ runtime_response_generic_runtime_response_type_image_model_dict = RuntimeResponseGenericRuntimeResponseTypeImage.from_dict(runtime_response_generic_runtime_response_type_image_model_json).__dict__
+ runtime_response_generic_runtime_response_type_image_model2 = RuntimeResponseGenericRuntimeResponseTypeImage(**runtime_response_generic_runtime_response_type_image_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_image_model == runtime_response_generic_runtime_response_type_image_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_image_model_json2 = runtime_response_generic_runtime_response_type_image_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_image_model_json2 == runtime_response_generic_runtime_response_type_image_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeOption:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeOption
+ """
+
+ def test_runtime_response_generic_runtime_response_type_option_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeOption
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ message_input_model = {} # MessageInput
+ message_input_model['text'] = 'testString'
+ message_input_model['spelling_suggestions'] = False
+ message_input_model['spelling_auto_correct'] = False
+ message_input_model['foo'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+
+ dialog_node_output_options_element_value_model = {} # DialogNodeOutputOptionsElementValue
+ dialog_node_output_options_element_value_model['input'] = message_input_model
+ dialog_node_output_options_element_value_model['intents'] = [runtime_intent_model]
+ dialog_node_output_options_element_value_model['entities'] = [runtime_entity_model]
+
+ dialog_node_output_options_element_model = {} # DialogNodeOutputOptionsElement
+ dialog_node_output_options_element_model['label'] = 'testString'
+ dialog_node_output_options_element_model['value'] = dialog_node_output_options_element_value_model
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeOption model
+ runtime_response_generic_runtime_response_type_option_model_json = {}
+ runtime_response_generic_runtime_response_type_option_model_json['response_type'] = 'option'
+ runtime_response_generic_runtime_response_type_option_model_json['title'] = 'testString'
+ runtime_response_generic_runtime_response_type_option_model_json['description'] = 'testString'
+ runtime_response_generic_runtime_response_type_option_model_json['preference'] = 'dropdown'
+ runtime_response_generic_runtime_response_type_option_model_json['options'] = [dialog_node_output_options_element_model]
+ runtime_response_generic_runtime_response_type_option_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeOption by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_option_model = RuntimeResponseGenericRuntimeResponseTypeOption.from_dict(runtime_response_generic_runtime_response_type_option_model_json)
+ assert runtime_response_generic_runtime_response_type_option_model != False
+
+ # Construct a second model instance of RuntimeResponseGenericRuntimeResponseTypeOption by passing the first instance's dict form to the constructor
+ runtime_response_generic_runtime_response_type_option_model_dict = RuntimeResponseGenericRuntimeResponseTypeOption.from_dict(runtime_response_generic_runtime_response_type_option_model_json).__dict__
+ runtime_response_generic_runtime_response_type_option_model2 = RuntimeResponseGenericRuntimeResponseTypeOption(**runtime_response_generic_runtime_response_type_option_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_option_model == runtime_response_generic_runtime_response_type_option_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_option_model_json2 = runtime_response_generic_runtime_response_type_option_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_option_model_json2 == runtime_response_generic_runtime_response_type_option_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypePause:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypePause
+ """
+
+ def test_runtime_response_generic_runtime_response_type_pause_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypePause
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypePause model
+ runtime_response_generic_runtime_response_type_pause_model_json = {}
+ runtime_response_generic_runtime_response_type_pause_model_json['response_type'] = 'pause'
+ runtime_response_generic_runtime_response_type_pause_model_json['time'] = 38
+ runtime_response_generic_runtime_response_type_pause_model_json['typing'] = True
+ runtime_response_generic_runtime_response_type_pause_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypePause by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_pause_model = RuntimeResponseGenericRuntimeResponseTypePause.from_dict(runtime_response_generic_runtime_response_type_pause_model_json)
+ assert runtime_response_generic_runtime_response_type_pause_model != False
+
+ # Construct a second model instance of RuntimeResponseGenericRuntimeResponseTypePause by passing the first instance's dict form to the constructor
+ runtime_response_generic_runtime_response_type_pause_model_dict = RuntimeResponseGenericRuntimeResponseTypePause.from_dict(runtime_response_generic_runtime_response_type_pause_model_json).__dict__
+ runtime_response_generic_runtime_response_type_pause_model2 = RuntimeResponseGenericRuntimeResponseTypePause(**runtime_response_generic_runtime_response_type_pause_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_pause_model == runtime_response_generic_runtime_response_type_pause_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_pause_model_json2 = runtime_response_generic_runtime_response_type_pause_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_pause_model_json2 == runtime_response_generic_runtime_response_type_pause_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeSuggestion:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeSuggestion
+ """
+
+ def test_runtime_response_generic_runtime_response_type_suggestion_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeSuggestion
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ message_input_model = {} # MessageInput
+ message_input_model['text'] = 'testString'
+ message_input_model['spelling_suggestions'] = False
+ message_input_model['spelling_auto_correct'] = False
+ message_input_model['foo'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+
+ dialog_suggestion_value_model = {} # DialogSuggestionValue
+ dialog_suggestion_value_model['input'] = message_input_model
+ dialog_suggestion_value_model['intents'] = [runtime_intent_model]
+ dialog_suggestion_value_model['entities'] = [runtime_entity_model]
+
+ dialog_suggestion_model = {} # DialogSuggestion
+ dialog_suggestion_model['label'] = 'testString'
+ dialog_suggestion_model['value'] = dialog_suggestion_value_model
+ dialog_suggestion_model['output'] = {'anyKey': 'anyValue'}
+ dialog_suggestion_model['dialog_node'] = 'testString'
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeSuggestion model
+ runtime_response_generic_runtime_response_type_suggestion_model_json = {}
+ runtime_response_generic_runtime_response_type_suggestion_model_json['response_type'] = 'suggestion'
+ runtime_response_generic_runtime_response_type_suggestion_model_json['title'] = 'testString'
+ runtime_response_generic_runtime_response_type_suggestion_model_json['suggestions'] = [dialog_suggestion_model]
+ runtime_response_generic_runtime_response_type_suggestion_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeSuggestion by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_suggestion_model = RuntimeResponseGenericRuntimeResponseTypeSuggestion.from_dict(runtime_response_generic_runtime_response_type_suggestion_model_json)
+ assert runtime_response_generic_runtime_response_type_suggestion_model != False
+
+ # Construct a second model instance of RuntimeResponseGenericRuntimeResponseTypeSuggestion by passing the first instance's dict form to the constructor
+ runtime_response_generic_runtime_response_type_suggestion_model_dict = RuntimeResponseGenericRuntimeResponseTypeSuggestion.from_dict(runtime_response_generic_runtime_response_type_suggestion_model_json).__dict__
+ runtime_response_generic_runtime_response_type_suggestion_model2 = RuntimeResponseGenericRuntimeResponseTypeSuggestion(**runtime_response_generic_runtime_response_type_suggestion_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_suggestion_model == runtime_response_generic_runtime_response_type_suggestion_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_suggestion_model_json2 = runtime_response_generic_runtime_response_type_suggestion_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_suggestion_model_json2 == runtime_response_generic_runtime_response_type_suggestion_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeText:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeText
+ """
+
+ def test_runtime_response_generic_runtime_response_type_text_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeText
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeText model
+ runtime_response_generic_runtime_response_type_text_model_json = {}
+ runtime_response_generic_runtime_response_type_text_model_json['response_type'] = 'text'
+ runtime_response_generic_runtime_response_type_text_model_json['text'] = 'testString'
+ runtime_response_generic_runtime_response_type_text_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeText by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_text_model = RuntimeResponseGenericRuntimeResponseTypeText.from_dict(runtime_response_generic_runtime_response_type_text_model_json)
+ assert runtime_response_generic_runtime_response_type_text_model != False
+
+ # Construct a second model instance of RuntimeResponseGenericRuntimeResponseTypeText by passing the first instance's dict form to the constructor
+ runtime_response_generic_runtime_response_type_text_model_dict = RuntimeResponseGenericRuntimeResponseTypeText.from_dict(runtime_response_generic_runtime_response_type_text_model_json).__dict__
+ runtime_response_generic_runtime_response_type_text_model2 = RuntimeResponseGenericRuntimeResponseTypeText(**runtime_response_generic_runtime_response_type_text_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_text_model == runtime_response_generic_runtime_response_type_text_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_text_model_json2 = runtime_response_generic_runtime_response_type_text_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_text_model_json2 == runtime_response_generic_runtime_response_type_text_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeUserDefined:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeUserDefined
+ """
+
+ def test_runtime_response_generic_runtime_response_type_user_defined_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeUserDefined
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeUserDefined model
+ runtime_response_generic_runtime_response_type_user_defined_model_json = {}
+ runtime_response_generic_runtime_response_type_user_defined_model_json['response_type'] = 'user_defined'
+ runtime_response_generic_runtime_response_type_user_defined_model_json['user_defined'] = {'anyKey': 'anyValue'}
+ runtime_response_generic_runtime_response_type_user_defined_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeUserDefined by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_user_defined_model = RuntimeResponseGenericRuntimeResponseTypeUserDefined.from_dict(runtime_response_generic_runtime_response_type_user_defined_model_json)
+ assert runtime_response_generic_runtime_response_type_user_defined_model != False
+
+ # Construct a second model instance of RuntimeResponseGenericRuntimeResponseTypeUserDefined by passing the first instance's dict form to the constructor
+ runtime_response_generic_runtime_response_type_user_defined_model_dict = RuntimeResponseGenericRuntimeResponseTypeUserDefined.from_dict(runtime_response_generic_runtime_response_type_user_defined_model_json).__dict__
+ runtime_response_generic_runtime_response_type_user_defined_model2 = RuntimeResponseGenericRuntimeResponseTypeUserDefined(**runtime_response_generic_runtime_response_type_user_defined_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_user_defined_model == runtime_response_generic_runtime_response_type_user_defined_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_user_defined_model_json2 = runtime_response_generic_runtime_response_type_user_defined_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_user_defined_model_json2 == runtime_response_generic_runtime_response_type_user_defined_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeVideo:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeVideo
+ """
+
+ def test_runtime_response_generic_runtime_response_type_video_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeVideo
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'chat'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeVideo model
+ runtime_response_generic_runtime_response_type_video_model_json = {}
+ runtime_response_generic_runtime_response_type_video_model_json['response_type'] = 'video'
+ runtime_response_generic_runtime_response_type_video_model_json['source'] = 'testString'
+ runtime_response_generic_runtime_response_type_video_model_json['title'] = 'testString'
+ runtime_response_generic_runtime_response_type_video_model_json['description'] = 'testString'
+ runtime_response_generic_runtime_response_type_video_model_json['channels'] = [response_generic_channel_model]
+ runtime_response_generic_runtime_response_type_video_model_json['channel_options'] = {'anyKey': 'anyValue'}
+ runtime_response_generic_runtime_response_type_video_model_json['alt_text'] = 'testString'
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeVideo by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_video_model = RuntimeResponseGenericRuntimeResponseTypeVideo.from_dict(runtime_response_generic_runtime_response_type_video_model_json)
+ assert runtime_response_generic_runtime_response_type_video_model != False
+
+ # Construct a second model instance of RuntimeResponseGenericRuntimeResponseTypeVideo by passing the first instance's dict form to the constructor
+ runtime_response_generic_runtime_response_type_video_model_dict = RuntimeResponseGenericRuntimeResponseTypeVideo.from_dict(runtime_response_generic_runtime_response_type_video_model_json).__dict__
+ runtime_response_generic_runtime_response_type_video_model2 = RuntimeResponseGenericRuntimeResponseTypeVideo(**runtime_response_generic_runtime_response_type_video_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_video_model == runtime_response_generic_runtime_response_type_video_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_video_model_json2 = runtime_response_generic_runtime_response_type_video_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_video_model_json2 == runtime_response_generic_runtime_response_type_video_model_json
+
+
+# endregion
+##############################################################################
+# End of Model Tests
+##############################################################################
diff --git a/test/unit/test_assistant_v2.py b/test/unit/test_assistant_v2.py
index 9931c0071..f2ab84821 100644
--- a/test/unit/test_assistant_v2.py
+++ b/test/unit/test_assistant_v2.py
@@ -1,84 +1,16831 @@
-# coding: utf-8
+# -*- coding: utf-8 -*-
+# (C) Copyright IBM Corp. 2019, 2026.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Unit Tests for AssistantV2
+"""
+
+from datetime import datetime, timezone
+from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
+from ibm_cloud_sdk_core.utils import datetime_to_string, string_to_datetime
+import inspect
+import io
import json
+import pytest
+import re
+import requests
import responses
-import ibm_watson
-
-platform_url = 'https://gateway.watsonplatform.net'
-service_path = '/assistant/api'
-base_url = '{0}{1}'.format(platform_url, service_path)
-
-@responses.activate
-def test_create_session():
- endpoint = '/v2/assistants/{0}/sessions'.format('bogus_id')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {'session_id': 'session_id'}
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV2(
- username='username', password='password', version='2017-02-03')
- session = service.create_session('bogus_id').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert session == response
-
-
-@responses.activate
-def test_delete_session():
- endpoint = '/v2/assistants/{0}/sessions/{1}'.format('bogus_id',
- 'session_id')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {}
- responses.add(
- responses.DELETE,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV2(
- username='username', password='password', version='2017-02-03')
- delete_session = service.delete_session('bogus_id',
- 'session_id').get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert delete_session == response
-
-
-@responses.activate
-def test_message():
- endpoint = '/v2/assistants/{0}/sessions/{1}/message'.format(
- 'bogus_id', 'session_id')
- url = '{0}{1}'.format(base_url, endpoint)
- response = {
- 'output': {
- 'generic': [{
- 'text':
- 'I did not understand that. I can help you get pizza, tell a joke or find a movie.',
- 'response_type':
- 'text'
- }],
- 'entities': [],
- 'intents': [{
- 'confidence': 0.8521236419677736,
- 'intent': 'Weather'
- }]
- }
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- service = ibm_watson.AssistantV2(
- username='username', password='password', version='2017-02-03')
- message = service.message(
- 'bogus_id', 'session_id', input={
- 'text': 'What\'s the weather like?'
- }).get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert message == response
+import tempfile
+import urllib
+from ibm_watson.assistant_v2 import *
+
+version = 'testString'
+
+_service = AssistantV2(
+ authenticator=NoAuthAuthenticator(),
+ version=version,
+)
+
+_base_url = 'https://api.us-south.assistant.watson.cloud.ibm.com'
+_service.set_service_url(_base_url)
+
+
+def preprocess_url(operation_path: str):
+ """
+ Returns the request url associated with the specified operation path.
+ This will be base_url concatenated with a quoted version of operation_path.
+ The returned request URL is used to register the mock response so it needs
+ to match the request URL that is formed by the requests library.
+ """
+
+ # Form the request URL from the base URL and operation path.
+ request_url = _base_url + operation_path
+
+ # If the request url does NOT end with a /, then just return it as-is.
+ # Otherwise, return a regular expression that matches one or more trailing /.
+ if not request_url.endswith('/'):
+ return request_url
+ return re.compile(request_url.rstrip('/') + '/+')
+
+
+##############################################################################
+# Start of Service: ConversationalSkillProviders
+##############################################################################
+# region
+
+
+class TestCreateProvider:
+ """
+ Test Class for create_provider
+ """
+
+ @responses.activate
+ def test_create_provider_all_params(self):
+ """
+ create_provider()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/providers')
+ mock_response = '{"provider_id": "provider_id", "specification": {"servers": [{"url": "url"}], "components": {"securitySchemes": {"authentication_method": "basic", "basic": {"username": {"type": "value", "value": "value"}}, "oauth2": {"preferred_flow": "password", "flows": {"token_url": "token_url", "refresh_url": "refresh_url", "client_auth_type": "Body", "content_type": "content_type", "header_prefix": "header_prefix", "username": {"type": "value", "value": "value"}}}}}}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a ProviderSpecificationServersItem model
+ provider_specification_servers_item_model = {}
+ provider_specification_servers_item_model['url'] = 'testString'
+
+ # Construct a dict representation of a ProviderAuthenticationTypeAndValue model
+ provider_authentication_type_and_value_model = {}
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemesBasic model
+ provider_specification_components_security_schemes_basic_model = {}
+ provider_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model
+
+ # Construct a dict representation of a ProviderAuthenticationOAuth2PasswordUsername model
+ provider_authentication_o_auth2_password_username_model = {}
+ provider_authentication_o_auth2_password_username_model['type'] = 'value'
+ provider_authentication_o_auth2_password_username_model['value'] = 'testString'
+
+ # Construct a dict representation of a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password model
+ provider_authentication_o_auth2_flows_model = {}
+ provider_authentication_o_auth2_flows_model['token_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body'
+ provider_authentication_o_auth2_flows_model['content_type'] = 'testString'
+ provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString'
+ provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model
+
+ # Construct a dict representation of a ProviderAuthenticationOAuth2 model
+ provider_authentication_o_auth2_model = {}
+ provider_authentication_o_auth2_model['preferred_flow'] = 'password'
+ provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model
+
+ # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemes model
+ provider_specification_components_security_schemes_model = {}
+ provider_specification_components_security_schemes_model['authentication_method'] = 'basic'
+ provider_specification_components_security_schemes_model['basic'] = provider_specification_components_security_schemes_basic_model
+ provider_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model
+
+ # Construct a dict representation of a ProviderSpecificationComponents model
+ provider_specification_components_model = {}
+ provider_specification_components_model['securitySchemes'] = provider_specification_components_security_schemes_model
+
+ # Construct a dict representation of a ProviderSpecification model
+ provider_specification_model = {}
+ provider_specification_model['servers'] = [provider_specification_servers_item_model]
+ provider_specification_model['components'] = provider_specification_components_model
+
+ # Construct a dict representation of a ProviderPrivateAuthenticationBearerFlow model
+ provider_private_authentication_model = {}
+ provider_private_authentication_model['token'] = provider_authentication_type_and_value_model
+
+ # Construct a dict representation of a ProviderPrivate model
+ provider_private_model = {}
+ provider_private_model['authentication'] = provider_private_authentication_model
+
+ # Set up parameter values
+ provider_id = 'testString'
+ specification = provider_specification_model
+ private = provider_private_model
+
+ # Invoke method
+ response = _service.create_provider(
+ provider_id,
+ specification,
+ private,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['provider_id'] == 'testString'
+ assert req_body['specification'] == provider_specification_model
+ assert req_body['private'] == provider_private_model
+
+ def test_create_provider_all_params_with_retries(self):
+ # Enable retries and run test_create_provider_all_params.
+ _service.enable_retries()
+ self.test_create_provider_all_params()
+
+ # Disable retries and run test_create_provider_all_params.
+ _service.disable_retries()
+ self.test_create_provider_all_params()
+
+ @responses.activate
+ def test_create_provider_value_error(self):
+ """
+ test_create_provider_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/providers')
+ mock_response = '{"provider_id": "provider_id", "specification": {"servers": [{"url": "url"}], "components": {"securitySchemes": {"authentication_method": "basic", "basic": {"username": {"type": "value", "value": "value"}}, "oauth2": {"preferred_flow": "password", "flows": {"token_url": "token_url", "refresh_url": "refresh_url", "client_auth_type": "Body", "content_type": "content_type", "header_prefix": "header_prefix", "username": {"type": "value", "value": "value"}}}}}}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a ProviderSpecificationServersItem model
+ provider_specification_servers_item_model = {}
+ provider_specification_servers_item_model['url'] = 'testString'
+
+ # Construct a dict representation of a ProviderAuthenticationTypeAndValue model
+ provider_authentication_type_and_value_model = {}
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemesBasic model
+ provider_specification_components_security_schemes_basic_model = {}
+ provider_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model
+
+ # Construct a dict representation of a ProviderAuthenticationOAuth2PasswordUsername model
+ provider_authentication_o_auth2_password_username_model = {}
+ provider_authentication_o_auth2_password_username_model['type'] = 'value'
+ provider_authentication_o_auth2_password_username_model['value'] = 'testString'
+
+ # Construct a dict representation of a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password model
+ provider_authentication_o_auth2_flows_model = {}
+ provider_authentication_o_auth2_flows_model['token_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body'
+ provider_authentication_o_auth2_flows_model['content_type'] = 'testString'
+ provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString'
+ provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model
+
+ # Construct a dict representation of a ProviderAuthenticationOAuth2 model
+ provider_authentication_o_auth2_model = {}
+ provider_authentication_o_auth2_model['preferred_flow'] = 'password'
+ provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model
+
+ # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemes model
+ provider_specification_components_security_schemes_model = {}
+ provider_specification_components_security_schemes_model['authentication_method'] = 'basic'
+ provider_specification_components_security_schemes_model['basic'] = provider_specification_components_security_schemes_basic_model
+ provider_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model
+
+ # Construct a dict representation of a ProviderSpecificationComponents model
+ provider_specification_components_model = {}
+ provider_specification_components_model['securitySchemes'] = provider_specification_components_security_schemes_model
+
+ # Construct a dict representation of a ProviderSpecification model
+ provider_specification_model = {}
+ provider_specification_model['servers'] = [provider_specification_servers_item_model]
+ provider_specification_model['components'] = provider_specification_components_model
+
+ # Construct a dict representation of a ProviderPrivateAuthenticationBearerFlow model
+ provider_private_authentication_model = {}
+ provider_private_authentication_model['token'] = provider_authentication_type_and_value_model
+
+ # Construct a dict representation of a ProviderPrivate model
+ provider_private_model = {}
+ provider_private_model['authentication'] = provider_private_authentication_model
+
+ # Set up parameter values
+ provider_id = 'testString'
+ specification = provider_specification_model
+ private = provider_private_model
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "provider_id": provider_id,
+ "specification": specification,
+ "private": private,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.create_provider(**req_copy)
+
+ def test_create_provider_value_error_with_retries(self):
+ # Enable retries and run test_create_provider_value_error.
+ _service.enable_retries()
+ self.test_create_provider_value_error()
+
+ # Disable retries and run test_create_provider_value_error.
+ _service.disable_retries()
+ self.test_create_provider_value_error()
+
+
+class TestListProviders:
+ """
+ Test Class for list_providers
+ """
+
+ @responses.activate
+ def test_list_providers_all_params(self):
+ """
+ list_providers()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/providers')
+ mock_response = '{"conversational_skill_providers": [{"provider_id": "provider_id", "specification": {"servers": [{"url": "url"}], "components": {"securitySchemes": {"authentication_method": "basic", "basic": {"username": {"type": "value", "value": "value"}}, "oauth2": {"preferred_flow": "password", "flows": {"token_url": "token_url", "refresh_url": "refresh_url", "client_auth_type": "Body", "content_type": "content_type", "header_prefix": "header_prefix", "username": {"type": "value", "value": "value"}}}}}}}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ page_limit = 100
+ include_count = False
+ sort = 'name'
+ cursor = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.list_providers(
+ page_limit=page_limit,
+ include_count=include_count,
+ sort=sort,
+ cursor=cursor,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'page_limit={}'.format(page_limit) in query_string
+ assert 'include_count={}'.format('true' if include_count else 'false') in query_string
+ assert 'sort={}'.format(sort) in query_string
+ assert 'cursor={}'.format(cursor) in query_string
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_list_providers_all_params_with_retries(self):
+ # Enable retries and run test_list_providers_all_params.
+ _service.enable_retries()
+ self.test_list_providers_all_params()
+
+ # Disable retries and run test_list_providers_all_params.
+ _service.disable_retries()
+ self.test_list_providers_all_params()
+
+ @responses.activate
+ def test_list_providers_required_params(self):
+ """
+ test_list_providers_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/providers')
+ mock_response = '{"conversational_skill_providers": [{"provider_id": "provider_id", "specification": {"servers": [{"url": "url"}], "components": {"securitySchemes": {"authentication_method": "basic", "basic": {"username": {"type": "value", "value": "value"}}, "oauth2": {"preferred_flow": "password", "flows": {"token_url": "token_url", "refresh_url": "refresh_url", "client_auth_type": "Body", "content_type": "content_type", "header_prefix": "header_prefix", "username": {"type": "value", "value": "value"}}}}}}}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Invoke method
+ response = _service.list_providers()
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_providers_required_params_with_retries(self):
+ # Enable retries and run test_list_providers_required_params.
+ _service.enable_retries()
+ self.test_list_providers_required_params()
+
+ # Disable retries and run test_list_providers_required_params.
+ _service.disable_retries()
+ self.test_list_providers_required_params()
+
+ @responses.activate
+ def test_list_providers_value_error(self):
+ """
+ test_list_providers_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/providers')
+ mock_response = '{"conversational_skill_providers": [{"provider_id": "provider_id", "specification": {"servers": [{"url": "url"}], "components": {"securitySchemes": {"authentication_method": "basic", "basic": {"username": {"type": "value", "value": "value"}}, "oauth2": {"preferred_flow": "password", "flows": {"token_url": "token_url", "refresh_url": "refresh_url", "client_auth_type": "Body", "content_type": "content_type", "header_prefix": "header_prefix", "username": {"type": "value", "value": "value"}}}}}}}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.list_providers(**req_copy)
+
+ def test_list_providers_value_error_with_retries(self):
+ # Enable retries and run test_list_providers_value_error.
+ _service.enable_retries()
+ self.test_list_providers_value_error()
+
+ # Disable retries and run test_list_providers_value_error.
+ _service.disable_retries()
+ self.test_list_providers_value_error()
+
+
+class TestUpdateProvider:
+ """
+ Test Class for update_provider
+ """
+
+ @responses.activate
+ def test_update_provider_all_params(self):
+ """
+ update_provider()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/providers/testString')
+ mock_response = '{"provider_id": "provider_id", "specification": {"servers": [{"url": "url"}], "components": {"securitySchemes": {"authentication_method": "basic", "basic": {"username": {"type": "value", "value": "value"}}, "oauth2": {"preferred_flow": "password", "flows": {"token_url": "token_url", "refresh_url": "refresh_url", "client_auth_type": "Body", "content_type": "content_type", "header_prefix": "header_prefix", "username": {"type": "value", "value": "value"}}}}}}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a ProviderSpecificationServersItem model
+ provider_specification_servers_item_model = {}
+ provider_specification_servers_item_model['url'] = 'testString'
+
+ # Construct a dict representation of a ProviderAuthenticationTypeAndValue model
+ provider_authentication_type_and_value_model = {}
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemesBasic model
+ provider_specification_components_security_schemes_basic_model = {}
+ provider_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model
+
+ # Construct a dict representation of a ProviderAuthenticationOAuth2PasswordUsername model
+ provider_authentication_o_auth2_password_username_model = {}
+ provider_authentication_o_auth2_password_username_model['type'] = 'value'
+ provider_authentication_o_auth2_password_username_model['value'] = 'testString'
+
+ # Construct a dict representation of a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password model
+ provider_authentication_o_auth2_flows_model = {}
+ provider_authentication_o_auth2_flows_model['token_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body'
+ provider_authentication_o_auth2_flows_model['content_type'] = 'testString'
+ provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString'
+ provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model
+
+ # Construct a dict representation of a ProviderAuthenticationOAuth2 model
+ provider_authentication_o_auth2_model = {}
+ provider_authentication_o_auth2_model['preferred_flow'] = 'password'
+ provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model
+
+ # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemes model
+ provider_specification_components_security_schemes_model = {}
+ provider_specification_components_security_schemes_model['authentication_method'] = 'basic'
+ provider_specification_components_security_schemes_model['basic'] = provider_specification_components_security_schemes_basic_model
+ provider_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model
+
+ # Construct a dict representation of a ProviderSpecificationComponents model
+ provider_specification_components_model = {}
+ provider_specification_components_model['securitySchemes'] = provider_specification_components_security_schemes_model
+
+ # Construct a dict representation of a ProviderSpecification model
+ provider_specification_model = {}
+ provider_specification_model['servers'] = [provider_specification_servers_item_model]
+ provider_specification_model['components'] = provider_specification_components_model
+
+ # Construct a dict representation of a ProviderPrivateAuthenticationBearerFlow model
+ provider_private_authentication_model = {}
+ provider_private_authentication_model['token'] = provider_authentication_type_and_value_model
+
+ # Construct a dict representation of a ProviderPrivate model
+ provider_private_model = {}
+ provider_private_model['authentication'] = provider_private_authentication_model
+
+ # Set up parameter values
+ provider_id = 'testString'
+ specification = provider_specification_model
+ private = provider_private_model
+
+ # Invoke method
+ response = _service.update_provider(
+ provider_id,
+ specification,
+ private,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['specification'] == provider_specification_model
+ assert req_body['private'] == provider_private_model
+
+ def test_update_provider_all_params_with_retries(self):
+ # Enable retries and run test_update_provider_all_params.
+ _service.enable_retries()
+ self.test_update_provider_all_params()
+
+ # Disable retries and run test_update_provider_all_params.
+ _service.disable_retries()
+ self.test_update_provider_all_params()
+
+ @responses.activate
+ def test_update_provider_value_error(self):
+ """
+ test_update_provider_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/providers/testString')
+ mock_response = '{"provider_id": "provider_id", "specification": {"servers": [{"url": "url"}], "components": {"securitySchemes": {"authentication_method": "basic", "basic": {"username": {"type": "value", "value": "value"}}, "oauth2": {"preferred_flow": "password", "flows": {"token_url": "token_url", "refresh_url": "refresh_url", "client_auth_type": "Body", "content_type": "content_type", "header_prefix": "header_prefix", "username": {"type": "value", "value": "value"}}}}}}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a ProviderSpecificationServersItem model
+ provider_specification_servers_item_model = {}
+ provider_specification_servers_item_model['url'] = 'testString'
+
+ # Construct a dict representation of a ProviderAuthenticationTypeAndValue model
+ provider_authentication_type_and_value_model = {}
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemesBasic model
+ provider_specification_components_security_schemes_basic_model = {}
+ provider_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model
+
+ # Construct a dict representation of a ProviderAuthenticationOAuth2PasswordUsername model
+ provider_authentication_o_auth2_password_username_model = {}
+ provider_authentication_o_auth2_password_username_model['type'] = 'value'
+ provider_authentication_o_auth2_password_username_model['value'] = 'testString'
+
+ # Construct a dict representation of a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password model
+ provider_authentication_o_auth2_flows_model = {}
+ provider_authentication_o_auth2_flows_model['token_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body'
+ provider_authentication_o_auth2_flows_model['content_type'] = 'testString'
+ provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString'
+ provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model
+
+ # Construct a dict representation of a ProviderAuthenticationOAuth2 model
+ provider_authentication_o_auth2_model = {}
+ provider_authentication_o_auth2_model['preferred_flow'] = 'password'
+ provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model
+
+ # Construct a dict representation of a ProviderSpecificationComponentsSecuritySchemes model
+ provider_specification_components_security_schemes_model = {}
+ provider_specification_components_security_schemes_model['authentication_method'] = 'basic'
+ provider_specification_components_security_schemes_model['basic'] = provider_specification_components_security_schemes_basic_model
+ provider_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model
+
+ # Construct a dict representation of a ProviderSpecificationComponents model
+ provider_specification_components_model = {}
+ provider_specification_components_model['securitySchemes'] = provider_specification_components_security_schemes_model
+
+ # Construct a dict representation of a ProviderSpecification model
+ provider_specification_model = {}
+ provider_specification_model['servers'] = [provider_specification_servers_item_model]
+ provider_specification_model['components'] = provider_specification_components_model
+
+ # Construct a dict representation of a ProviderPrivateAuthenticationBearerFlow model
+ provider_private_authentication_model = {}
+ provider_private_authentication_model['token'] = provider_authentication_type_and_value_model
+
+ # Construct a dict representation of a ProviderPrivate model
+ provider_private_model = {}
+ provider_private_model['authentication'] = provider_private_authentication_model
+
+ # Set up parameter values
+ provider_id = 'testString'
+ specification = provider_specification_model
+ private = provider_private_model
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "provider_id": provider_id,
+ "specification": specification,
+ "private": private,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.update_provider(**req_copy)
+
+ def test_update_provider_value_error_with_retries(self):
+ # Enable retries and run test_update_provider_value_error.
+ _service.enable_retries()
+ self.test_update_provider_value_error()
+
+ # Disable retries and run test_update_provider_value_error.
+ _service.disable_retries()
+ self.test_update_provider_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: ConversationalSkillProviders
+##############################################################################
+
+##############################################################################
+# Start of Service: Assistants
+##############################################################################
+# region
+
+
+class TestCreateAssistant:
+ """
+ Test Class for create_assistant
+ """
+
+ @responses.activate
+ def test_create_assistant_all_params(self):
+ """
+ create_assistant()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants')
+ mock_response = '{"assistant_id": "assistant_id", "name": "name", "description": "description", "language": "language", "assistant_skills": [{"skill_id": "skill_id", "type": "dialog"}], "assistant_environments": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ language = 'testString'
+ name = 'testString'
+ description = 'testString'
+
+ # Invoke method
+ response = _service.create_assistant(
+ language=language,
+ name=name,
+ description=description,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['language'] == 'testString'
+ assert req_body['name'] == 'testString'
+ assert req_body['description'] == 'testString'
+
+ def test_create_assistant_all_params_with_retries(self):
+ # Enable retries and run test_create_assistant_all_params.
+ _service.enable_retries()
+ self.test_create_assistant_all_params()
+
+ # Disable retries and run test_create_assistant_all_params.
+ _service.disable_retries()
+ self.test_create_assistant_all_params()
+
+    @responses.activate
+    def test_create_assistant_required_params(self):
+        """
+        create_assistant() with only required parameters (this operation has
+        none, so the method is invoked with no arguments).
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants')
+        mock_response = '{"assistant_id": "assistant_id", "name": "name", "description": "description", "language": "language", "assistant_skills": [{"skill_id": "skill_id", "type": "dialog"}], "assistant_environments": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}]}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Invoke method
+        response = _service.create_assistant()
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_create_assistant_required_params_with_retries(self):
+        """Re-run test_create_assistant_required_params with retries enabled, then disabled."""
+        # Enable retries and run test_create_assistant_required_params.
+        _service.enable_retries()
+        self.test_create_assistant_required_params()
+
+        # Disable retries and run test_create_assistant_required_params.
+        _service.disable_retries()
+        self.test_create_assistant_required_params()
+
+    @responses.activate
+    def test_create_assistant_value_error(self):
+        """
+        create_assistant() ValueError check.
+
+        NOTE(review): this operation has no required parameters, so
+        req_param_dict is empty and the loop below never executes; the
+        pattern is kept for symmetry with the other operations.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants')
+        mock_response = '{"assistant_id": "assistant_id", "name": "name", "description": "description", "language": "language", "assistant_skills": [{"skill_id": "skill_id", "type": "dialog"}], "assistant_environments": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}]}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+        }
+        for param in req_param_dict.keys():
+            # 'is not' compares identity; safe here only because 'param' is
+            # the same object as the iterated dict key.
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.create_assistant(**req_copy)
+
+    def test_create_assistant_value_error_with_retries(self):
+        """Re-run test_create_assistant_value_error with retries enabled, then disabled."""
+        # Enable retries and run test_create_assistant_value_error.
+        _service.enable_retries()
+        self.test_create_assistant_value_error()
+
+        # Disable retries and run test_create_assistant_value_error.
+        _service.disable_retries()
+        self.test_create_assistant_value_error()
+
+
+class TestListAssistants:
+    """
+    Test Class for list_assistants
+    """
+
+    @responses.activate
+    def test_list_assistants_all_params(self):
+        """
+        list_assistants() with all query parameters.
+
+        Mocks GET /v2/assistants and verifies each parameter appears in the
+        request query string (booleans serialized as 'true'/'false').
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants')
+        mock_response = '{"assistants": [{"assistant_id": "assistant_id", "name": "name", "description": "description", "language": "language", "assistant_skills": [{"skill_id": "skill_id", "type": "dialog"}], "assistant_environments": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        page_limit = 100
+        include_count = False
+        sort = 'name'
+        cursor = 'testString'
+        include_audit = False
+
+        # Invoke method
+        response = _service.list_assistants(
+            page_limit=page_limit,
+            include_count=include_count,
+            sort=sort,
+            cursor=cursor,
+            include_audit=include_audit,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)
+        assert 'page_limit={}'.format(page_limit) in query_string
+        assert 'include_count={}'.format('true' if include_count else 'false') in query_string
+        assert 'sort={}'.format(sort) in query_string
+        assert 'cursor={}'.format(cursor) in query_string
+        assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+    def test_list_assistants_all_params_with_retries(self):
+        """Re-run test_list_assistants_all_params with retries enabled, then disabled."""
+        # Enable retries and run test_list_assistants_all_params.
+        _service.enable_retries()
+        self.test_list_assistants_all_params()
+
+        # Disable retries and run test_list_assistants_all_params.
+        _service.disable_retries()
+        self.test_list_assistants_all_params()
+
+    @responses.activate
+    def test_list_assistants_required_params(self):
+        """
+        list_assistants() with only required parameters (this operation has
+        none, so the method is invoked with no arguments).
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants')
+        mock_response = '{"assistants": [{"assistant_id": "assistant_id", "name": "name", "description": "description", "language": "language", "assistant_skills": [{"skill_id": "skill_id", "type": "dialog"}], "assistant_environments": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Invoke method
+        response = _service.list_assistants()
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_list_assistants_required_params_with_retries(self):
+        """Re-run test_list_assistants_required_params with retries enabled, then disabled."""
+        # Enable retries and run test_list_assistants_required_params.
+        _service.enable_retries()
+        self.test_list_assistants_required_params()
+
+        # Disable retries and run test_list_assistants_required_params.
+        _service.disable_retries()
+        self.test_list_assistants_required_params()
+
+    @responses.activate
+    def test_list_assistants_value_error(self):
+        """
+        list_assistants() ValueError check.
+
+        NOTE(review): this operation has no required parameters, so
+        req_param_dict is empty and the loop below never executes; the
+        pattern is kept for symmetry with the other operations.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants')
+        mock_response = '{"assistants": [{"assistant_id": "assistant_id", "name": "name", "description": "description", "language": "language", "assistant_skills": [{"skill_id": "skill_id", "type": "dialog"}], "assistant_environments": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}]}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+        }
+        for param in req_param_dict.keys():
+            # 'is not' compares identity; safe here only because 'param' is
+            # the same object as the iterated dict key.
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.list_assistants(**req_copy)
+
+    def test_list_assistants_value_error_with_retries(self):
+        """Re-run test_list_assistants_value_error with retries enabled, then disabled."""
+        # Enable retries and run test_list_assistants_value_error.
+        _service.enable_retries()
+        self.test_list_assistants_value_error()
+
+        # Disable retries and run test_list_assistants_value_error.
+        _service.disable_retries()
+        self.test_list_assistants_value_error()
+
+
+class TestDeleteAssistant:
+    """
+    Test Class for delete_assistant
+    """
+
+    @responses.activate
+    def test_delete_assistant_all_params(self):
+        """
+        delete_assistant(): mocks DELETE /v2/assistants/{assistant_id} and
+        verifies the call succeeds with status 200.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        assistant_id = 'testString'
+
+        # Invoke method
+        response = _service.delete_assistant(
+            assistant_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_delete_assistant_all_params_with_retries(self):
+        """Re-run test_delete_assistant_all_params with retries enabled, then disabled."""
+        # Enable retries and run test_delete_assistant_all_params.
+        _service.enable_retries()
+        self.test_delete_assistant_all_params()
+
+        # Disable retries and run test_delete_assistant_all_params.
+        _service.disable_retries()
+        self.test_delete_assistant_all_params()
+
+    @responses.activate
+    def test_delete_assistant_value_error(self):
+        """
+        delete_assistant() must raise ValueError when the required
+        assistant_id parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        assistant_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "assistant_id": assistant_id,
+        }
+        for param in req_param_dict.keys():
+            # 'is not' compares identity; safe here only because 'param' is
+            # the same object as the iterated dict key.
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.delete_assistant(**req_copy)
+
+    def test_delete_assistant_value_error_with_retries(self):
+        """Re-run test_delete_assistant_value_error with retries enabled, then disabled."""
+        # Enable retries and run test_delete_assistant_value_error.
+        _service.enable_retries()
+        self.test_delete_assistant_value_error()
+
+        # Disable retries and run test_delete_assistant_value_error.
+        _service.disable_retries()
+        self.test_delete_assistant_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Assistants
+##############################################################################
+
+##############################################################################
+# Start of Service: Sessions
+##############################################################################
+# region
+
+
+class TestCreateSession:
+    """
+    Test Class for create_session
+    """
+
+    @responses.activate
+    def test_create_session_all_params(self):
+        """
+        create_session() with all parameters.
+
+        Mocks POST /v2/assistants/{id}/environments/{id}/sessions and
+        verifies the optional analytics model is serialized into the body.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants/testString/environments/testString/sessions')
+        mock_response = '{"session_id": "session_id"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Construct a dict representation of a RequestAnalytics model
+        request_analytics_model = {}
+        request_analytics_model['browser'] = 'testString'
+        request_analytics_model['device'] = 'testString'
+        request_analytics_model['pageUrl'] = 'testString'
+
+        # Set up parameter values
+        assistant_id = 'testString'
+        environment_id = 'testString'
+        analytics = request_analytics_model
+
+        # Invoke method
+        response = _service.create_session(
+            assistant_id,
+            environment_id,
+            analytics=analytics,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+        assert req_body['analytics'] == request_analytics_model
+
+    def test_create_session_all_params_with_retries(self):
+        """Re-run test_create_session_all_params with retries enabled, then disabled."""
+        # Enable retries and run test_create_session_all_params.
+        _service.enable_retries()
+        self.test_create_session_all_params()
+
+        # Disable retries and run test_create_session_all_params.
+        _service.disable_retries()
+        self.test_create_session_all_params()
+
+    @responses.activate
+    def test_create_session_required_params(self):
+        """
+        create_session() with only the required assistant_id and
+        environment_id parameters.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants/testString/environments/testString/sessions')
+        mock_response = '{"session_id": "session_id"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Set up parameter values
+        assistant_id = 'testString'
+        environment_id = 'testString'
+
+        # Invoke method
+        response = _service.create_session(
+            assistant_id,
+            environment_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+
+    def test_create_session_required_params_with_retries(self):
+        """Re-run test_create_session_required_params with retries enabled, then disabled."""
+        # Enable retries and run test_create_session_required_params.
+        _service.enable_retries()
+        self.test_create_session_required_params()
+
+        # Disable retries and run test_create_session_required_params.
+        _service.disable_retries()
+        self.test_create_session_required_params()
+
+    @responses.activate
+    def test_create_session_value_error(self):
+        """
+        create_session() must raise ValueError when any required parameter
+        (assistant_id, environment_id) is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants/testString/environments/testString/sessions')
+        mock_response = '{"session_id": "session_id"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Set up parameter values
+        assistant_id = 'testString'
+        environment_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "assistant_id": assistant_id,
+            "environment_id": environment_id,
+        }
+        for param in req_param_dict.keys():
+            # 'is not' compares identity; safe here only because 'param' is
+            # the same object as the iterated dict key.
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.create_session(**req_copy)
+
+    def test_create_session_value_error_with_retries(self):
+        """Re-run test_create_session_value_error with retries enabled, then disabled."""
+        # Enable retries and run test_create_session_value_error.
+        _service.enable_retries()
+        self.test_create_session_value_error()
+
+        # Disable retries and run test_create_session_value_error.
+        _service.disable_retries()
+        self.test_create_session_value_error()
+
+
+class TestDeleteSession:
+    """
+    Test Class for delete_session
+    """
+
+    @responses.activate
+    def test_delete_session_all_params(self):
+        """
+        delete_session(): mocks DELETE
+        /v2/assistants/{id}/environments/{id}/sessions/{id} and verifies the
+        call succeeds with status 200.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        assistant_id = 'testString'
+        environment_id = 'testString'
+        session_id = 'testString'
+
+        # Invoke method
+        response = _service.delete_session(
+            assistant_id,
+            environment_id,
+            session_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_delete_session_all_params_with_retries(self):
+        """Re-run test_delete_session_all_params with retries enabled, then disabled."""
+        # Enable retries and run test_delete_session_all_params.
+        _service.enable_retries()
+        self.test_delete_session_all_params()
+
+        # Disable retries and run test_delete_session_all_params.
+        _service.disable_retries()
+        self.test_delete_session_all_params()
+
+    @responses.activate
+    def test_delete_session_value_error(self):
+        """
+        delete_session() must raise ValueError when any required parameter
+        (assistant_id, environment_id, session_id) is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        assistant_id = 'testString'
+        environment_id = 'testString'
+        session_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "assistant_id": assistant_id,
+            "environment_id": environment_id,
+            "session_id": session_id,
+        }
+        for param in req_param_dict.keys():
+            # 'is not' compares identity; safe here only because 'param' is
+            # the same object as the iterated dict key.
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.delete_session(**req_copy)
+
+    def test_delete_session_value_error_with_retries(self):
+        """Re-run test_delete_session_value_error with retries enabled, then disabled."""
+        # Enable retries and run test_delete_session_value_error.
+        _service.enable_retries()
+        self.test_delete_session_value_error()
+
+        # Disable retries and run test_delete_session_value_error.
+        _service.disable_retries()
+        self.test_delete_session_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Sessions
+##############################################################################
+
+##############################################################################
+# Start of Service: Message
+##############################################################################
+# region
+
+
+class TestMessage:
+ """
+ Test Class for message
+ """
+
+ @responses.activate
+ def test_message_all_params(self):
+ """
+ message()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString/message')
+ mock_response = '{"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], 
"branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id", "masked_output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": 
"calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "masked_input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": 
"calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a RuntimeIntent model
+ runtime_intent_model = {}
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ # Construct a dict representation of a CaptureGroup model
+ capture_group_model = {}
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ # Construct a dict representation of a RuntimeEntityInterpretation model
+ runtime_entity_interpretation_model = {}
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ # Construct a dict representation of a RuntimeEntityAlternative model
+ runtime_entity_alternative_model = {}
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ # Construct a dict representation of a RuntimeEntityRole model
+ runtime_entity_role_model = {}
+ runtime_entity_role_model['type'] = 'date_from'
+
+ # Construct a dict representation of a RuntimeEntity model
+ runtime_entity_model = {}
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ # Construct a dict representation of a MessageInputAttachment model
+ message_input_attachment_model = {}
+ message_input_attachment_model['url'] = 'testString'
+ message_input_attachment_model['media_type'] = 'testString'
+
+ # Construct a dict representation of a RequestAnalytics model
+ request_analytics_model = {}
+ request_analytics_model['browser'] = 'testString'
+ request_analytics_model['device'] = 'testString'
+ request_analytics_model['pageUrl'] = 'testString'
+
+ # Construct a dict representation of a MessageInputOptionsSpelling model
+ message_input_options_spelling_model = {}
+ message_input_options_spelling_model['suggestions'] = True
+ message_input_options_spelling_model['auto_correct'] = True
+
+ # Construct a dict representation of a MessageInputOptions model
+ message_input_options_model = {}
+ message_input_options_model['restart'] = False
+ message_input_options_model['alternate_intents'] = False
+ message_input_options_model['async_callout'] = False
+ message_input_options_model['spelling'] = message_input_options_spelling_model
+ message_input_options_model['debug'] = False
+ message_input_options_model['return_context'] = False
+ message_input_options_model['export'] = False
+
+ # Construct a dict representation of a MessageInput model
+ message_input_model = {}
+ message_input_model['message_type'] = 'text'
+ message_input_model['text'] = 'testString'
+ message_input_model['intents'] = [runtime_intent_model]
+ message_input_model['entities'] = [runtime_entity_model]
+ message_input_model['suggestion_id'] = 'testString'
+ message_input_model['attachments'] = [message_input_attachment_model]
+ message_input_model['analytics'] = request_analytics_model
+ message_input_model['options'] = message_input_options_model
+
+ # Construct a dict representation of a MessageContextGlobalSystem model
+ message_context_global_system_model = {}
+ message_context_global_system_model['timezone'] = 'testString'
+ message_context_global_system_model['user_id'] = 'testString'
+ message_context_global_system_model['turn_count'] = 38
+ message_context_global_system_model['locale'] = 'en-us'
+ message_context_global_system_model['reference_time'] = 'testString'
+ message_context_global_system_model['session_start_time'] = 'testString'
+ message_context_global_system_model['state'] = 'testString'
+ message_context_global_system_model['skip_user_input'] = True
+
+ # Construct a dict representation of a MessageContextGlobal model
+ message_context_global_model = {}
+ message_context_global_model['system'] = message_context_global_system_model
+
+ # Construct a dict representation of a MessageContextSkillSystem model
+ message_context_skill_system_model = {}
+ message_context_skill_system_model['state'] = 'testString'
+ message_context_skill_system_model['foo'] = 'testString'
+
+ # Construct a dict representation of a MessageContextDialogSkill model
+ message_context_dialog_skill_model = {}
+ message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_dialog_skill_model['system'] = message_context_skill_system_model
+
+ # Construct a dict representation of a MessageContextActionSkill model
+ message_context_action_skill_model = {}
+ message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_action_skill_model['system'] = message_context_skill_system_model
+ message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'}
+ message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'}
+
+ # Construct a dict representation of a MessageContextSkills model
+ message_context_skills_model = {}
+ message_context_skills_model['main skill'] = message_context_dialog_skill_model
+ message_context_skills_model['actions skill'] = message_context_action_skill_model
+
+ # Construct a dict representation of a MessageContext model
+ message_context_model = {}
+ message_context_model['global'] = message_context_global_model
+ message_context_model['skills'] = message_context_skills_model
+ message_context_model['integrations'] = {'anyKey': 'anyValue'}
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ environment_id = 'testString'
+ session_id = 'testString'
+ input = message_input_model
+ context = message_context_model
+ user_id = 'testString'
+
+ # Invoke method
+ response = _service.message(
+ assistant_id,
+ environment_id,
+ session_id,
+ input=input,
+ context=context,
+ user_id=user_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['input'] == message_input_model
+ assert req_body['context'] == message_context_model
+ assert req_body['user_id'] == 'testString'
+
+ def test_message_all_params_with_retries(self):
+ # Enable retries and run test_message_all_params.
+ _service.enable_retries()
+ self.test_message_all_params()
+
+ # Disable retries and run test_message_all_params.
+ _service.disable_retries()
+ self.test_message_all_params()
+
+ @responses.activate
+ def test_message_required_params(self):
+ """
+ test_message_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString/message')
+ mock_response = '{"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], 
"branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id", "masked_output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": 
"calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "masked_input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": 
"calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ environment_id = 'testString'
+ session_id = 'testString'
+
+ # Invoke method
+ response = _service.message(
+ assistant_id,
+ environment_id,
+ session_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_message_required_params_with_retries(self):
+ # Enable retries and run test_message_required_params.
+ _service.enable_retries()
+ self.test_message_required_params()
+
+ # Disable retries and run test_message_required_params.
+ _service.disable_retries()
+ self.test_message_required_params()
+
+ @responses.activate
+ def test_message_value_error(self):
+ """
+ test_message_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString/message')
+ mock_response = '{"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], 
"branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id", "masked_output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": 
"calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "masked_input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": 
"calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ environment_id = 'testString'
+ session_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "assistant_id": assistant_id,
+ "environment_id": environment_id,
+ "session_id": session_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.message(**req_copy)
+
+ def test_message_value_error_with_retries(self):
+ # Enable retries and run test_message_value_error.
+ _service.enable_retries()
+ self.test_message_value_error()
+
+ # Disable retries and run test_message_value_error.
+ _service.disable_retries()
+ self.test_message_value_error()
+
+
+class TestMessageStateless:
+ """
+ Test Class for message_stateless
+ """
+
    @responses.activate
    def test_message_stateless_all_params(self):
        """
        message_stateless()

        Invoke message_stateless() with every optional parameter supplied and
        verify that the serialized request body matches the constructed model
        dicts exactly.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/environments/testString/message')
        mock_response = '{"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}, "private_action_variables": {"anyKey": "anyValue"}, "private_skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "masked_output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "masked_input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "user_id": "user_id"}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Build the nested request-model dicts bottom-up; the leaf models are
        # reused by reference inside the composite models below.

        # Construct a dict representation of a RuntimeIntent model
        runtime_intent_model = {}
        runtime_intent_model['intent'] = 'testString'
        runtime_intent_model['confidence'] = 72.5
        runtime_intent_model['skill'] = 'testString'

        # Construct a dict representation of a CaptureGroup model
        capture_group_model = {}
        capture_group_model['group'] = 'testString'
        capture_group_model['location'] = [38]

        # Construct a dict representation of a RuntimeEntityInterpretation model
        runtime_entity_interpretation_model = {}
        runtime_entity_interpretation_model['calendar_type'] = 'testString'
        runtime_entity_interpretation_model['datetime_link'] = 'testString'
        runtime_entity_interpretation_model['festival'] = 'testString'
        runtime_entity_interpretation_model['granularity'] = 'day'
        runtime_entity_interpretation_model['range_link'] = 'testString'
        runtime_entity_interpretation_model['range_modifier'] = 'testString'
        runtime_entity_interpretation_model['relative_day'] = 72.5
        runtime_entity_interpretation_model['relative_month'] = 72.5
        runtime_entity_interpretation_model['relative_week'] = 72.5
        runtime_entity_interpretation_model['relative_weekend'] = 72.5
        runtime_entity_interpretation_model['relative_year'] = 72.5
        runtime_entity_interpretation_model['specific_day'] = 72.5
        runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
        runtime_entity_interpretation_model['specific_month'] = 72.5
        runtime_entity_interpretation_model['specific_quarter'] = 72.5
        runtime_entity_interpretation_model['specific_year'] = 72.5
        runtime_entity_interpretation_model['numeric_value'] = 72.5
        runtime_entity_interpretation_model['subtype'] = 'testString'
        runtime_entity_interpretation_model['part_of_day'] = 'testString'
        runtime_entity_interpretation_model['relative_hour'] = 72.5
        runtime_entity_interpretation_model['relative_minute'] = 72.5
        runtime_entity_interpretation_model['relative_second'] = 72.5
        runtime_entity_interpretation_model['specific_hour'] = 72.5
        runtime_entity_interpretation_model['specific_minute'] = 72.5
        runtime_entity_interpretation_model['specific_second'] = 72.5
        runtime_entity_interpretation_model['timezone'] = 'testString'

        # Construct a dict representation of a RuntimeEntityAlternative model
        runtime_entity_alternative_model = {}
        runtime_entity_alternative_model['value'] = 'testString'
        runtime_entity_alternative_model['confidence'] = 72.5

        # Construct a dict representation of a RuntimeEntityRole model
        runtime_entity_role_model = {}
        runtime_entity_role_model['type'] = 'date_from'

        # Construct a dict representation of a RuntimeEntity model
        runtime_entity_model = {}
        runtime_entity_model['entity'] = 'testString'
        runtime_entity_model['location'] = [38]
        runtime_entity_model['value'] = 'testString'
        runtime_entity_model['confidence'] = 72.5
        runtime_entity_model['groups'] = [capture_group_model]
        runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
        runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
        runtime_entity_model['role'] = runtime_entity_role_model
        runtime_entity_model['skill'] = 'testString'

        # Construct a dict representation of a MessageInputAttachment model
        message_input_attachment_model = {}
        message_input_attachment_model['url'] = 'testString'
        message_input_attachment_model['media_type'] = 'testString'

        # Construct a dict representation of a RequestAnalytics model
        request_analytics_model = {}
        request_analytics_model['browser'] = 'testString'
        request_analytics_model['device'] = 'testString'
        request_analytics_model['pageUrl'] = 'testString'

        # Construct a dict representation of a MessageInputOptionsSpelling model
        message_input_options_spelling_model = {}
        message_input_options_spelling_model['suggestions'] = True
        message_input_options_spelling_model['auto_correct'] = True

        # Construct a dict representation of a StatelessMessageInputOptions model
        stateless_message_input_options_model = {}
        stateless_message_input_options_model['restart'] = False
        stateless_message_input_options_model['alternate_intents'] = False
        stateless_message_input_options_model['async_callout'] = False
        stateless_message_input_options_model['spelling'] = message_input_options_spelling_model
        stateless_message_input_options_model['debug'] = False

        # Construct a dict representation of a StatelessMessageInput model
        stateless_message_input_model = {}
        stateless_message_input_model['message_type'] = 'text'
        stateless_message_input_model['text'] = 'testString'
        stateless_message_input_model['intents'] = [runtime_intent_model]
        stateless_message_input_model['entities'] = [runtime_entity_model]
        stateless_message_input_model['suggestion_id'] = 'testString'
        stateless_message_input_model['attachments'] = [message_input_attachment_model]
        stateless_message_input_model['analytics'] = request_analytics_model
        stateless_message_input_model['options'] = stateless_message_input_options_model

        # Construct a dict representation of a MessageContextGlobalSystem model
        message_context_global_system_model = {}
        message_context_global_system_model['timezone'] = 'testString'
        message_context_global_system_model['user_id'] = 'testString'
        message_context_global_system_model['turn_count'] = 38
        message_context_global_system_model['locale'] = 'en-us'
        message_context_global_system_model['reference_time'] = 'testString'
        message_context_global_system_model['session_start_time'] = 'testString'
        message_context_global_system_model['state'] = 'testString'
        message_context_global_system_model['skip_user_input'] = True

        # Construct a dict representation of a StatelessMessageContextGlobal model
        stateless_message_context_global_model = {}
        stateless_message_context_global_model['system'] = message_context_global_system_model
        stateless_message_context_global_model['session_id'] = 'testString'

        # Construct a dict representation of a MessageContextSkillSystem model
        message_context_skill_system_model = {}
        message_context_skill_system_model['state'] = 'testString'
        message_context_skill_system_model['foo'] = 'testString'

        # Construct a dict representation of a MessageContextDialogSkill model
        message_context_dialog_skill_model = {}
        message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
        message_context_dialog_skill_model['system'] = message_context_skill_system_model

        # Construct a dict representation of a StatelessMessageContextSkillsActionsSkill model
        stateless_message_context_skills_actions_skill_model = {}
        stateless_message_context_skills_actions_skill_model['user_defined'] = {'anyKey': 'anyValue'}
        stateless_message_context_skills_actions_skill_model['system'] = message_context_skill_system_model
        stateless_message_context_skills_actions_skill_model['action_variables'] = {'anyKey': 'anyValue'}
        stateless_message_context_skills_actions_skill_model['skill_variables'] = {'anyKey': 'anyValue'}
        stateless_message_context_skills_actions_skill_model['private_action_variables'] = {'anyKey': 'anyValue'}
        stateless_message_context_skills_actions_skill_model['private_skill_variables'] = {'anyKey': 'anyValue'}

        # Construct a dict representation of a StatelessMessageContextSkills model
        stateless_message_context_skills_model = {}
        stateless_message_context_skills_model['main skill'] = message_context_dialog_skill_model
        stateless_message_context_skills_model['actions skill'] = stateless_message_context_skills_actions_skill_model

        # Construct a dict representation of a StatelessMessageContext model
        stateless_message_context_model = {}
        stateless_message_context_model['global'] = stateless_message_context_global_model
        stateless_message_context_model['skills'] = stateless_message_context_skills_model
        stateless_message_context_model['integrations'] = {'anyKey': 'anyValue'}

        # Set up parameter values
        assistant_id = 'testString'
        environment_id = 'testString'
        input = stateless_message_input_model
        context = stateless_message_context_model
        user_id = 'testString'

        # Invoke method
        response = _service.message_stateless(
            assistant_id,
            environment_id,
            input=input,
            context=context,
            user_id=user_id,
            headers={},
        )

        # Check for correct operation: exactly one HTTP call was made and it
        # succeeded with the mocked 200 status.
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate body params: the captured request body must round-trip the
        # model dicts unchanged through serialization.
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['input'] == stateless_message_input_model
        assert req_body['context'] == stateless_message_context_model
        assert req_body['user_id'] == 'testString'
+
+ def test_message_stateless_all_params_with_retries(self):
+ # Enable retries and run test_message_stateless_all_params.
+ _service.enable_retries()
+ self.test_message_stateless_all_params()
+
+ # Disable retries and run test_message_stateless_all_params.
+ _service.disable_retries()
+ self.test_message_stateless_all_params()
+
+ @responses.activate
+ def test_message_stateless_required_params(self):
+ """
+ test_message_stateless_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/environments/testString/message')
+ mock_response = '{"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], 
"branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}, "private_action_variables": {"anyKey": "anyValue"}, "private_skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "masked_output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": 
[{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "masked_input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": 
[{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "user_id": "user_id"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ environment_id = 'testString'
+
+ # Invoke method
+ response = _service.message_stateless(
+ assistant_id,
+ environment_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_message_stateless_required_params_with_retries(self):
+ # Enable retries and run test_message_stateless_required_params.
+ _service.enable_retries()
+ self.test_message_stateless_required_params()
+
+ # Disable retries and run test_message_stateless_required_params.
+ _service.disable_retries()
+ self.test_message_stateless_required_params()
+
+ @responses.activate
+ def test_message_stateless_value_error(self):
+ """
+ test_message_stateless_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/environments/testString/message')
+ mock_response = '{"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], 
"branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}, "private_action_variables": {"anyKey": "anyValue"}, "private_skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "masked_output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": 
[{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": "code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "masked_input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": 
[{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "user_id": "user_id"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ environment_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "assistant_id": assistant_id,
+ "environment_id": environment_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.message_stateless(**req_copy)
+
+ def test_message_stateless_value_error_with_retries(self):
+ # Enable retries and run test_message_stateless_value_error.
+ _service.enable_retries()
+ self.test_message_stateless_value_error()
+
+ # Disable retries and run test_message_stateless_value_error.
+ _service.disable_retries()
+ self.test_message_stateless_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Message
+##############################################################################
+
+##############################################################################
+# Start of Service: MessageStream
+##############################################################################
+# region
+
+
+class TestMessageStream:
+ """
+ Test Class for message_stream
+ """
+
+ @responses.activate
+ def test_message_stream_all_params(self):
+ """
+ message_stream()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString/message_stream')
+ mock_response = 'This is a mock binary response.'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='text/event-stream',
+ status=200,
+ )
+
+ # Construct a dict representation of a RuntimeIntent model
+ runtime_intent_model = {}
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ # Construct a dict representation of a CaptureGroup model
+ capture_group_model = {}
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ # Construct a dict representation of a RuntimeEntityInterpretation model
+ runtime_entity_interpretation_model = {}
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ # Construct a dict representation of a RuntimeEntityAlternative model
+ runtime_entity_alternative_model = {}
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ # Construct a dict representation of a RuntimeEntityRole model
+ runtime_entity_role_model = {}
+ runtime_entity_role_model['type'] = 'date_from'
+
+ # Construct a dict representation of a RuntimeEntity model
+ runtime_entity_model = {}
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ # Construct a dict representation of a MessageInputAttachment model
+ message_input_attachment_model = {}
+ message_input_attachment_model['url'] = 'testString'
+ message_input_attachment_model['media_type'] = 'testString'
+
+ # Construct a dict representation of a RequestAnalytics model
+ request_analytics_model = {}
+ request_analytics_model['browser'] = 'testString'
+ request_analytics_model['device'] = 'testString'
+ request_analytics_model['pageUrl'] = 'testString'
+
+ # Construct a dict representation of a MessageInputOptionsSpelling model
+ message_input_options_spelling_model = {}
+ message_input_options_spelling_model['suggestions'] = True
+ message_input_options_spelling_model['auto_correct'] = True
+
+ # Construct a dict representation of a MessageInputOptions model
+ message_input_options_model = {}
+ message_input_options_model['restart'] = False
+ message_input_options_model['alternate_intents'] = False
+ message_input_options_model['async_callout'] = False
+ message_input_options_model['spelling'] = message_input_options_spelling_model
+ message_input_options_model['debug'] = False
+ message_input_options_model['return_context'] = False
+ message_input_options_model['export'] = False
+
+ # Construct a dict representation of a MessageInput model
+ message_input_model = {}
+ message_input_model['message_type'] = 'text'
+ message_input_model['text'] = 'testString'
+ message_input_model['intents'] = [runtime_intent_model]
+ message_input_model['entities'] = [runtime_entity_model]
+ message_input_model['suggestion_id'] = 'testString'
+ message_input_model['attachments'] = [message_input_attachment_model]
+ message_input_model['analytics'] = request_analytics_model
+ message_input_model['options'] = message_input_options_model
+
+ # Construct a dict representation of a MessageContextGlobalSystem model
+ message_context_global_system_model = {}
+ message_context_global_system_model['timezone'] = 'testString'
+ message_context_global_system_model['user_id'] = 'testString'
+ message_context_global_system_model['turn_count'] = 38
+ message_context_global_system_model['locale'] = 'en-us'
+ message_context_global_system_model['reference_time'] = 'testString'
+ message_context_global_system_model['session_start_time'] = 'testString'
+ message_context_global_system_model['state'] = 'testString'
+ message_context_global_system_model['skip_user_input'] = True
+
+ # Construct a dict representation of a MessageContextGlobal model
+ message_context_global_model = {}
+ message_context_global_model['system'] = message_context_global_system_model
+
+ # Construct a dict representation of a MessageContextSkillSystem model
+ message_context_skill_system_model = {}
+ message_context_skill_system_model['state'] = 'testString'
+ message_context_skill_system_model['foo'] = 'testString'
+
+ # Construct a dict representation of a MessageContextDialogSkill model
+ message_context_dialog_skill_model = {}
+ message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_dialog_skill_model['system'] = message_context_skill_system_model
+
+ # Construct a dict representation of a MessageContextActionSkill model
+ message_context_action_skill_model = {}
+ message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_action_skill_model['system'] = message_context_skill_system_model
+ message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'}
+ message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'}
+
+ # Construct a dict representation of a MessageContextSkills model
+ message_context_skills_model = {}
+ message_context_skills_model['main skill'] = message_context_dialog_skill_model
+ message_context_skills_model['actions skill'] = message_context_action_skill_model
+
+ # Construct a dict representation of a MessageContext model
+ message_context_model = {}
+ message_context_model['global'] = message_context_global_model
+ message_context_model['skills'] = message_context_skills_model
+ message_context_model['integrations'] = {'anyKey': 'anyValue'}
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ environment_id = 'testString'
+ session_id = 'testString'
+ input = message_input_model
+ context = message_context_model
+ user_id = 'testString'
+
+ # Invoke method
+ response = _service.message_stream(
+ assistant_id,
+ environment_id,
+ session_id,
+ input=input,
+ context=context,
+ user_id=user_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['input'] == message_input_model
+ assert req_body['context'] == message_context_model
+ assert req_body['user_id'] == 'testString'
+
+ def test_message_stream_all_params_with_retries(self):
+ # Enable retries and run test_message_stream_all_params.
+ _service.enable_retries()
+ self.test_message_stream_all_params()
+
+ # Disable retries and run test_message_stream_all_params.
+ _service.disable_retries()
+ self.test_message_stream_all_params()
+
+ @responses.activate
+ def test_message_stream_required_params(self):
+ """
+ test_message_stream_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString/message_stream')
+ mock_response = 'This is a mock binary response.'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='text/event-stream',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ environment_id = 'testString'
+ session_id = 'testString'
+
+ # Invoke method
+ response = _service.message_stream(
+ assistant_id,
+ environment_id,
+ session_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_message_stream_required_params_with_retries(self):
+ # Enable retries and run test_message_stream_required_params.
+ _service.enable_retries()
+ self.test_message_stream_required_params()
+
+ # Disable retries and run test_message_stream_required_params.
+ _service.disable_retries()
+ self.test_message_stream_required_params()
+
+ @responses.activate
+ def test_message_stream_value_error(self):
+ """
+ test_message_stream_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/environments/testString/sessions/testString/message_stream')
+ mock_response = 'This is a mock binary response.'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='text/event-stream',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ environment_id = 'testString'
+ session_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "assistant_id": assistant_id,
+ "environment_id": environment_id,
+ "session_id": session_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.message_stream(**req_copy)
+
+ def test_message_stream_value_error_with_retries(self):
+ # Enable retries and run test_message_stream_value_error.
+ _service.enable_retries()
+ self.test_message_stream_value_error()
+
+ # Disable retries and run test_message_stream_value_error.
+ _service.disable_retries()
+ self.test_message_stream_value_error()
+
+
+class TestMessageStreamStateless:
+ """
+ Test Class for message_stream_stateless
+ """
+
+ @responses.activate
+ def test_message_stream_stateless_all_params(self):
+ """
+ message_stream_stateless()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/environments/testString/message_stream')
+ mock_response = 'This is a mock binary response.'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='text/event-stream',
+ status=200,
+ )
+
+ # Construct a dict representation of a RuntimeIntent model
+ runtime_intent_model = {}
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ # Construct a dict representation of a CaptureGroup model
+ capture_group_model = {}
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ # Construct a dict representation of a RuntimeEntityInterpretation model
+ runtime_entity_interpretation_model = {}
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ # Construct a dict representation of a RuntimeEntityAlternative model
+ runtime_entity_alternative_model = {}
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ # Construct a dict representation of a RuntimeEntityRole model
+ runtime_entity_role_model = {}
+ runtime_entity_role_model['type'] = 'date_from'
+
+ # Construct a dict representation of a RuntimeEntity model
+ runtime_entity_model = {}
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ # Construct a dict representation of a MessageInputAttachment model
+ message_input_attachment_model = {}
+ message_input_attachment_model['url'] = 'testString'
+ message_input_attachment_model['media_type'] = 'testString'
+
+ # Construct a dict representation of a RequestAnalytics model
+ request_analytics_model = {}
+ request_analytics_model['browser'] = 'testString'
+ request_analytics_model['device'] = 'testString'
+ request_analytics_model['pageUrl'] = 'testString'
+
+ # Construct a dict representation of a MessageInputOptionsSpelling model
+ message_input_options_spelling_model = {}
+ message_input_options_spelling_model['suggestions'] = True
+ message_input_options_spelling_model['auto_correct'] = True
+
+ # Construct a dict representation of a MessageInputOptions model
+ message_input_options_model = {}
+ message_input_options_model['restart'] = False
+ message_input_options_model['alternate_intents'] = False
+ message_input_options_model['async_callout'] = False
+ message_input_options_model['spelling'] = message_input_options_spelling_model
+ message_input_options_model['debug'] = False
+ message_input_options_model['return_context'] = False
+ message_input_options_model['export'] = False
+
+ # Construct a dict representation of a MessageInput model
+ message_input_model = {}
+ message_input_model['message_type'] = 'text'
+ message_input_model['text'] = 'testString'
+ message_input_model['intents'] = [runtime_intent_model]
+ message_input_model['entities'] = [runtime_entity_model]
+ message_input_model['suggestion_id'] = 'testString'
+ message_input_model['attachments'] = [message_input_attachment_model]
+ message_input_model['analytics'] = request_analytics_model
+ message_input_model['options'] = message_input_options_model
+
+ # Construct a dict representation of a MessageContextGlobalSystem model
+ message_context_global_system_model = {}
+ message_context_global_system_model['timezone'] = 'testString'
+ message_context_global_system_model['user_id'] = 'testString'
+ message_context_global_system_model['turn_count'] = 38
+ message_context_global_system_model['locale'] = 'en-us'
+ message_context_global_system_model['reference_time'] = 'testString'
+ message_context_global_system_model['session_start_time'] = 'testString'
+ message_context_global_system_model['state'] = 'testString'
+ message_context_global_system_model['skip_user_input'] = True
+
+ # Construct a dict representation of a MessageContextGlobal model
+ message_context_global_model = {}
+ message_context_global_model['system'] = message_context_global_system_model
+
+ # Construct a dict representation of a MessageContextSkillSystem model
+ message_context_skill_system_model = {}
+ message_context_skill_system_model['state'] = 'testString'
+ message_context_skill_system_model['foo'] = 'testString'
+
+ # Construct a dict representation of a MessageContextDialogSkill model
+ message_context_dialog_skill_model = {}
+ message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_dialog_skill_model['system'] = message_context_skill_system_model
+
+ # Construct a dict representation of a MessageContextActionSkill model
+ message_context_action_skill_model = {}
+ message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_action_skill_model['system'] = message_context_skill_system_model
+ message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'}
+ message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'}
+
+ # Construct a dict representation of a MessageContextSkills model
+ message_context_skills_model = {}
+ message_context_skills_model['main skill'] = message_context_dialog_skill_model
+ message_context_skills_model['actions skill'] = message_context_action_skill_model
+
+ # Construct a dict representation of a MessageContext model
+ message_context_model = {}
+ message_context_model['global'] = message_context_global_model
+ message_context_model['skills'] = message_context_skills_model
+ message_context_model['integrations'] = {'anyKey': 'anyValue'}
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ environment_id = 'testString'
+ input = message_input_model
+ context = message_context_model
+ user_id = 'testString'
+
+ # Invoke method
+ response = _service.message_stream_stateless(
+ assistant_id,
+ environment_id,
+ input=input,
+ context=context,
+ user_id=user_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['input'] == message_input_model
+ assert req_body['context'] == message_context_model
+ assert req_body['user_id'] == 'testString'
+
+ def test_message_stream_stateless_all_params_with_retries(self):
+ # Enable retries and run test_message_stream_stateless_all_params.
+ _service.enable_retries()
+ self.test_message_stream_stateless_all_params()
+
+ # Disable retries and run test_message_stream_stateless_all_params.
+ _service.disable_retries()
+ self.test_message_stream_stateless_all_params()
+
+ @responses.activate
+ def test_message_stream_stateless_required_params(self):
+ """
+ test_message_stream_stateless_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/environments/testString/message_stream')
+ mock_response = 'This is a mock binary response.'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='text/event-stream',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ environment_id = 'testString'
+
+ # Invoke method
+ response = _service.message_stream_stateless(
+ assistant_id,
+ environment_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_message_stream_stateless_required_params_with_retries(self):
+ # Enable retries and run test_message_stream_stateless_required_params.
+ _service.enable_retries()
+ self.test_message_stream_stateless_required_params()
+
+ # Disable retries and run test_message_stream_stateless_required_params.
+ _service.disable_retries()
+ self.test_message_stream_stateless_required_params()
+
+ @responses.activate
+ def test_message_stream_stateless_value_error(self):
+ """
+ test_message_stream_stateless_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/environments/testString/message_stream')
+ mock_response = 'This is a mock binary response.'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='text/event-stream',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ environment_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "assistant_id": assistant_id,
+ "environment_id": environment_id,
+ }
+ for param in req_param_dict.keys():
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.message_stream_stateless(**req_copy)
+
+ def test_message_stream_stateless_value_error_with_retries(self):
+ # Enable retries and run test_message_stream_stateless_value_error.
+ _service.enable_retries()
+ self.test_message_stream_stateless_value_error()
+
+ # Disable retries and run test_message_stream_stateless_value_error.
+ _service.disable_retries()
+ self.test_message_stream_stateless_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: MessageStream
+##############################################################################
+
+##############################################################################
+# Start of Service: BulkClassify
+##############################################################################
+# region
+
+
+class TestBulkClassify:
+ """
+ Test Class for bulk_classify
+ """
+
+ @responses.activate
+ def test_bulk_classify_all_params(self):
+ """
+ bulk_classify()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/skills/testString/workspace/bulk_classify')
+ mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}]}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a BulkClassifyUtterance model
+ bulk_classify_utterance_model = {}
+ bulk_classify_utterance_model['text'] = 'testString'
+
+ # Set up parameter values
+ skill_id = 'testString'
+ input = [bulk_classify_utterance_model]
+
+ # Invoke method
+ response = _service.bulk_classify(
+ skill_id,
+ input,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['input'] == [bulk_classify_utterance_model]
+
+ def test_bulk_classify_all_params_with_retries(self):
+ # Enable retries and run test_bulk_classify_all_params.
+ _service.enable_retries()
+ self.test_bulk_classify_all_params()
+
+ # Disable retries and run test_bulk_classify_all_params.
+ _service.disable_retries()
+ self.test_bulk_classify_all_params()
+
+ @responses.activate
+ def test_bulk_classify_value_error(self):
+ """
+ test_bulk_classify_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/skills/testString/workspace/bulk_classify')
+ mock_response = '{"output": [{"input": {"text": "text"}, "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}]}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a BulkClassifyUtterance model
+ bulk_classify_utterance_model = {}
+ bulk_classify_utterance_model['text'] = 'testString'
+
+ # Set up parameter values
+ skill_id = 'testString'
+ input = [bulk_classify_utterance_model]
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "skill_id": skill_id,
+ "input": input,
+ }
+ for param in req_param_dict.keys():
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.bulk_classify(**req_copy)
+
+ def test_bulk_classify_value_error_with_retries(self):
+ # Enable retries and run test_bulk_classify_value_error.
+ _service.enable_retries()
+ self.test_bulk_classify_value_error()
+
+ # Disable retries and run test_bulk_classify_value_error.
+ _service.disable_retries()
+ self.test_bulk_classify_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: BulkClassify
+##############################################################################
+
+##############################################################################
+# Start of Service: Logs
+##############################################################################
+# region
+
+
+class TestListLogs:
+ """
+ Test Class for list_logs
+ """
+
+ @responses.activate
+ def test_list_logs_all_params(self):
+ """
+ list_logs()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/logs')
+ mock_response = '{"logs": [{"log_id": "log_id", "request": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": 
{"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id"}, "response": {"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": 
"code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id"}, "assistant_id": "assistant_id", "session_id": "session_id", "skill_id": "skill_id", "snapshot": "snapshot", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "language": "language", "customer_id": "customer_id"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ sort = 'testString'
+ filter = 'testString'
+ page_limit = 100
+ cursor = 'testString'
+
+ # Invoke method
+ response = _service.list_logs(
+ assistant_id,
+ sort=sort,
+ filter=filter,
+ page_limit=page_limit,
+ cursor=cursor,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'sort={}'.format(sort) in query_string
+ assert 'filter={}'.format(filter) in query_string
+ assert 'page_limit={}'.format(page_limit) in query_string
+ assert 'cursor={}'.format(cursor) in query_string
+
+ def test_list_logs_all_params_with_retries(self):
+ # Enable retries and run test_list_logs_all_params.
+ _service.enable_retries()
+ self.test_list_logs_all_params()
+
+ # Disable retries and run test_list_logs_all_params.
+ _service.disable_retries()
+ self.test_list_logs_all_params()
+
+ @responses.activate
+ def test_list_logs_required_params(self):
+ """
+ test_list_logs_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/logs')
+ mock_response = '{"logs": [{"log_id": "log_id", "request": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": 
{"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id"}, "response": {"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": 
"code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id"}, "assistant_id": "assistant_id", "session_id": "session_id", "skill_id": "skill_id", "snapshot": "snapshot", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "language": "language", "customer_id": "customer_id"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+
+ # Invoke method
+ response = _service.list_logs(
+ assistant_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_logs_required_params_with_retries(self):
+ # Enable retries and run test_list_logs_required_params.
+ _service.enable_retries()
+ self.test_list_logs_required_params()
+
+ # Disable retries and run test_list_logs_required_params.
+ _service.disable_retries()
+ self.test_list_logs_required_params()
+
+ @responses.activate
+ def test_list_logs_value_error(self):
+ """
+ test_list_logs_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/logs')
+ mock_response = '{"logs": [{"log_id": "log_id", "request": {"input": {"message_type": "text", "text": "text", "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "suggestion_id": "suggestion_id", "attachments": [{"url": "url", "media_type": "media_type"}], "analytics": {"browser": "browser", "device": "device", "pageUrl": "page_url"}, "options": {"restart": false, "alternate_intents": false, "async_callout": false, "spelling": {"suggestions": false, "auto_correct": true}, "debug": false, "return_context": false, "export": false}}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": 
{"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id"}, "response": {"output": {"generic": [{"response_type": "conversation_search", "text": "text", "citations_title": "citations_title", "citations": [{"title": "title", "text": "text", "body": "body", "search_result_index": 19, "ranges": [{"start": 5, "end": 3}]}], "confidence_scores": {"threshold": 9, "pre_gen": 7, "post_gen": 8, "extractiveness": 14}, "response_length_option": "response_length_option", "search_results": [{"result_metadata": {"document_retrieval_source": "document_retrieval_source", "score": 5}, "id": "id", "title": "title", "body": "body"}], "disclaimer": "disclaimer"}], "intents": [{"intent": "intent", "confidence": 10, "skill": "skill"}], "entities": [{"entity": "entity", "location": [8], "value": "value", "confidence": 10, "groups": [{"group": "group", "location": [8]}], "interpretation": {"calendar_type": "calendar_type", "datetime_link": "datetime_link", "festival": "festival", "granularity": "day", "range_link": "range_link", "range_modifier": "range_modifier", "relative_day": 12, "relative_month": 14, "relative_week": 13, "relative_weekend": 16, "relative_year": 13, "specific_day": 12, "specific_day_of_week": "specific_day_of_week", "specific_month": 14, "specific_quarter": 16, "specific_year": 13, "numeric_value": 13, "subtype": "subtype", "part_of_day": "part_of_day", "relative_hour": 13, "relative_minute": 15, "relative_second": 15, "specific_hour": 13, "specific_minute": 15, "specific_second": 15, "timezone": "timezone"}, "alternatives": [{"value": "value", "confidence": 10}], "role": {"type": "date_from"}, "skill": "skill"}], "actions": [{"name": "name", "type": "client", "parameters": {"anyKey": "anyValue"}, "result_variable": "result_variable", "credentials": "credentials"}], "debug": {"nodes_visited": [{"dialog_node": "dialog_node", "title": "title", "conditions": "conditions"}], "log_messages": [{"level": "info", "message": "message", "code": 
"code", "source": {"type": "dialog_node", "dialog_node": "dialog_node"}}], "branch_exited": false, "branch_exited_reason": "completed", "turn_events": [{"event": "action_visited", "source": {"type": "action", "action": "action", "action_title": "action_title", "condition": "condition"}, "action_start_time": "action_start_time", "condition_type": "user_defined", "reason": "intent", "result_variable": "result_variable"}]}, "user_defined": {"anyKey": "anyValue"}, "spelling": {"text": "text", "original_text": "original_text", "suggested_text": "suggested_text"}, "llm_metadata": [{"task": "task", "model_id": "model_id"}]}, "context": {"global": {"system": {"timezone": "timezone", "user_id": "user_id", "turn_count": 10, "locale": "en-us", "reference_time": "reference_time", "session_start_time": "session_start_time", "state": "state", "skip_user_input": false}, "session_id": "session_id"}, "skills": {"main skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}}, "actions skill": {"user_defined": {"anyKey": "anyValue"}, "system": {"state": "state"}, "action_variables": {"anyKey": "anyValue"}, "skill_variables": {"anyKey": "anyValue"}}}, "integrations": {"anyKey": "anyValue"}}, "user_id": "user_id"}, "assistant_id": "assistant_id", "session_id": "session_id", "skill_id": "skill_id", "snapshot": "snapshot", "request_timestamp": "request_timestamp", "response_timestamp": "response_timestamp", "language": "language", "customer_id": "customer_id"}], "pagination": {"next_url": "next_url", "matched": 7, "next_cursor": "next_cursor"}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "assistant_id": assistant_id,
+ }
+ for param in req_param_dict.keys():
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.list_logs(**req_copy)
+
+ def test_list_logs_value_error_with_retries(self):
+ # Enable retries and run test_list_logs_value_error.
+ _service.enable_retries()
+ self.test_list_logs_value_error()
+
+ # Disable retries and run test_list_logs_value_error.
+ _service.disable_retries()
+ self.test_list_logs_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Logs
+##############################################################################
+
+##############################################################################
+# Start of Service: UserData
+##############################################################################
+# region
+
+
class TestDeleteUserData:
    """
    Test Class for delete_user_data
    """

    @responses.activate
    def test_delete_user_data_all_params(self):
        """
        delete_user_data()

        Verify a DELETE is sent to /v2/user_data with the customer_id query
        parameter and that the mocked 202 response is returned.
        """
        # Set up mock
        url = preprocess_url('/v2/user_data')
        responses.add(
            responses.DELETE,
            url,
            status=202,
        )

        # Set up parameter values
        customer_id = 'testString'

        # Invoke method
        response = _service.delete_user_data(
            customer_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'customer_id={}'.format(customer_id) in query_string

    def test_delete_user_data_all_params_with_retries(self):
        # Enable retries and run test_delete_user_data_all_params.
        _service.enable_retries()
        self.test_delete_user_data_all_params()

        # Disable retries and run test_delete_user_data_all_params.
        _service.disable_retries()
        self.test_delete_user_data_all_params()

    @responses.activate
    def test_delete_user_data_value_error(self):
        """
        test_delete_user_data_value_error()

        Verify delete_user_data() raises ValueError when any required
        parameter is None.
        """
        # Set up mock
        url = preprocess_url('/v2/user_data')
        responses.add(
            responses.DELETE,
            url,
            status=202,
        )

        # Set up parameter values
        customer_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "customer_id": customer_id,
        }
        for param in req_param_dict:
            # Compare keys with `==`, not `is`: identity comparison of strings
            # only works by accident of CPython interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_user_data(**req_copy)

    def test_delete_user_data_value_error_with_retries(self):
        # Enable retries and run test_delete_user_data_value_error.
        _service.enable_retries()
        self.test_delete_user_data_value_error()

        # Disable retries and run test_delete_user_data_value_error.
        _service.disable_retries()
        self.test_delete_user_data_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: UserData
+##############################################################################
+
+##############################################################################
+# Start of Service: Environments
+##############################################################################
+# region
+
+
class TestListEnvironments:
    """
    Test Class for list_environments
    """

    @responses.activate
    def test_list_environments_all_params(self):
        """
        list_environments()

        Verify a GET to /v2/assistants/{assistant_id}/environments carries all
        optional query parameters and returns the mocked 200 response.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/environments')
        mock_response = '{"environments": [{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        assistant_id = 'testString'
        page_limit = 100
        include_count = False
        sort = 'name'
        cursor = 'testString'
        include_audit = False

        # Invoke method
        response = _service.list_environments(
            assistant_id,
            page_limit=page_limit,
            include_count=include_count,
            sort=sort,
            cursor=cursor,
            include_audit=include_audit,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'page_limit={}'.format(page_limit) in query_string
        assert 'include_count={}'.format('true' if include_count else 'false') in query_string
        assert 'sort={}'.format(sort) in query_string
        assert 'cursor={}'.format(cursor) in query_string
        assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string

    def test_list_environments_all_params_with_retries(self):
        # Enable retries and run test_list_environments_all_params.
        _service.enable_retries()
        self.test_list_environments_all_params()

        # Disable retries and run test_list_environments_all_params.
        _service.disable_retries()
        self.test_list_environments_all_params()

    @responses.activate
    def test_list_environments_required_params(self):
        """
        test_list_environments_required_params()

        Verify the call succeeds with only the required assistant_id.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/environments')
        mock_response = '{"environments": [{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        assistant_id = 'testString'

        # Invoke method
        response = _service.list_environments(
            assistant_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_list_environments_required_params_with_retries(self):
        # Enable retries and run test_list_environments_required_params.
        _service.enable_retries()
        self.test_list_environments_required_params()

        # Disable retries and run test_list_environments_required_params.
        _service.disable_retries()
        self.test_list_environments_required_params()

    @responses.activate
    def test_list_environments_value_error(self):
        """
        test_list_environments_value_error()

        Verify list_environments() raises ValueError when any required
        parameter is None.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/environments')
        mock_response = '{"environments": [{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        assistant_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "assistant_id": assistant_id,
        }
        for param in req_param_dict:
            # Compare keys with `==`, not `is`: identity comparison of strings
            # only works by accident of CPython interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.list_environments(**req_copy)

    def test_list_environments_value_error_with_retries(self):
        # Enable retries and run test_list_environments_value_error.
        _service.enable_retries()
        self.test_list_environments_value_error()

        # Disable retries and run test_list_environments_value_error.
        _service.disable_retries()
        self.test_list_environments_value_error()
+
+
class TestGetEnvironment:
    """
    Test Class for get_environment
    """

    @responses.activate
    def test_get_environment_all_params(self):
        """
        get_environment()

        Verify a GET to /v2/assistants/{assistant_id}/environments/{environment_id}
        carries the include_audit query parameter and returns the mocked 200.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/environments/testString')
        mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        assistant_id = 'testString'
        environment_id = 'testString'
        include_audit = False

        # Invoke method
        response = _service.get_environment(
            assistant_id,
            environment_id,
            include_audit=include_audit,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string

    def test_get_environment_all_params_with_retries(self):
        # Enable retries and run test_get_environment_all_params.
        _service.enable_retries()
        self.test_get_environment_all_params()

        # Disable retries and run test_get_environment_all_params.
        _service.disable_retries()
        self.test_get_environment_all_params()

    @responses.activate
    def test_get_environment_required_params(self):
        """
        test_get_environment_required_params()

        Verify the call succeeds with only the required path parameters.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/environments/testString')
        mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        assistant_id = 'testString'
        environment_id = 'testString'

        # Invoke method
        response = _service.get_environment(
            assistant_id,
            environment_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_environment_required_params_with_retries(self):
        # Enable retries and run test_get_environment_required_params.
        _service.enable_retries()
        self.test_get_environment_required_params()

        # Disable retries and run test_get_environment_required_params.
        _service.disable_retries()
        self.test_get_environment_required_params()

    @responses.activate
    def test_get_environment_value_error(self):
        """
        test_get_environment_value_error()

        Verify get_environment() raises ValueError when any required
        parameter is None.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/environments/testString')
        mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        assistant_id = 'testString'
        environment_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "assistant_id": assistant_id,
            "environment_id": environment_id,
        }
        for param in req_param_dict:
            # Compare keys with `==`, not `is`: identity comparison of strings
            # only works by accident of CPython interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_environment(**req_copy)

    def test_get_environment_value_error_with_retries(self):
        # Enable retries and run test_get_environment_value_error.
        _service.enable_retries()
        self.test_get_environment_value_error()

        # Disable retries and run test_get_environment_value_error.
        _service.disable_retries()
        self.test_get_environment_value_error()
+
+
class TestUpdateEnvironment:
    """
    Test Class for update_environment
    """

    @responses.activate
    def test_update_environment_all_params(self):
        """
        update_environment()

        Verify a POST to /v2/assistants/{assistant_id}/environments/{environment_id}
        serializes all optional body properties and returns the mocked 200.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/environments/testString')
        mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Construct a dict representation of a UpdateEnvironmentOrchestration model
        update_environment_orchestration_model = {}
        update_environment_orchestration_model['search_skill_fallback'] = True

        # Construct a dict representation of a EnvironmentSkill model
        environment_skill_model = {}
        environment_skill_model['skill_id'] = 'testString'
        environment_skill_model['type'] = 'dialog'
        environment_skill_model['disabled'] = True
        environment_skill_model['snapshot'] = 'testString'
        environment_skill_model['skill_reference'] = 'testString'

        # Set up parameter values
        assistant_id = 'testString'
        environment_id = 'testString'
        name = 'testString'
        description = 'testString'
        orchestration = update_environment_orchestration_model
        session_timeout = 10
        skill_references = [environment_skill_model]

        # Invoke method
        response = _service.update_environment(
            assistant_id,
            environment_id,
            name=name,
            description=description,
            orchestration=orchestration,
            session_timeout=session_timeout,
            skill_references=skill_references,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate body params: the request body must round-trip the models.
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['name'] == 'testString'
        assert req_body['description'] == 'testString'
        assert req_body['orchestration'] == update_environment_orchestration_model
        assert req_body['session_timeout'] == 10
        assert req_body['skill_references'] == [environment_skill_model]

    def test_update_environment_all_params_with_retries(self):
        # Enable retries and run test_update_environment_all_params.
        _service.enable_retries()
        self.test_update_environment_all_params()

        # Disable retries and run test_update_environment_all_params.
        _service.disable_retries()
        self.test_update_environment_all_params()

    @responses.activate
    def test_update_environment_required_params(self):
        """
        test_update_environment_required_params()

        Verify the call succeeds with only the required path parameters.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/environments/testString')
        mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        assistant_id = 'testString'
        environment_id = 'testString'

        # Invoke method
        response = _service.update_environment(
            assistant_id,
            environment_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_update_environment_required_params_with_retries(self):
        # Enable retries and run test_update_environment_required_params.
        _service.enable_retries()
        self.test_update_environment_required_params()

        # Disable retries and run test_update_environment_required_params.
        _service.disable_retries()
        self.test_update_environment_required_params()

    @responses.activate
    def test_update_environment_value_error(self):
        """
        test_update_environment_value_error()

        Verify update_environment() raises ValueError when any required
        parameter is None.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/environments/testString')
        mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        assistant_id = 'testString'
        environment_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "assistant_id": assistant_id,
            "environment_id": environment_id,
        }
        for param in req_param_dict:
            # Compare keys with `==`, not `is`: identity comparison of strings
            # only works by accident of CPython interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.update_environment(**req_copy)

    def test_update_environment_value_error_with_retries(self):
        # Enable retries and run test_update_environment_value_error.
        _service.enable_retries()
        self.test_update_environment_value_error()

        # Disable retries and run test_update_environment_value_error.
        _service.disable_retries()
        self.test_update_environment_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Environments
+##############################################################################
+
+##############################################################################
+# Start of Service: Releases
+##############################################################################
+# region
+
+
class TestCreateRelease:
    """
    Test Class for create_release
    """

    @responses.activate
    def test_create_release_all_params(self):
        """
        create_release()

        Verify a POST to /v2/assistants/{assistant_id}/releases serializes the
        optional description and returns the mocked 202 response.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/releases')
        mock_response = '{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=202,
        )

        # Set up parameter values
        assistant_id = 'testString'
        description = 'testString'

        # Invoke method
        response = _service.create_release(
            assistant_id,
            description=description,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['description'] == 'testString'

    def test_create_release_all_params_with_retries(self):
        # Enable retries and run test_create_release_all_params.
        _service.enable_retries()
        self.test_create_release_all_params()

        # Disable retries and run test_create_release_all_params.
        _service.disable_retries()
        self.test_create_release_all_params()

    @responses.activate
    def test_create_release_required_params(self):
        """
        test_create_release_required_params()

        Verify the call succeeds with only the required assistant_id.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/releases')
        mock_response = '{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=202,
        )

        # Set up parameter values
        assistant_id = 'testString'

        # Invoke method
        response = _service.create_release(
            assistant_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202

    def test_create_release_required_params_with_retries(self):
        # Enable retries and run test_create_release_required_params.
        _service.enable_retries()
        self.test_create_release_required_params()

        # Disable retries and run test_create_release_required_params.
        _service.disable_retries()
        self.test_create_release_required_params()

    @responses.activate
    def test_create_release_value_error(self):
        """
        test_create_release_value_error()

        Verify create_release() raises ValueError when any required
        parameter is None.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/releases')
        mock_response = '{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=202,
        )

        # Set up parameter values
        assistant_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "assistant_id": assistant_id,
        }
        for param in req_param_dict:
            # Compare keys with `==`, not `is`: identity comparison of strings
            # only works by accident of CPython interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.create_release(**req_copy)

    def test_create_release_value_error_with_retries(self):
        # Enable retries and run test_create_release_value_error.
        _service.enable_retries()
        self.test_create_release_value_error()

        # Disable retries and run test_create_release_value_error.
        _service.disable_retries()
        self.test_create_release_value_error()
+
+
class TestListReleases:
    """
    Test Class for list_releases
    """

    @responses.activate
    def test_list_releases_all_params(self):
        """
        list_releases()

        Verify a GET to /v2/assistants/{assistant_id}/releases carries all
        optional query parameters and returns the mocked 200 response.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/releases')
        mock_response = '{"releases": [{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        assistant_id = 'testString'
        page_limit = 100
        include_count = False
        sort = 'name'
        cursor = 'testString'
        include_audit = False

        # Invoke method
        response = _service.list_releases(
            assistant_id,
            page_limit=page_limit,
            include_count=include_count,
            sort=sort,
            cursor=cursor,
            include_audit=include_audit,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'page_limit={}'.format(page_limit) in query_string
        assert 'include_count={}'.format('true' if include_count else 'false') in query_string
        assert 'sort={}'.format(sort) in query_string
        assert 'cursor={}'.format(cursor) in query_string
        assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string

    def test_list_releases_all_params_with_retries(self):
        # Enable retries and run test_list_releases_all_params.
        _service.enable_retries()
        self.test_list_releases_all_params()

        # Disable retries and run test_list_releases_all_params.
        _service.disable_retries()
        self.test_list_releases_all_params()

    @responses.activate
    def test_list_releases_required_params(self):
        """
        test_list_releases_required_params()

        Verify the call succeeds with only the required assistant_id.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/releases')
        mock_response = '{"releases": [{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        assistant_id = 'testString'

        # Invoke method
        response = _service.list_releases(
            assistant_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_list_releases_required_params_with_retries(self):
        # Enable retries and run test_list_releases_required_params.
        _service.enable_retries()
        self.test_list_releases_required_params()

        # Disable retries and run test_list_releases_required_params.
        _service.disable_retries()
        self.test_list_releases_required_params()

    @responses.activate
    def test_list_releases_value_error(self):
        """
        test_list_releases_value_error()

        Verify list_releases() raises ValueError when any required
        parameter is None.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/releases')
        mock_response = '{"releases": [{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}], "pagination": {"refresh_url": "refresh_url", "next_url": "next_url", "total": 5, "matched": 7, "refresh_cursor": "refresh_cursor", "next_cursor": "next_cursor"}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        assistant_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "assistant_id": assistant_id,
        }
        for param in req_param_dict:
            # Compare keys with `==`, not `is`: identity comparison of strings
            # only works by accident of CPython interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.list_releases(**req_copy)

    def test_list_releases_value_error_with_retries(self):
        # Enable retries and run test_list_releases_value_error.
        _service.enable_retries()
        self.test_list_releases_value_error()

        # Disable retries and run test_list_releases_value_error.
        _service.disable_retries()
        self.test_list_releases_value_error()
+
+
class TestGetRelease:
    """
    Test Class for get_release
    """

    @responses.activate
    def test_get_release_all_params(self):
        """
        get_release()

        Verify a GET to /v2/assistants/{assistant_id}/releases/{release}
        carries the include_audit query parameter and returns the mocked 200.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/releases/testString')
        mock_response = '{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        assistant_id = 'testString'
        release = 'testString'
        include_audit = False

        # Invoke method
        response = _service.get_release(
            assistant_id,
            release,
            include_audit=include_audit,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params (booleans are serialized as 'true'/'false')
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string

    def test_get_release_all_params_with_retries(self):
        # Enable retries and run test_get_release_all_params.
        _service.enable_retries()
        self.test_get_release_all_params()

        # Disable retries and run test_get_release_all_params.
        _service.disable_retries()
        self.test_get_release_all_params()

    @responses.activate
    def test_get_release_required_params(self):
        """
        test_get_release_required_params()

        Verify the call succeeds with only the required path parameters.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/releases/testString')
        mock_response = '{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        assistant_id = 'testString'
        release = 'testString'

        # Invoke method
        response = _service.get_release(
            assistant_id,
            release,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_release_required_params_with_retries(self):
        # Enable retries and run test_get_release_required_params.
        _service.enable_retries()
        self.test_get_release_required_params()

        # Disable retries and run test_get_release_required_params.
        _service.disable_retries()
        self.test_get_release_required_params()

    @responses.activate
    def test_get_release_value_error(self):
        """
        test_get_release_value_error()

        Verify get_release() raises ValueError when any required
        parameter is None.
        """
        # Set up mock
        url = preprocess_url('/v2/assistants/testString/releases/testString')
        mock_response = '{"release": "release", "description": "description", "environment_references": [{"name": "name", "environment_id": "environment_id", "environment": "draft"}], "content": {"skills": [{"skill_id": "skill_id", "type": "dialog", "snapshot": "snapshot"}]}, "status": "Available", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        assistant_id = 'testString'
        release = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "assistant_id": assistant_id,
            "release": release,
        }
        for param in req_param_dict:
            # Compare keys with `==`, not `is`: identity comparison of strings
            # only works by accident of CPython interning.
            req_copy = {key: None if key == param else val for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_release(**req_copy)

    def test_get_release_value_error_with_retries(self):
        # Enable retries and run test_get_release_value_error.
        _service.enable_retries()
        self.test_get_release_value_error()

        # Disable retries and run test_get_release_value_error.
        _service.disable_retries()
        self.test_get_release_value_error()
+
+
+class TestDeleteRelease:
+ """
+ Test Class for delete_release
+ """
+
+ @responses.activate
+ def test_delete_release_all_params(self):
+ """
+ delete_release()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/releases/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ release = 'testString'
+
+ # Invoke method
+ response = _service.delete_release(
+ assistant_id,
+ release,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_delete_release_all_params_with_retries(self):
+ # Enable retries and run test_delete_release_all_params.
+ _service.enable_retries()
+ self.test_delete_release_all_params()
+
+ # Disable retries and run test_delete_release_all_params.
+ _service.disable_retries()
+ self.test_delete_release_all_params()
+
+ @responses.activate
+ def test_delete_release_value_error(self):
+ """
+ test_delete_release_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/releases/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ release = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "assistant_id": assistant_id,
+ "release": release,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_release(**req_copy)
+
+ def test_delete_release_value_error_with_retries(self):
+ # Enable retries and run test_delete_release_value_error.
+ _service.enable_retries()
+ self.test_delete_release_value_error()
+
+ # Disable retries and run test_delete_release_value_error.
+ _service.disable_retries()
+ self.test_delete_release_value_error()
+
+
+class TestDeployRelease:
+ """
+ Test Class for deploy_release
+ """
+
+ @responses.activate
+ def test_deploy_release_all_params(self):
+ """
+ deploy_release()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/releases/testString/deploy')
+ mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ release = 'testString'
+ environment_id = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.deploy_release(
+ assistant_id,
+ release,
+ environment_id,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['environment_id'] == 'testString'
+
+ def test_deploy_release_all_params_with_retries(self):
+ # Enable retries and run test_deploy_release_all_params.
+ _service.enable_retries()
+ self.test_deploy_release_all_params()
+
+ # Disable retries and run test_deploy_release_all_params.
+ _service.disable_retries()
+ self.test_deploy_release_all_params()
+
+ @responses.activate
+ def test_deploy_release_required_params(self):
+ """
+ test_deploy_release_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/releases/testString/deploy')
+ mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ release = 'testString'
+ environment_id = 'testString'
+
+ # Invoke method
+ response = _service.deploy_release(
+ assistant_id,
+ release,
+ environment_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['environment_id'] == 'testString'
+
+ def test_deploy_release_required_params_with_retries(self):
+ # Enable retries and run test_deploy_release_required_params.
+ _service.enable_retries()
+ self.test_deploy_release_required_params()
+
+ # Disable retries and run test_deploy_release_required_params.
+ _service.disable_retries()
+ self.test_deploy_release_required_params()
+
+ @responses.activate
+ def test_deploy_release_value_error(self):
+ """
+ test_deploy_release_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/releases/testString/deploy')
+ mock_response = '{"name": "name", "description": "description", "assistant_id": "assistant_id", "environment_id": "environment_id", "environment": "environment", "release_reference": {"release": "release"}, "orchestration": {"search_skill_fallback": false}, "session_timeout": 10, "integration_references": [{"integration_id": "integration_id", "type": "type"}], "skill_references": [{"skill_id": "skill_id", "type": "dialog", "disabled": true, "snapshot": "snapshot", "skill_reference": "skill_reference"}], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ release = 'testString'
+ environment_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "assistant_id": assistant_id,
+ "release": release,
+ "environment_id": environment_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.deploy_release(**req_copy)
+
+ def test_deploy_release_value_error_with_retries(self):
+ # Enable retries and run test_deploy_release_value_error.
+ _service.enable_retries()
+ self.test_deploy_release_value_error()
+
+ # Disable retries and run test_deploy_release_value_error.
+ _service.disable_retries()
+ self.test_deploy_release_value_error()
+
+
+class TestCreateReleaseExport:
+ """
+ Test Class for create_release_export
+ """
+
+ @responses.activate
+ def test_create_release_export_all_params(self):
+ """
+ create_release_export()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/releases/testString/export')
+ mock_response = '{"status": "Available", "task_id": "task_id", "assistant_id": "assistant_id", "release": "release", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status_errors": [{"message": "message"}], "status_description": "status_description"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ release = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.create_release_export(
+ assistant_id,
+ release,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_create_release_export_all_params_with_retries(self):
+ # Enable retries and run test_create_release_export_all_params.
+ _service.enable_retries()
+ self.test_create_release_export_all_params()
+
+ # Disable retries and run test_create_release_export_all_params.
+ _service.disable_retries()
+ self.test_create_release_export_all_params()
+
+ @responses.activate
+ def test_create_release_export_required_params(self):
+ """
+ test_create_release_export_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/releases/testString/export')
+ mock_response = '{"status": "Available", "task_id": "task_id", "assistant_id": "assistant_id", "release": "release", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status_errors": [{"message": "message"}], "status_description": "status_description"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ release = 'testString'
+
+ # Invoke method
+ response = _service.create_release_export(
+ assistant_id,
+ release,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_create_release_export_required_params_with_retries(self):
+ # Enable retries and run test_create_release_export_required_params.
+ _service.enable_retries()
+ self.test_create_release_export_required_params()
+
+ # Disable retries and run test_create_release_export_required_params.
+ _service.disable_retries()
+ self.test_create_release_export_required_params()
+
+ @responses.activate
+ def test_create_release_export_value_error(self):
+ """
+ test_create_release_export_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/releases/testString/export')
+ mock_response = '{"status": "Available", "task_id": "task_id", "assistant_id": "assistant_id", "release": "release", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status_errors": [{"message": "message"}], "status_description": "status_description"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ release = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "assistant_id": assistant_id,
+ "release": release,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.create_release_export(**req_copy)
+
+ def test_create_release_export_value_error_with_retries(self):
+ # Enable retries and run test_create_release_export_value_error.
+ _service.enable_retries()
+ self.test_create_release_export_value_error()
+
+ # Disable retries and run test_create_release_export_value_error.
+ _service.disable_retries()
+ self.test_create_release_export_value_error()
+
+
+class TestDownloadReleaseExport:
+ """
+ Test Class for download_release_export
+ """
+
+ @responses.activate
+ def test_download_release_export_all_params(self):
+ """
+ download_release_export()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/releases/testString/export')
+ mock_response = '{"status": "Available", "task_id": "task_id", "assistant_id": "assistant_id", "release": "release", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status_errors": [{"message": "message"}], "status_description": "status_description"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ release = 'testString'
+ accept = 'application/json'
+ include_audit = False
+
+ # Invoke method
+ response = _service.download_release_export(
+ assistant_id,
+ release,
+ accept=accept,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_download_release_export_all_params_with_retries(self):
+ # Enable retries and run test_download_release_export_all_params.
+ _service.enable_retries()
+ self.test_download_release_export_all_params()
+
+ # Disable retries and run test_download_release_export_all_params.
+ _service.disable_retries()
+ self.test_download_release_export_all_params()
+
+ @responses.activate
+ def test_download_release_export_required_params(self):
+ """
+ test_download_release_export_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/releases/testString/export')
+ mock_response = '{"status": "Available", "task_id": "task_id", "assistant_id": "assistant_id", "release": "release", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status_errors": [{"message": "message"}], "status_description": "status_description"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ release = 'testString'
+
+ # Invoke method
+ response = _service.download_release_export(
+ assistant_id,
+ release,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_download_release_export_required_params_with_retries(self):
+ # Enable retries and run test_download_release_export_required_params.
+ _service.enable_retries()
+ self.test_download_release_export_required_params()
+
+ # Disable retries and run test_download_release_export_required_params.
+ _service.disable_retries()
+ self.test_download_release_export_required_params()
+
+ @responses.activate
+ def test_download_release_export_value_error(self):
+ """
+ test_download_release_export_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/releases/testString/export')
+ mock_response = '{"status": "Available", "task_id": "task_id", "assistant_id": "assistant_id", "release": "release", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status_errors": [{"message": "message"}], "status_description": "status_description"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ release = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "assistant_id": assistant_id,
+ "release": release,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.download_release_export(**req_copy)
+
+ def test_download_release_export_value_error_with_retries(self):
+ # Enable retries and run test_download_release_export_value_error.
+ _service.enable_retries()
+ self.test_download_release_export_value_error()
+
+ # Disable retries and run test_download_release_export_value_error.
+ _service.disable_retries()
+ self.test_download_release_export_value_error()
+
+
+class TestCreateReleaseImport:
+ """
+ Test Class for create_release_import
+ """
+
+ @responses.activate
+ def test_create_release_import_all_params(self):
+ """
+ create_release_import()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/import')
+ mock_response = '{"status": "Failed", "task_id": "task_id", "assistant_id": "assistant_id", "skill_impact_in_draft": ["action"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=202,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ body = io.BytesIO(b'This is a mock file.').getvalue()
+ include_audit = False
+
+ # Invoke method
+ response = _service.create_release_import(
+ assistant_id,
+ body,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 202
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ assert responses.calls[0].request.body == body
+
+ def test_create_release_import_all_params_with_retries(self):
+ # Enable retries and run test_create_release_import_all_params.
+ _service.enable_retries()
+ self.test_create_release_import_all_params()
+
+ # Disable retries and run test_create_release_import_all_params.
+ _service.disable_retries()
+ self.test_create_release_import_all_params()
+
+ @responses.activate
+ def test_create_release_import_required_params(self):
+ """
+ test_create_release_import_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/import')
+ mock_response = '{"status": "Failed", "task_id": "task_id", "assistant_id": "assistant_id", "skill_impact_in_draft": ["action"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=202,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ body = io.BytesIO(b'This is a mock file.').getvalue()
+
+ # Invoke method
+ response = _service.create_release_import(
+ assistant_id,
+ body,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 202
+ # Validate body params
+ assert responses.calls[0].request.body == body
+
+ def test_create_release_import_required_params_with_retries(self):
+ # Enable retries and run test_create_release_import_required_params.
+ _service.enable_retries()
+ self.test_create_release_import_required_params()
+
+ # Disable retries and run test_create_release_import_required_params.
+ _service.disable_retries()
+ self.test_create_release_import_required_params()
+
+ @responses.activate
+ def test_create_release_import_value_error(self):
+ """
+ test_create_release_import_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/import')
+ mock_response = '{"status": "Failed", "task_id": "task_id", "assistant_id": "assistant_id", "skill_impact_in_draft": ["action"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=202,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ body = io.BytesIO(b'This is a mock file.').getvalue()
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "assistant_id": assistant_id,
+ "body": body,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.create_release_import(**req_copy)
+
+ def test_create_release_import_value_error_with_retries(self):
+ # Enable retries and run test_create_release_import_value_error.
+ _service.enable_retries()
+ self.test_create_release_import_value_error()
+
+ # Disable retries and run test_create_release_import_value_error.
+ _service.disable_retries()
+ self.test_create_release_import_value_error()
+
+
+class TestGetReleaseImportStatus:
+ """
+ Test Class for get_release_import_status
+ """
+
+ @responses.activate
+ def test_get_release_import_status_all_params(self):
+ """
+ get_release_import_status()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/import')
+ mock_response = '{"status": "Completed", "task_id": "task_id", "assistant_id": "assistant_id", "status_errors": [{"message": "message"}], "status_description": "status_description", "skill_impact_in_draft": ["action"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ include_audit = False
+
+ # Invoke method
+ response = _service.get_release_import_status(
+ assistant_id,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+ def test_get_release_import_status_all_params_with_retries(self):
+ # Enable retries and run test_get_release_import_status_all_params.
+ _service.enable_retries()
+ self.test_get_release_import_status_all_params()
+
+ # Disable retries and run test_get_release_import_status_all_params.
+ _service.disable_retries()
+ self.test_get_release_import_status_all_params()
+
+ @responses.activate
+ def test_get_release_import_status_required_params(self):
+ """
+ test_get_release_import_status_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/import')
+ mock_response = '{"status": "Completed", "task_id": "task_id", "assistant_id": "assistant_id", "status_errors": [{"message": "message"}], "status_description": "status_description", "skill_impact_in_draft": ["action"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+
+ # Invoke method
+ response = _service.get_release_import_status(
+ assistant_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_get_release_import_status_required_params_with_retries(self):
+ # Enable retries and run test_get_release_import_status_required_params.
+ _service.enable_retries()
+ self.test_get_release_import_status_required_params()
+
+ # Disable retries and run test_get_release_import_status_required_params.
+ _service.disable_retries()
+ self.test_get_release_import_status_required_params()
+
+ @responses.activate
+ def test_get_release_import_status_value_error(self):
+ """
+ test_get_release_import_status_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/import')
+ mock_response = '{"status": "Completed", "task_id": "task_id", "assistant_id": "assistant_id", "status_errors": [{"message": "message"}], "status_description": "status_description", "skill_impact_in_draft": ["action"], "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "assistant_id": assistant_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.get_release_import_status(**req_copy)
+
+ def test_get_release_import_status_value_error_with_retries(self):
+ # Enable retries and run test_get_release_import_status_value_error.
+ _service.enable_retries()
+ self.test_get_release_import_status_value_error()
+
+ # Disable retries and run test_get_release_import_status_value_error.
+ _service.disable_retries()
+ self.test_get_release_import_status_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Releases
+##############################################################################
+
+##############################################################################
+# Start of Service: Skills
+##############################################################################
+# region
+
+
+class TestGetSkill:
+ """
+ Test Class for get_skill
+ """
+
+ @responses.activate
+ def test_get_skill_all_params(self):
+ """
+ get_skill()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/skills/testString')
+ mock_response = '{"name": "name", "description": "description", "workspace": {"anyKey": "anyValue"}, "skill_id": "skill_id", "status": "Available", "status_errors": [{"message": "message"}], "status_description": "status_description", "dialog_settings": {"anyKey": "anyValue"}, "assistant_id": "assistant_id", "workspace_id": "workspace_id", "environment_id": "environment_id", "valid": false, "next_snapshot_version": "next_snapshot_version", "search_settings": {"discovery": {"instance_id": "instance_id", "project_id": "project_id", "url": "url", "max_primary_results": 10000, "max_total_results": 10000, "confidence_threshold": 0.0, "highlight": false, "find_answers": true, "authentication": {"basic": "basic", "bearer": "bearer"}}, "messages": {"success": "success", "error": "error", "no_result": "no_result"}, "schema_mapping": {"url": "url", "body": "body", "title": "title"}, "elastic_search": {"url": "url", "port": "port", "username": "username", "password": "password", "index": "index", "filter": ["anyValue"], "query_body": {"anyKey": "anyValue"}, "managed_index": "managed_index", "apikey": "apikey"}, "conversational_search": {"enabled": true, "response_length": {"option": "moderate"}, "search_confidence": {"threshold": "less_often"}}, "server_side_search": {"url": "url", "port": "port", "username": "username", "password": "password", "filter": "filter", "metadata": {"anyKey": "anyValue"}, "apikey": "apikey", "no_auth": false, "auth_type": "basic"}, "client_side_search": {"filter": "filter", "metadata": {"anyKey": "anyValue"}}}, "warnings": [{"code": "code", "path": "path", "message": "message"}], "language": "language", "type": "action"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ skill_id = 'testString'
+
+ # Invoke method
+ response = _service.get_skill(
+ assistant_id,
+ skill_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_get_skill_all_params_with_retries(self):
+ # Enable retries and run test_get_skill_all_params.
+ _service.enable_retries()
+ self.test_get_skill_all_params()
+
+ # Disable retries and run test_get_skill_all_params.
+ _service.disable_retries()
+ self.test_get_skill_all_params()
+
+ @responses.activate
+ def test_get_skill_value_error(self):
+ """
+ test_get_skill_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/skills/testString')
+ mock_response = '{"name": "name", "description": "description", "workspace": {"anyKey": "anyValue"}, "skill_id": "skill_id", "status": "Available", "status_errors": [{"message": "message"}], "status_description": "status_description", "dialog_settings": {"anyKey": "anyValue"}, "assistant_id": "assistant_id", "workspace_id": "workspace_id", "environment_id": "environment_id", "valid": false, "next_snapshot_version": "next_snapshot_version", "search_settings": {"discovery": {"instance_id": "instance_id", "project_id": "project_id", "url": "url", "max_primary_results": 10000, "max_total_results": 10000, "confidence_threshold": 0.0, "highlight": false, "find_answers": true, "authentication": {"basic": "basic", "bearer": "bearer"}}, "messages": {"success": "success", "error": "error", "no_result": "no_result"}, "schema_mapping": {"url": "url", "body": "body", "title": "title"}, "elastic_search": {"url": "url", "port": "port", "username": "username", "password": "password", "index": "index", "filter": ["anyValue"], "query_body": {"anyKey": "anyValue"}, "managed_index": "managed_index", "apikey": "apikey"}, "conversational_search": {"enabled": true, "response_length": {"option": "moderate"}, "search_confidence": {"threshold": "less_often"}}, "server_side_search": {"url": "url", "port": "port", "username": "username", "password": "password", "filter": "filter", "metadata": {"anyKey": "anyValue"}, "apikey": "apikey", "no_auth": false, "auth_type": "basic"}, "client_side_search": {"filter": "filter", "metadata": {"anyKey": "anyValue"}}}, "warnings": [{"code": "code", "path": "path", "message": "message"}], "language": "language", "type": "action"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ skill_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "assistant_id": assistant_id,
+ "skill_id": skill_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.get_skill(**req_copy)
+
+ def test_get_skill_value_error_with_retries(self):
+ # Enable retries and run test_get_skill_value_error.
+ _service.enable_retries()
+ self.test_get_skill_value_error()
+
+ # Disable retries and run test_get_skill_value_error.
+ _service.disable_retries()
+ self.test_get_skill_value_error()
+
+
+class TestUpdateSkill:
+    """
+    Unit tests for the update_skill() service operation (POST skills endpoint).
+    """
+
+    @responses.activate
+    def test_update_skill_all_params(self):
+        """
+        update_skill() with all optional params; verifies request body and 202 status.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants/testString/skills/testString')
+        mock_response = '{"name": "name", "description": "description", "workspace": {"anyKey": "anyValue"}, "skill_id": "skill_id", "status": "Available", "status_errors": [{"message": "message"}], "status_description": "status_description", "dialog_settings": {"anyKey": "anyValue"}, "assistant_id": "assistant_id", "workspace_id": "workspace_id", "environment_id": "environment_id", "valid": false, "next_snapshot_version": "next_snapshot_version", "search_settings": {"discovery": {"instance_id": "instance_id", "project_id": "project_id", "url": "url", "max_primary_results": 10000, "max_total_results": 10000, "confidence_threshold": 0.0, "highlight": false, "find_answers": true, "authentication": {"basic": "basic", "bearer": "bearer"}}, "messages": {"success": "success", "error": "error", "no_result": "no_result"}, "schema_mapping": {"url": "url", "body": "body", "title": "title"}, "elastic_search": {"url": "url", "port": "port", "username": "username", "password": "password", "index": "index", "filter": ["anyValue"], "query_body": {"anyKey": "anyValue"}, "managed_index": "managed_index", "apikey": "apikey"}, "conversational_search": {"enabled": true, "response_length": {"option": "moderate"}, "search_confidence": {"threshold": "less_often"}}, "server_side_search": {"url": "url", "port": "port", "username": "username", "password": "password", "filter": "filter", "metadata": {"anyKey": "anyValue"}, "apikey": "apikey", "no_auth": false, "auth_type": "basic"}, "client_side_search": {"filter": "filter", "metadata": {"anyKey": "anyValue"}}}, "warnings": [{"code": "code", "path": "path", "message": "message"}], "language": "language", "type": "action"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=202,
+        )
+
+        # Construct a dict representation of a SearchSettingsDiscoveryAuthentication model
+        search_settings_discovery_authentication_model = {}
+        search_settings_discovery_authentication_model['basic'] = 'testString'
+        search_settings_discovery_authentication_model['bearer'] = 'testString'
+
+        # Construct a dict representation of a SearchSettingsDiscovery model
+        search_settings_discovery_model = {}
+        search_settings_discovery_model['instance_id'] = 'testString'
+        search_settings_discovery_model['project_id'] = 'testString'
+        search_settings_discovery_model['url'] = 'testString'
+        search_settings_discovery_model['max_primary_results'] = 10000
+        search_settings_discovery_model['max_total_results'] = 10000
+        search_settings_discovery_model['confidence_threshold'] = 0.0
+        search_settings_discovery_model['highlight'] = True
+        search_settings_discovery_model['find_answers'] = True
+        search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model
+
+        # Construct a dict representation of a SearchSettingsMessages model
+        search_settings_messages_model = {}
+        search_settings_messages_model['success'] = 'testString'
+        search_settings_messages_model['error'] = 'testString'
+        search_settings_messages_model['no_result'] = 'testString'
+
+        # Construct a dict representation of a SearchSettingsSchemaMapping model
+        search_settings_schema_mapping_model = {}
+        search_settings_schema_mapping_model['url'] = 'testString'
+        search_settings_schema_mapping_model['body'] = 'testString'
+        search_settings_schema_mapping_model['title'] = 'testString'
+
+        # Construct a dict representation of a SearchSettingsElasticSearch model
+        search_settings_elastic_search_model = {}
+        search_settings_elastic_search_model['url'] = 'testString'
+        search_settings_elastic_search_model['port'] = 'testString'
+        search_settings_elastic_search_model['username'] = 'testString'
+        search_settings_elastic_search_model['password'] = 'testString'
+        search_settings_elastic_search_model['index'] = 'testString'
+        search_settings_elastic_search_model['filter'] = ['testString']
+        search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'}
+        search_settings_elastic_search_model['managed_index'] = 'testString'
+        search_settings_elastic_search_model['apikey'] = 'testString'
+
+        # Construct a dict representation of a SearchSettingsConversationalSearchResponseLength model
+        search_settings_conversational_search_response_length_model = {}
+        search_settings_conversational_search_response_length_model['option'] = 'moderate'
+
+        # Construct a dict representation of a SearchSettingsConversationalSearchSearchConfidence model
+        search_settings_conversational_search_search_confidence_model = {}
+        search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often'
+
+        # Construct a dict representation of a SearchSettingsConversationalSearch model
+        search_settings_conversational_search_model = {}
+        search_settings_conversational_search_model['enabled'] = True
+        search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model
+        search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model
+
+        # Construct a dict representation of a SearchSettingsServerSideSearch model
+        search_settings_server_side_search_model = {}
+        search_settings_server_side_search_model['url'] = 'testString'
+        search_settings_server_side_search_model['port'] = 'testString'
+        search_settings_server_side_search_model['username'] = 'testString'
+        search_settings_server_side_search_model['password'] = 'testString'
+        search_settings_server_side_search_model['filter'] = 'testString'
+        search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'}
+        search_settings_server_side_search_model['apikey'] = 'testString'
+        search_settings_server_side_search_model['no_auth'] = True
+        search_settings_server_side_search_model['auth_type'] = 'basic'
+
+        # Construct a dict representation of a SearchSettingsClientSideSearch model
+        search_settings_client_side_search_model = {}
+        search_settings_client_side_search_model['filter'] = 'testString'
+        search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'}
+
+        # Construct a dict representation of a SearchSettings model
+        search_settings_model = {}
+        search_settings_model['discovery'] = search_settings_discovery_model
+        search_settings_model['messages'] = search_settings_messages_model
+        search_settings_model['schema_mapping'] = search_settings_schema_mapping_model
+        search_settings_model['elastic_search'] = search_settings_elastic_search_model
+        search_settings_model['conversational_search'] = search_settings_conversational_search_model
+        search_settings_model['server_side_search'] = search_settings_server_side_search_model
+        search_settings_model['client_side_search'] = search_settings_client_side_search_model
+
+        # Set up parameter values
+        assistant_id = 'testString'
+        skill_id = 'testString'
+        name = 'testString'
+        description = 'testString'
+        workspace = {'anyKey': 'anyValue'}
+        dialog_settings = {'anyKey': 'anyValue'}
+        search_settings = search_settings_model
+
+        # Invoke method
+        response = _service.update_skill(
+            assistant_id,
+            skill_id,
+            name=name,
+            description=description,
+            workspace=workspace,
+            dialog_settings=dialog_settings,
+            search_settings=search_settings,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 202
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+        assert req_body['name'] == 'testString'
+        assert req_body['description'] == 'testString'
+        assert req_body['workspace'] == {'anyKey': 'anyValue'}
+        assert req_body['dialog_settings'] == {'anyKey': 'anyValue'}
+        assert req_body['search_settings'] == search_settings_model
+
+    def test_update_skill_all_params_with_retries(self):
+        # Enable retries and run test_update_skill_all_params.
+        _service.enable_retries()
+        self.test_update_skill_all_params()
+
+        # Disable retries and run test_update_skill_all_params.
+        _service.disable_retries()
+        self.test_update_skill_all_params()
+
+    @responses.activate
+    def test_update_skill_value_error(self):
+        """
+        update_skill() raises ValueError when a required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants/testString/skills/testString')
+        mock_response = '{"name": "name", "description": "description", "workspace": {"anyKey": "anyValue"}, "skill_id": "skill_id", "status": "Available", "status_errors": [{"message": "message"}], "status_description": "status_description", "dialog_settings": {"anyKey": "anyValue"}, "assistant_id": "assistant_id", "workspace_id": "workspace_id", "environment_id": "environment_id", "valid": false, "next_snapshot_version": "next_snapshot_version", "search_settings": {"discovery": {"instance_id": "instance_id", "project_id": "project_id", "url": "url", "max_primary_results": 10000, "max_total_results": 10000, "confidence_threshold": 0.0, "highlight": false, "find_answers": true, "authentication": {"basic": "basic", "bearer": "bearer"}}, "messages": {"success": "success", "error": "error", "no_result": "no_result"}, "schema_mapping": {"url": "url", "body": "body", "title": "title"}, "elastic_search": {"url": "url", "port": "port", "username": "username", "password": "password", "index": "index", "filter": ["anyValue"], "query_body": {"anyKey": "anyValue"}, "managed_index": "managed_index", "apikey": "apikey"}, "conversational_search": {"enabled": true, "response_length": {"option": "moderate"}, "search_confidence": {"threshold": "less_often"}}, "server_side_search": {"url": "url", "port": "port", "username": "username", "password": "password", "filter": "filter", "metadata": {"anyKey": "anyValue"}, "apikey": "apikey", "no_auth": false, "auth_type": "basic"}, "client_side_search": {"filter": "filter", "metadata": {"anyKey": "anyValue"}}}, "warnings": [{"code": "code", "path": "path", "message": "message"}], "language": "language", "type": "action"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=202,
+        )
+
+        # Construct a dict representation of a SearchSettingsDiscoveryAuthentication model
+        search_settings_discovery_authentication_model = {}
+        search_settings_discovery_authentication_model['basic'] = 'testString'
+        search_settings_discovery_authentication_model['bearer'] = 'testString'
+
+        # Construct a dict representation of a SearchSettingsDiscovery model
+        search_settings_discovery_model = {}
+        search_settings_discovery_model['instance_id'] = 'testString'
+        search_settings_discovery_model['project_id'] = 'testString'
+        search_settings_discovery_model['url'] = 'testString'
+        search_settings_discovery_model['max_primary_results'] = 10000
+        search_settings_discovery_model['max_total_results'] = 10000
+        search_settings_discovery_model['confidence_threshold'] = 0.0
+        search_settings_discovery_model['highlight'] = True
+        search_settings_discovery_model['find_answers'] = True
+        search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model
+
+        # Construct a dict representation of a SearchSettingsMessages model
+        search_settings_messages_model = {}
+        search_settings_messages_model['success'] = 'testString'
+        search_settings_messages_model['error'] = 'testString'
+        search_settings_messages_model['no_result'] = 'testString'
+
+        # Construct a dict representation of a SearchSettingsSchemaMapping model
+        search_settings_schema_mapping_model = {}
+        search_settings_schema_mapping_model['url'] = 'testString'
+        search_settings_schema_mapping_model['body'] = 'testString'
+        search_settings_schema_mapping_model['title'] = 'testString'
+
+        # Construct a dict representation of a SearchSettingsElasticSearch model
+        search_settings_elastic_search_model = {}
+        search_settings_elastic_search_model['url'] = 'testString'
+        search_settings_elastic_search_model['port'] = 'testString'
+        search_settings_elastic_search_model['username'] = 'testString'
+        search_settings_elastic_search_model['password'] = 'testString'
+        search_settings_elastic_search_model['index'] = 'testString'
+        search_settings_elastic_search_model['filter'] = ['testString']
+        search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'}
+        search_settings_elastic_search_model['managed_index'] = 'testString'
+        search_settings_elastic_search_model['apikey'] = 'testString'
+
+        # Construct a dict representation of a SearchSettingsConversationalSearchResponseLength model
+        search_settings_conversational_search_response_length_model = {}
+        search_settings_conversational_search_response_length_model['option'] = 'moderate'
+
+        # Construct a dict representation of a SearchSettingsConversationalSearchSearchConfidence model
+        search_settings_conversational_search_search_confidence_model = {}
+        search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often'
+
+        # Construct a dict representation of a SearchSettingsConversationalSearch model
+        search_settings_conversational_search_model = {}
+        search_settings_conversational_search_model['enabled'] = True
+        search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model
+        search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model
+
+        # Construct a dict representation of a SearchSettingsServerSideSearch model
+        search_settings_server_side_search_model = {}
+        search_settings_server_side_search_model['url'] = 'testString'
+        search_settings_server_side_search_model['port'] = 'testString'
+        search_settings_server_side_search_model['username'] = 'testString'
+        search_settings_server_side_search_model['password'] = 'testString'
+        search_settings_server_side_search_model['filter'] = 'testString'
+        search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'}
+        search_settings_server_side_search_model['apikey'] = 'testString'
+        search_settings_server_side_search_model['no_auth'] = True
+        search_settings_server_side_search_model['auth_type'] = 'basic'
+
+        # Construct a dict representation of a SearchSettingsClientSideSearch model
+        search_settings_client_side_search_model = {}
+        search_settings_client_side_search_model['filter'] = 'testString'
+        search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'}
+
+        # Construct a dict representation of a SearchSettings model
+        search_settings_model = {}
+        search_settings_model['discovery'] = search_settings_discovery_model
+        search_settings_model['messages'] = search_settings_messages_model
+        search_settings_model['schema_mapping'] = search_settings_schema_mapping_model
+        search_settings_model['elastic_search'] = search_settings_elastic_search_model
+        search_settings_model['conversational_search'] = search_settings_conversational_search_model
+        search_settings_model['server_side_search'] = search_settings_server_side_search_model
+        search_settings_model['client_side_search'] = search_settings_client_side_search_model
+
+        # Set up parameter values
+        assistant_id = 'testString'
+        skill_id = 'testString'
+        name = 'testString'
+        description = 'testString'
+        workspace = {'anyKey': 'anyValue'}
+        dialog_settings = {'anyKey': 'anyValue'}
+        search_settings = search_settings_model
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "assistant_id": assistant_id,
+            "skill_id": skill_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.update_skill(**req_copy)
+
+    def test_update_skill_value_error_with_retries(self):
+        # Enable retries and run test_update_skill_value_error.
+        _service.enable_retries()
+        self.test_update_skill_value_error()
+
+        # Disable retries and run test_update_skill_value_error.
+        _service.disable_retries()
+        self.test_update_skill_value_error()
+
+
+class TestExportSkills:
+    """
+    Unit tests for the export_skills() service operation (GET skills_export endpoint).
+    """
+
+    @responses.activate
+    def test_export_skills_all_params(self):
+        """
+        export_skills() with all optional params; verifies the include_audit query param.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants/testString/skills_export')
+        mock_response = '{"assistant_skills": [{"name": "name", "description": "description", "workspace": {"anyKey": "anyValue"}, "skill_id": "skill_id", "status": "Available", "status_errors": [{"message": "message"}], "status_description": "status_description", "dialog_settings": {"anyKey": "anyValue"}, "assistant_id": "assistant_id", "workspace_id": "workspace_id", "environment_id": "environment_id", "valid": false, "next_snapshot_version": "next_snapshot_version", "search_settings": {"discovery": {"instance_id": "instance_id", "project_id": "project_id", "url": "url", "max_primary_results": 10000, "max_total_results": 10000, "confidence_threshold": 0.0, "highlight": false, "find_answers": true, "authentication": {"basic": "basic", "bearer": "bearer"}}, "messages": {"success": "success", "error": "error", "no_result": "no_result"}, "schema_mapping": {"url": "url", "body": "body", "title": "title"}, "elastic_search": {"url": "url", "port": "port", "username": "username", "password": "password", "index": "index", "filter": ["anyValue"], "query_body": {"anyKey": "anyValue"}, "managed_index": "managed_index", "apikey": "apikey"}, "conversational_search": {"enabled": true, "response_length": {"option": "moderate"}, "search_confidence": {"threshold": "less_often"}}, "server_side_search": {"url": "url", "port": "port", "username": "username", "password": "password", "filter": "filter", "metadata": {"anyKey": "anyValue"}, "apikey": "apikey", "no_auth": false, "auth_type": "basic"}, "client_side_search": {"filter": "filter", "metadata": {"anyKey": "anyValue"}}}, "warnings": [{"code": "code", "path": "path", "message": "message"}], "language": "language", "type": "action"}], "assistant_state": {"action_disabled": false, "dialog_disabled": false}}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        assistant_id = 'testString'
+        include_audit = False
+
+        # Invoke method
+        response = _service.export_skills(
+            assistant_id,
+            include_audit=include_audit,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)
+        assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+
+    def test_export_skills_all_params_with_retries(self):
+        # Enable retries and run test_export_skills_all_params.
+        _service.enable_retries()
+        self.test_export_skills_all_params()
+
+        # Disable retries and run test_export_skills_all_params.
+        _service.disable_retries()
+        self.test_export_skills_all_params()
+
+    @responses.activate
+    def test_export_skills_required_params(self):
+        """
+        export_skills() with only required parameters.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants/testString/skills_export')
+        mock_response = '{"assistant_skills": [{"name": "name", "description": "description", "workspace": {"anyKey": "anyValue"}, "skill_id": "skill_id", "status": "Available", "status_errors": [{"message": "message"}], "status_description": "status_description", "dialog_settings": {"anyKey": "anyValue"}, "assistant_id": "assistant_id", "workspace_id": "workspace_id", "environment_id": "environment_id", "valid": false, "next_snapshot_version": "next_snapshot_version", "search_settings": {"discovery": {"instance_id": "instance_id", "project_id": "project_id", "url": "url", "max_primary_results": 10000, "max_total_results": 10000, "confidence_threshold": 0.0, "highlight": false, "find_answers": true, "authentication": {"basic": "basic", "bearer": "bearer"}}, "messages": {"success": "success", "error": "error", "no_result": "no_result"}, "schema_mapping": {"url": "url", "body": "body", "title": "title"}, "elastic_search": {"url": "url", "port": "port", "username": "username", "password": "password", "index": "index", "filter": ["anyValue"], "query_body": {"anyKey": "anyValue"}, "managed_index": "managed_index", "apikey": "apikey"}, "conversational_search": {"enabled": true, "response_length": {"option": "moderate"}, "search_confidence": {"threshold": "less_often"}}, "server_side_search": {"url": "url", "port": "port", "username": "username", "password": "password", "filter": "filter", "metadata": {"anyKey": "anyValue"}, "apikey": "apikey", "no_auth": false, "auth_type": "basic"}, "client_side_search": {"filter": "filter", "metadata": {"anyKey": "anyValue"}}}, "warnings": [{"code": "code", "path": "path", "message": "message"}], "language": "language", "type": "action"}], "assistant_state": {"action_disabled": false, "dialog_disabled": false}}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        assistant_id = 'testString'
+
+        # Invoke method
+        response = _service.export_skills(
+            assistant_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_export_skills_required_params_with_retries(self):
+        # Enable retries and run test_export_skills_required_params.
+        _service.enable_retries()
+        self.test_export_skills_required_params()
+
+        # Disable retries and run test_export_skills_required_params.
+        _service.disable_retries()
+        self.test_export_skills_required_params()
+
+    @responses.activate
+    def test_export_skills_value_error(self):
+        """
+        export_skills() raises ValueError when a required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/assistants/testString/skills_export')
+        mock_response = '{"assistant_skills": [{"name": "name", "description": "description", "workspace": {"anyKey": "anyValue"}, "skill_id": "skill_id", "status": "Available", "status_errors": [{"message": "message"}], "status_description": "status_description", "dialog_settings": {"anyKey": "anyValue"}, "assistant_id": "assistant_id", "workspace_id": "workspace_id", "environment_id": "environment_id", "valid": false, "next_snapshot_version": "next_snapshot_version", "search_settings": {"discovery": {"instance_id": "instance_id", "project_id": "project_id", "url": "url", "max_primary_results": 10000, "max_total_results": 10000, "confidence_threshold": 0.0, "highlight": false, "find_answers": true, "authentication": {"basic": "basic", "bearer": "bearer"}}, "messages": {"success": "success", "error": "error", "no_result": "no_result"}, "schema_mapping": {"url": "url", "body": "body", "title": "title"}, "elastic_search": {"url": "url", "port": "port", "username": "username", "password": "password", "index": "index", "filter": ["anyValue"], "query_body": {"anyKey": "anyValue"}, "managed_index": "managed_index", "apikey": "apikey"}, "conversational_search": {"enabled": true, "response_length": {"option": "moderate"}, "search_confidence": {"threshold": "less_often"}}, "server_side_search": {"url": "url", "port": "port", "username": "username", "password": "password", "filter": "filter", "metadata": {"anyKey": "anyValue"}, "apikey": "apikey", "no_auth": false, "auth_type": "basic"}, "client_side_search": {"filter": "filter", "metadata": {"anyKey": "anyValue"}}}, "warnings": [{"code": "code", "path": "path", "message": "message"}], "language": "language", "type": "action"}], "assistant_state": {"action_disabled": false, "dialog_disabled": false}}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        assistant_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "assistant_id": assistant_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.export_skills(**req_copy)
+
+    def test_export_skills_value_error_with_retries(self):
+        # Enable retries and run test_export_skills_value_error.
+        _service.enable_retries()
+        self.test_export_skills_value_error()
+
+        # Disable retries and run test_export_skills_value_error.
+        _service.disable_retries()
+        self.test_export_skills_value_error()
+
+
+class TestImportSkills:
+ """
+ Test Class for import_skills
+ """
+
+ @responses.activate
+ def test_import_skills_all_params(self):
+ """
+ import_skills()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/skills_import')
+ mock_response = '{"assistant_id": "assistant_id", "status": "Available", "status_description": "status_description", "status_errors": [{"message": "message"}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=202,
+ )
+
+ # Construct a dict representation of a SearchSettingsDiscoveryAuthentication model
+ search_settings_discovery_authentication_model = {}
+ search_settings_discovery_authentication_model['basic'] = 'testString'
+ search_settings_discovery_authentication_model['bearer'] = 'testString'
+
+ # Construct a dict representation of a SearchSettingsDiscovery model
+ search_settings_discovery_model = {}
+ search_settings_discovery_model['instance_id'] = 'testString'
+ search_settings_discovery_model['project_id'] = 'testString'
+ search_settings_discovery_model['url'] = 'testString'
+ search_settings_discovery_model['max_primary_results'] = 10000
+ search_settings_discovery_model['max_total_results'] = 10000
+ search_settings_discovery_model['confidence_threshold'] = 0.0
+ search_settings_discovery_model['highlight'] = True
+ search_settings_discovery_model['find_answers'] = True
+ search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model
+
+ # Construct a dict representation of a SearchSettingsMessages model
+ search_settings_messages_model = {}
+ search_settings_messages_model['success'] = 'testString'
+ search_settings_messages_model['error'] = 'testString'
+ search_settings_messages_model['no_result'] = 'testString'
+
+ # Construct a dict representation of a SearchSettingsSchemaMapping model
+ search_settings_schema_mapping_model = {}
+ search_settings_schema_mapping_model['url'] = 'testString'
+ search_settings_schema_mapping_model['body'] = 'testString'
+ search_settings_schema_mapping_model['title'] = 'testString'
+
+ # Construct a dict representation of a SearchSettingsElasticSearch model
+ search_settings_elastic_search_model = {}
+ search_settings_elastic_search_model['url'] = 'testString'
+ search_settings_elastic_search_model['port'] = 'testString'
+ search_settings_elastic_search_model['username'] = 'testString'
+ search_settings_elastic_search_model['password'] = 'testString'
+ search_settings_elastic_search_model['index'] = 'testString'
+ search_settings_elastic_search_model['filter'] = ['testString']
+ search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'}
+ search_settings_elastic_search_model['managed_index'] = 'testString'
+ search_settings_elastic_search_model['apikey'] = 'testString'
+
+ # Construct a dict representation of a SearchSettingsConversationalSearchResponseLength model
+ search_settings_conversational_search_response_length_model = {}
+ search_settings_conversational_search_response_length_model['option'] = 'moderate'
+
+ # Construct a dict representation of a SearchSettingsConversationalSearchSearchConfidence model
+ search_settings_conversational_search_search_confidence_model = {}
+ search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often'
+
+ # Construct a dict representation of a SearchSettingsConversationalSearch model
+ search_settings_conversational_search_model = {}
+ search_settings_conversational_search_model['enabled'] = True
+ search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model
+ search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model
+
+ # Construct a dict representation of a SearchSettingsServerSideSearch model
+ search_settings_server_side_search_model = {}
+ search_settings_server_side_search_model['url'] = 'testString'
+ search_settings_server_side_search_model['port'] = 'testString'
+ search_settings_server_side_search_model['username'] = 'testString'
+ search_settings_server_side_search_model['password'] = 'testString'
+ search_settings_server_side_search_model['filter'] = 'testString'
+ search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'}
+ search_settings_server_side_search_model['apikey'] = 'testString'
+ search_settings_server_side_search_model['no_auth'] = True
+ search_settings_server_side_search_model['auth_type'] = 'basic'
+
+ # Construct a dict representation of a SearchSettingsClientSideSearch model
+ search_settings_client_side_search_model = {}
+ search_settings_client_side_search_model['filter'] = 'testString'
+ search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'}
+
+ # Construct a dict representation of a SearchSettings model
+ search_settings_model = {}
+ search_settings_model['discovery'] = search_settings_discovery_model
+ search_settings_model['messages'] = search_settings_messages_model
+ search_settings_model['schema_mapping'] = search_settings_schema_mapping_model
+ search_settings_model['elastic_search'] = search_settings_elastic_search_model
+ search_settings_model['conversational_search'] = search_settings_conversational_search_model
+ search_settings_model['server_side_search'] = search_settings_server_side_search_model
+ search_settings_model['client_side_search'] = search_settings_client_side_search_model
+
+ # Construct a dict representation of a SkillImport model
+ skill_import_model = {}
+ skill_import_model['name'] = 'testString'
+ skill_import_model['description'] = 'testString'
+ skill_import_model['workspace'] = {'anyKey': 'anyValue'}
+ skill_import_model['dialog_settings'] = {'anyKey': 'anyValue'}
+ skill_import_model['search_settings'] = search_settings_model
+ skill_import_model['language'] = 'testString'
+ skill_import_model['type'] = 'action'
+
+ # Construct a dict representation of a AssistantState model
+ assistant_state_model = {}
+ assistant_state_model['action_disabled'] = True
+ assistant_state_model['dialog_disabled'] = True
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ assistant_skills = [skill_import_model]
+ assistant_state = assistant_state_model
+ include_audit = False
+
+ # Invoke method
+ response = _service.import_skills(
+ assistant_id,
+ assistant_skills,
+ assistant_state,
+ include_audit=include_audit,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 202
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'include_audit={}'.format('true' if include_audit else 'false') in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['assistant_skills'] == [skill_import_model]
+ assert req_body['assistant_state'] == assistant_state_model
+
+ def test_import_skills_all_params_with_retries(self):
+ # Enable retries and run test_import_skills_all_params.
+ _service.enable_retries()
+ self.test_import_skills_all_params()
+
+ # Disable retries and run test_import_skills_all_params.
+ _service.disable_retries()
+ self.test_import_skills_all_params()
+
+ @responses.activate
+ def test_import_skills_required_params(self):
+ """
+ test_import_skills_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/skills_import')
+ mock_response = '{"assistant_id": "assistant_id", "status": "Available", "status_description": "status_description", "status_errors": [{"message": "message"}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=202,
+ )
+
+ # Construct a dict representation of a SearchSettingsDiscoveryAuthentication model
+ search_settings_discovery_authentication_model = {}
+ search_settings_discovery_authentication_model['basic'] = 'testString'
+ search_settings_discovery_authentication_model['bearer'] = 'testString'
+
+ # Construct a dict representation of a SearchSettingsDiscovery model
+ search_settings_discovery_model = {}
+ search_settings_discovery_model['instance_id'] = 'testString'
+ search_settings_discovery_model['project_id'] = 'testString'
+ search_settings_discovery_model['url'] = 'testString'
+ search_settings_discovery_model['max_primary_results'] = 10000
+ search_settings_discovery_model['max_total_results'] = 10000
+ search_settings_discovery_model['confidence_threshold'] = 0.0
+ search_settings_discovery_model['highlight'] = True
+ search_settings_discovery_model['find_answers'] = True
+ search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model
+
+ # Construct a dict representation of a SearchSettingsMessages model
+ search_settings_messages_model = {}
+ search_settings_messages_model['success'] = 'testString'
+ search_settings_messages_model['error'] = 'testString'
+ search_settings_messages_model['no_result'] = 'testString'
+
+ # Construct a dict representation of a SearchSettingsSchemaMapping model
+ search_settings_schema_mapping_model = {}
+ search_settings_schema_mapping_model['url'] = 'testString'
+ search_settings_schema_mapping_model['body'] = 'testString'
+ search_settings_schema_mapping_model['title'] = 'testString'
+
+ # Construct a dict representation of a SearchSettingsElasticSearch model
+ search_settings_elastic_search_model = {}
+ search_settings_elastic_search_model['url'] = 'testString'
+ search_settings_elastic_search_model['port'] = 'testString'
+ search_settings_elastic_search_model['username'] = 'testString'
+ search_settings_elastic_search_model['password'] = 'testString'
+ search_settings_elastic_search_model['index'] = 'testString'
+ search_settings_elastic_search_model['filter'] = ['testString']
+ search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'}
+ search_settings_elastic_search_model['managed_index'] = 'testString'
+ search_settings_elastic_search_model['apikey'] = 'testString'
+
+ # Construct a dict representation of a SearchSettingsConversationalSearchResponseLength model
+ search_settings_conversational_search_response_length_model = {}
+ search_settings_conversational_search_response_length_model['option'] = 'moderate'
+
+ # Construct a dict representation of a SearchSettingsConversationalSearchSearchConfidence model
+ search_settings_conversational_search_search_confidence_model = {}
+ search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often'
+
+ # Construct a dict representation of a SearchSettingsConversationalSearch model
+ search_settings_conversational_search_model = {}
+ search_settings_conversational_search_model['enabled'] = True
+ search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model
+ search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model
+
+ # Construct a dict representation of a SearchSettingsServerSideSearch model
+ search_settings_server_side_search_model = {}
+ search_settings_server_side_search_model['url'] = 'testString'
+ search_settings_server_side_search_model['port'] = 'testString'
+ search_settings_server_side_search_model['username'] = 'testString'
+ search_settings_server_side_search_model['password'] = 'testString'
+ search_settings_server_side_search_model['filter'] = 'testString'
+ search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'}
+ search_settings_server_side_search_model['apikey'] = 'testString'
+ search_settings_server_side_search_model['no_auth'] = True
+ search_settings_server_side_search_model['auth_type'] = 'basic'
+
+ # Construct a dict representation of a SearchSettingsClientSideSearch model
+ search_settings_client_side_search_model = {}
+ search_settings_client_side_search_model['filter'] = 'testString'
+ search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'}
+
+ # Construct a dict representation of a SearchSettings model
+ search_settings_model = {}
+ search_settings_model['discovery'] = search_settings_discovery_model
+ search_settings_model['messages'] = search_settings_messages_model
+ search_settings_model['schema_mapping'] = search_settings_schema_mapping_model
+ search_settings_model['elastic_search'] = search_settings_elastic_search_model
+ search_settings_model['conversational_search'] = search_settings_conversational_search_model
+ search_settings_model['server_side_search'] = search_settings_server_side_search_model
+ search_settings_model['client_side_search'] = search_settings_client_side_search_model
+
+ # Construct a dict representation of a SkillImport model
+ skill_import_model = {}
+ skill_import_model['name'] = 'testString'
+ skill_import_model['description'] = 'testString'
+ skill_import_model['workspace'] = {'anyKey': 'anyValue'}
+ skill_import_model['dialog_settings'] = {'anyKey': 'anyValue'}
+ skill_import_model['search_settings'] = search_settings_model
+ skill_import_model['language'] = 'testString'
+ skill_import_model['type'] = 'action'
+
+        # Construct a dict representation of an AssistantState model
+ assistant_state_model = {}
+ assistant_state_model['action_disabled'] = True
+ assistant_state_model['dialog_disabled'] = True
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ assistant_skills = [skill_import_model]
+ assistant_state = assistant_state_model
+
+ # Invoke method
+ response = _service.import_skills(
+ assistant_id,
+ assistant_skills,
+ assistant_state,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 202
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['assistant_skills'] == [skill_import_model]
+ assert req_body['assistant_state'] == assistant_state_model
+
+ def test_import_skills_required_params_with_retries(self):
+ # Enable retries and run test_import_skills_required_params.
+ _service.enable_retries()
+ self.test_import_skills_required_params()
+
+ # Disable retries and run test_import_skills_required_params.
+ _service.disable_retries()
+ self.test_import_skills_required_params()
+
+ @responses.activate
+ def test_import_skills_value_error(self):
+ """
+ test_import_skills_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/skills_import')
+ mock_response = '{"assistant_id": "assistant_id", "status": "Available", "status_description": "status_description", "status_errors": [{"message": "message"}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=202,
+ )
+
+ # Construct a dict representation of a SearchSettingsDiscoveryAuthentication model
+ search_settings_discovery_authentication_model = {}
+ search_settings_discovery_authentication_model['basic'] = 'testString'
+ search_settings_discovery_authentication_model['bearer'] = 'testString'
+
+ # Construct a dict representation of a SearchSettingsDiscovery model
+ search_settings_discovery_model = {}
+ search_settings_discovery_model['instance_id'] = 'testString'
+ search_settings_discovery_model['project_id'] = 'testString'
+ search_settings_discovery_model['url'] = 'testString'
+ search_settings_discovery_model['max_primary_results'] = 10000
+ search_settings_discovery_model['max_total_results'] = 10000
+ search_settings_discovery_model['confidence_threshold'] = 0.0
+ search_settings_discovery_model['highlight'] = True
+ search_settings_discovery_model['find_answers'] = True
+ search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model
+
+ # Construct a dict representation of a SearchSettingsMessages model
+ search_settings_messages_model = {}
+ search_settings_messages_model['success'] = 'testString'
+ search_settings_messages_model['error'] = 'testString'
+ search_settings_messages_model['no_result'] = 'testString'
+
+ # Construct a dict representation of a SearchSettingsSchemaMapping model
+ search_settings_schema_mapping_model = {}
+ search_settings_schema_mapping_model['url'] = 'testString'
+ search_settings_schema_mapping_model['body'] = 'testString'
+ search_settings_schema_mapping_model['title'] = 'testString'
+
+ # Construct a dict representation of a SearchSettingsElasticSearch model
+ search_settings_elastic_search_model = {}
+ search_settings_elastic_search_model['url'] = 'testString'
+ search_settings_elastic_search_model['port'] = 'testString'
+ search_settings_elastic_search_model['username'] = 'testString'
+ search_settings_elastic_search_model['password'] = 'testString'
+ search_settings_elastic_search_model['index'] = 'testString'
+ search_settings_elastic_search_model['filter'] = ['testString']
+ search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'}
+ search_settings_elastic_search_model['managed_index'] = 'testString'
+ search_settings_elastic_search_model['apikey'] = 'testString'
+
+ # Construct a dict representation of a SearchSettingsConversationalSearchResponseLength model
+ search_settings_conversational_search_response_length_model = {}
+ search_settings_conversational_search_response_length_model['option'] = 'moderate'
+
+ # Construct a dict representation of a SearchSettingsConversationalSearchSearchConfidence model
+ search_settings_conversational_search_search_confidence_model = {}
+ search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often'
+
+ # Construct a dict representation of a SearchSettingsConversationalSearch model
+ search_settings_conversational_search_model = {}
+ search_settings_conversational_search_model['enabled'] = True
+ search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model
+ search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model
+
+ # Construct a dict representation of a SearchSettingsServerSideSearch model
+ search_settings_server_side_search_model = {}
+ search_settings_server_side_search_model['url'] = 'testString'
+ search_settings_server_side_search_model['port'] = 'testString'
+ search_settings_server_side_search_model['username'] = 'testString'
+ search_settings_server_side_search_model['password'] = 'testString'
+ search_settings_server_side_search_model['filter'] = 'testString'
+ search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'}
+ search_settings_server_side_search_model['apikey'] = 'testString'
+ search_settings_server_side_search_model['no_auth'] = True
+ search_settings_server_side_search_model['auth_type'] = 'basic'
+
+ # Construct a dict representation of a SearchSettingsClientSideSearch model
+ search_settings_client_side_search_model = {}
+ search_settings_client_side_search_model['filter'] = 'testString'
+ search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'}
+
+ # Construct a dict representation of a SearchSettings model
+ search_settings_model = {}
+ search_settings_model['discovery'] = search_settings_discovery_model
+ search_settings_model['messages'] = search_settings_messages_model
+ search_settings_model['schema_mapping'] = search_settings_schema_mapping_model
+ search_settings_model['elastic_search'] = search_settings_elastic_search_model
+ search_settings_model['conversational_search'] = search_settings_conversational_search_model
+ search_settings_model['server_side_search'] = search_settings_server_side_search_model
+ search_settings_model['client_side_search'] = search_settings_client_side_search_model
+
+ # Construct a dict representation of a SkillImport model
+ skill_import_model = {}
+ skill_import_model['name'] = 'testString'
+ skill_import_model['description'] = 'testString'
+ skill_import_model['workspace'] = {'anyKey': 'anyValue'}
+ skill_import_model['dialog_settings'] = {'anyKey': 'anyValue'}
+ skill_import_model['search_settings'] = search_settings_model
+ skill_import_model['language'] = 'testString'
+ skill_import_model['type'] = 'action'
+
+        # Construct a dict representation of an AssistantState model
+ assistant_state_model = {}
+ assistant_state_model['action_disabled'] = True
+ assistant_state_model['dialog_disabled'] = True
+
+ # Set up parameter values
+ assistant_id = 'testString'
+ assistant_skills = [skill_import_model]
+ assistant_state = assistant_state_model
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "assistant_id": assistant_id,
+ "assistant_skills": assistant_skills,
+ "assistant_state": assistant_state,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.import_skills(**req_copy)
+
+ def test_import_skills_value_error_with_retries(self):
+ # Enable retries and run test_import_skills_value_error.
+ _service.enable_retries()
+ self.test_import_skills_value_error()
+
+ # Disable retries and run test_import_skills_value_error.
+ _service.disable_retries()
+ self.test_import_skills_value_error()
+
+
+class TestImportSkillsStatus:
+ """
+ Test Class for import_skills_status
+ """
+
+ @responses.activate
+ def test_import_skills_status_all_params(self):
+ """
+        test_import_skills_status_all_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/skills_import/status')
+ mock_response = '{"assistant_id": "assistant_id", "status": "Available", "status_description": "status_description", "status_errors": [{"message": "message"}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+
+ # Invoke method
+ response = _service.import_skills_status(
+ assistant_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_import_skills_status_all_params_with_retries(self):
+ # Enable retries and run test_import_skills_status_all_params.
+ _service.enable_retries()
+ self.test_import_skills_status_all_params()
+
+ # Disable retries and run test_import_skills_status_all_params.
+ _service.disable_retries()
+ self.test_import_skills_status_all_params()
+
+ @responses.activate
+ def test_import_skills_status_value_error(self):
+ """
+ test_import_skills_status_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/assistants/testString/skills_import/status')
+ mock_response = '{"assistant_id": "assistant_id", "status": "Available", "status_description": "status_description", "status_errors": [{"message": "message"}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ assistant_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "assistant_id": assistant_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.import_skills_status(**req_copy)
+
+ def test_import_skills_status_value_error_with_retries(self):
+ # Enable retries and run test_import_skills_status_value_error.
+ _service.enable_retries()
+ self.test_import_skills_status_value_error()
+
+ # Disable retries and run test_import_skills_status_value_error.
+ _service.disable_retries()
+ self.test_import_skills_status_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Skills
+##############################################################################
+
+
+##############################################################################
+# Start of Model Tests
+##############################################################################
+# region
+
+
+class TestModel_AgentAvailabilityMessage:
+ """
+ Test Class for AgentAvailabilityMessage
+ """
+
+ def test_agent_availability_message_serialization(self):
+ """
+ Test serialization/deserialization for AgentAvailabilityMessage
+ """
+
+        # Construct a json representation of an AgentAvailabilityMessage model
+ agent_availability_message_model_json = {}
+ agent_availability_message_model_json['message'] = 'testString'
+
+ # Construct a model instance of AgentAvailabilityMessage by calling from_dict on the json representation
+ agent_availability_message_model = AgentAvailabilityMessage.from_dict(agent_availability_message_model_json)
+ assert agent_availability_message_model != False
+
+ # Construct a model instance of AgentAvailabilityMessage by calling from_dict on the json representation
+ agent_availability_message_model_dict = AgentAvailabilityMessage.from_dict(agent_availability_message_model_json).__dict__
+ agent_availability_message_model2 = AgentAvailabilityMessage(**agent_availability_message_model_dict)
+
+ # Verify the model instances are equivalent
+ assert agent_availability_message_model == agent_availability_message_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ agent_availability_message_model_json2 = agent_availability_message_model.to_dict()
+ assert agent_availability_message_model_json2 == agent_availability_message_model_json
+
+
+class TestModel_AssistantCollection:
+ """
+ Test Class for AssistantCollection
+ """
+
+ def test_assistant_collection_serialization(self):
+ """
+ Test serialization/deserialization for AssistantCollection
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ assistant_data_model = {} # AssistantData
+ assistant_data_model['name'] = 'testString'
+ assistant_data_model['description'] = 'testString'
+ assistant_data_model['language'] = 'testString'
+
+ pagination_model = {} # Pagination
+ pagination_model['refresh_url'] = 'testString'
+ pagination_model['next_url'] = 'testString'
+ pagination_model['total'] = 38
+ pagination_model['matched'] = 38
+ pagination_model['refresh_cursor'] = 'testString'
+ pagination_model['next_cursor'] = 'testString'
+
+        # Construct a json representation of an AssistantCollection model
+ assistant_collection_model_json = {}
+ assistant_collection_model_json['assistants'] = [assistant_data_model]
+ assistant_collection_model_json['pagination'] = pagination_model
+
+ # Construct a model instance of AssistantCollection by calling from_dict on the json representation
+ assistant_collection_model = AssistantCollection.from_dict(assistant_collection_model_json)
+ assert assistant_collection_model != False
+
+ # Construct a model instance of AssistantCollection by calling from_dict on the json representation
+ assistant_collection_model_dict = AssistantCollection.from_dict(assistant_collection_model_json).__dict__
+ assistant_collection_model2 = AssistantCollection(**assistant_collection_model_dict)
+
+ # Verify the model instances are equivalent
+ assert assistant_collection_model == assistant_collection_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ assistant_collection_model_json2 = assistant_collection_model.to_dict()
+ assert assistant_collection_model_json2 == assistant_collection_model_json
+
+
+class TestModel_AssistantData:
+ """
+ Test Class for AssistantData
+ """
+
+ def test_assistant_data_serialization(self):
+ """
+ Test serialization/deserialization for AssistantData
+ """
+
+        # Construct a json representation of an AssistantData model
+ assistant_data_model_json = {}
+ assistant_data_model_json['name'] = 'testString'
+ assistant_data_model_json['description'] = 'testString'
+ assistant_data_model_json['language'] = 'testString'
+
+ # Construct a model instance of AssistantData by calling from_dict on the json representation
+ assistant_data_model = AssistantData.from_dict(assistant_data_model_json)
+ assert assistant_data_model != False
+
+ # Construct a model instance of AssistantData by calling from_dict on the json representation
+ assistant_data_model_dict = AssistantData.from_dict(assistant_data_model_json).__dict__
+ assistant_data_model2 = AssistantData(**assistant_data_model_dict)
+
+ # Verify the model instances are equivalent
+ assert assistant_data_model == assistant_data_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ assistant_data_model_json2 = assistant_data_model.to_dict()
+ assert assistant_data_model_json2 == assistant_data_model_json
+
+
+class TestModel_AssistantSkill:
+ """
+ Test Class for AssistantSkill
+ """
+
+ def test_assistant_skill_serialization(self):
+ """
+ Test serialization/deserialization for AssistantSkill
+ """
+
+        # Construct a json representation of an AssistantSkill model
+ assistant_skill_model_json = {}
+ assistant_skill_model_json['skill_id'] = 'testString'
+ assistant_skill_model_json['type'] = 'dialog'
+
+ # Construct a model instance of AssistantSkill by calling from_dict on the json representation
+ assistant_skill_model = AssistantSkill.from_dict(assistant_skill_model_json)
+ assert assistant_skill_model != False
+
+ # Construct a model instance of AssistantSkill by calling from_dict on the json representation
+ assistant_skill_model_dict = AssistantSkill.from_dict(assistant_skill_model_json).__dict__
+ assistant_skill_model2 = AssistantSkill(**assistant_skill_model_dict)
+
+ # Verify the model instances are equivalent
+ assert assistant_skill_model == assistant_skill_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ assistant_skill_model_json2 = assistant_skill_model.to_dict()
+ assert assistant_skill_model_json2 == assistant_skill_model_json
+
+
+class TestModel_AssistantState:
+ """
+ Test Class for AssistantState
+ """
+
+ def test_assistant_state_serialization(self):
+ """
+ Test serialization/deserialization for AssistantState
+ """
+
+        # Construct a json representation of an AssistantState model
+ assistant_state_model_json = {}
+ assistant_state_model_json['action_disabled'] = True
+ assistant_state_model_json['dialog_disabled'] = True
+
+ # Construct a model instance of AssistantState by calling from_dict on the json representation
+ assistant_state_model = AssistantState.from_dict(assistant_state_model_json)
+ assert assistant_state_model != False
+
+ # Construct a model instance of AssistantState by calling from_dict on the json representation
+ assistant_state_model_dict = AssistantState.from_dict(assistant_state_model_json).__dict__
+ assistant_state_model2 = AssistantState(**assistant_state_model_dict)
+
+ # Verify the model instances are equivalent
+ assert assistant_state_model == assistant_state_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ assistant_state_model_json2 = assistant_state_model.to_dict()
+ assert assistant_state_model_json2 == assistant_state_model_json
+
+
+class TestModel_BaseEnvironmentOrchestration:
+ """
+ Test Class for BaseEnvironmentOrchestration
+ """
+
+ def test_base_environment_orchestration_serialization(self):
+ """
+ Test serialization/deserialization for BaseEnvironmentOrchestration
+ """
+
+ # Construct a json representation of a BaseEnvironmentOrchestration model
+ base_environment_orchestration_model_json = {}
+ base_environment_orchestration_model_json['search_skill_fallback'] = True
+
+ # Construct a model instance of BaseEnvironmentOrchestration by calling from_dict on the json representation
+ base_environment_orchestration_model = BaseEnvironmentOrchestration.from_dict(base_environment_orchestration_model_json)
+ assert base_environment_orchestration_model != False
+
+ # Construct a model instance of BaseEnvironmentOrchestration by calling from_dict on the json representation
+ base_environment_orchestration_model_dict = BaseEnvironmentOrchestration.from_dict(base_environment_orchestration_model_json).__dict__
+ base_environment_orchestration_model2 = BaseEnvironmentOrchestration(**base_environment_orchestration_model_dict)
+
+ # Verify the model instances are equivalent
+ assert base_environment_orchestration_model == base_environment_orchestration_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ base_environment_orchestration_model_json2 = base_environment_orchestration_model.to_dict()
+ assert base_environment_orchestration_model_json2 == base_environment_orchestration_model_json
+
+
+class TestModel_BaseEnvironmentReleaseReference:
+ """
+ Test Class for BaseEnvironmentReleaseReference
+ """
+
+ def test_base_environment_release_reference_serialization(self):
+ """
+ Test serialization/deserialization for BaseEnvironmentReleaseReference
+ """
+
+ # Construct a json representation of a BaseEnvironmentReleaseReference model
+ base_environment_release_reference_model_json = {}
+ base_environment_release_reference_model_json['release'] = 'testString'
+
+ # Construct a model instance of BaseEnvironmentReleaseReference by calling from_dict on the json representation
+ base_environment_release_reference_model = BaseEnvironmentReleaseReference.from_dict(base_environment_release_reference_model_json)
+ assert base_environment_release_reference_model != False
+
+ # Construct a model instance of BaseEnvironmentReleaseReference by calling from_dict on the json representation
+ base_environment_release_reference_model_dict = BaseEnvironmentReleaseReference.from_dict(base_environment_release_reference_model_json).__dict__
+ base_environment_release_reference_model2 = BaseEnvironmentReleaseReference(**base_environment_release_reference_model_dict)
+
+ # Verify the model instances are equivalent
+ assert base_environment_release_reference_model == base_environment_release_reference_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ base_environment_release_reference_model_json2 = base_environment_release_reference_model.to_dict()
+ assert base_environment_release_reference_model_json2 == base_environment_release_reference_model_json
+
+
+class TestModel_BulkClassifyOutput:
+ """
+ Test Class for BulkClassifyOutput
+ """
+
+ def test_bulk_classify_output_serialization(self):
+ """
+ Test serialization/deserialization for BulkClassifyOutput
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ bulk_classify_utterance_model = {} # BulkClassifyUtterance
+ bulk_classify_utterance_model['text'] = 'testString'
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ # Construct a json representation of a BulkClassifyOutput model
+ bulk_classify_output_model_json = {}
+ bulk_classify_output_model_json['input'] = bulk_classify_utterance_model
+ bulk_classify_output_model_json['entities'] = [runtime_entity_model]
+ bulk_classify_output_model_json['intents'] = [runtime_intent_model]
+
+ # Construct a model instance of BulkClassifyOutput by calling from_dict on the json representation
+ bulk_classify_output_model = BulkClassifyOutput.from_dict(bulk_classify_output_model_json)
+ assert bulk_classify_output_model != False
+
+ # Construct a model instance of BulkClassifyOutput by calling from_dict on the json representation
+ bulk_classify_output_model_dict = BulkClassifyOutput.from_dict(bulk_classify_output_model_json).__dict__
+ bulk_classify_output_model2 = BulkClassifyOutput(**bulk_classify_output_model_dict)
+
+ # Verify the model instances are equivalent
+ assert bulk_classify_output_model == bulk_classify_output_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ bulk_classify_output_model_json2 = bulk_classify_output_model.to_dict()
+ assert bulk_classify_output_model_json2 == bulk_classify_output_model_json
+
+
+class TestModel_BulkClassifyResponse:
+ """
+ Test Class for BulkClassifyResponse
+ """
+
+ def test_bulk_classify_response_serialization(self):
+ """
+ Test serialization/deserialization for BulkClassifyResponse
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ bulk_classify_utterance_model = {} # BulkClassifyUtterance
+ bulk_classify_utterance_model['text'] = 'testString'
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ bulk_classify_output_model = {} # BulkClassifyOutput
+ bulk_classify_output_model['input'] = bulk_classify_utterance_model
+ bulk_classify_output_model['entities'] = [runtime_entity_model]
+ bulk_classify_output_model['intents'] = [runtime_intent_model]
+
+ # Construct a json representation of a BulkClassifyResponse model
+ bulk_classify_response_model_json = {}
+ bulk_classify_response_model_json['output'] = [bulk_classify_output_model]
+
+ # Construct a model instance of BulkClassifyResponse by calling from_dict on the json representation
+ bulk_classify_response_model = BulkClassifyResponse.from_dict(bulk_classify_response_model_json)
+ assert bulk_classify_response_model != False
+
+ # Construct a model instance of BulkClassifyResponse by calling from_dict on the json representation
+ bulk_classify_response_model_dict = BulkClassifyResponse.from_dict(bulk_classify_response_model_json).__dict__
+ bulk_classify_response_model2 = BulkClassifyResponse(**bulk_classify_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert bulk_classify_response_model == bulk_classify_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ bulk_classify_response_model_json2 = bulk_classify_response_model.to_dict()
+ assert bulk_classify_response_model_json2 == bulk_classify_response_model_json
+
+
class TestModel_BulkClassifyUtterance:
    """
    Test Class for BulkClassifyUtterance
    """

    def test_bulk_classify_utterance_serialization(self):
        """
        Test serialization/deserialization for BulkClassifyUtterance
        """

        # Construct a json representation of a BulkClassifyUtterance model
        bulk_classify_utterance_model_json = {}
        bulk_classify_utterance_model_json['text'] = 'testString'

        # Construct a model instance of BulkClassifyUtterance by calling from_dict on the json representation
        bulk_classify_utterance_model = BulkClassifyUtterance.from_dict(bulk_classify_utterance_model_json)
        # `is not None` instead of `!= False` (PEP 8 E712): from_dict returns a model
        # object, and equality-comparing to the False singleton is both unidiomatic
        # and vacuously true for any non-False return value.
        assert bulk_classify_utterance_model is not None

        # Round-trip through the instance __dict__ to exercise the keyword constructor
        bulk_classify_utterance_model_dict = BulkClassifyUtterance.from_dict(bulk_classify_utterance_model_json).__dict__
        bulk_classify_utterance_model2 = BulkClassifyUtterance(**bulk_classify_utterance_model_dict)

        # Verify the model instances are equivalent
        assert bulk_classify_utterance_model == bulk_classify_utterance_model2

        # Convert model instance back to dict and verify no loss of data
        bulk_classify_utterance_model_json2 = bulk_classify_utterance_model.to_dict()
        assert bulk_classify_utterance_model_json2 == bulk_classify_utterance_model_json
+
+
class TestModel_CaptureGroup:
    """
    Test Class for CaptureGroup
    """

    def test_capture_group_serialization(self):
        """
        Test serialization/deserialization for CaptureGroup
        """

        # Construct a json representation of a CaptureGroup model
        capture_group_model_json = {}
        capture_group_model_json['group'] = 'testString'
        capture_group_model_json['location'] = [38]

        # Construct a model instance of CaptureGroup by calling from_dict on the json representation
        capture_group_model = CaptureGroup.from_dict(capture_group_model_json)
        # `is not None` instead of `!= False` (PEP 8 E712): any non-False object
        # would satisfy the original comparison, making it a near-vacuous check.
        assert capture_group_model is not None

        # Round-trip through the instance __dict__ to exercise the keyword constructor
        capture_group_model_dict = CaptureGroup.from_dict(capture_group_model_json).__dict__
        capture_group_model2 = CaptureGroup(**capture_group_model_dict)

        # Verify the model instances are equivalent
        assert capture_group_model == capture_group_model2

        # Convert model instance back to dict and verify no loss of data
        capture_group_model_json2 = capture_group_model.to_dict()
        assert capture_group_model_json2 == capture_group_model_json
+
+
class TestModel_ChannelTransferInfo:
    """
    Test Class for ChannelTransferInfo
    """

    def test_channel_transfer_info_serialization(self):
        """
        Test serialization/deserialization for ChannelTransferInfo
        """

        # Construct dict forms of any model objects needed in order to build this model.

        channel_transfer_target_chat_model = {} # ChannelTransferTargetChat
        channel_transfer_target_chat_model['url'] = 'testString'

        channel_transfer_target_model = {} # ChannelTransferTarget
        channel_transfer_target_model['chat'] = channel_transfer_target_chat_model

        # Construct a json representation of a ChannelTransferInfo model
        channel_transfer_info_model_json = {}
        channel_transfer_info_model_json['target'] = channel_transfer_target_model

        # Construct a model instance of ChannelTransferInfo by calling from_dict on the json representation
        channel_transfer_info_model = ChannelTransferInfo.from_dict(channel_transfer_info_model_json)
        # `is not None` instead of `!= False` (PEP 8 E712)
        assert channel_transfer_info_model is not None

        # Round-trip through the instance __dict__ to exercise the keyword constructor
        channel_transfer_info_model_dict = ChannelTransferInfo.from_dict(channel_transfer_info_model_json).__dict__
        channel_transfer_info_model2 = ChannelTransferInfo(**channel_transfer_info_model_dict)

        # Verify the model instances are equivalent
        assert channel_transfer_info_model == channel_transfer_info_model2

        # Convert model instance back to dict and verify no loss of data
        channel_transfer_info_model_json2 = channel_transfer_info_model.to_dict()
        assert channel_transfer_info_model_json2 == channel_transfer_info_model_json
+
+
class TestModel_ChannelTransferTarget:
    """
    Test Class for ChannelTransferTarget
    """

    def test_channel_transfer_target_serialization(self):
        """
        Test serialization/deserialization for ChannelTransferTarget
        """

        # Construct dict forms of any model objects needed in order to build this model.

        channel_transfer_target_chat_model = {} # ChannelTransferTargetChat
        channel_transfer_target_chat_model['url'] = 'testString'

        # Construct a json representation of a ChannelTransferTarget model
        channel_transfer_target_model_json = {}
        channel_transfer_target_model_json['chat'] = channel_transfer_target_chat_model

        # Construct a model instance of ChannelTransferTarget by calling from_dict on the json representation
        channel_transfer_target_model = ChannelTransferTarget.from_dict(channel_transfer_target_model_json)
        # `is not None` instead of `!= False` (PEP 8 E712)
        assert channel_transfer_target_model is not None

        # Round-trip through the instance __dict__ to exercise the keyword constructor
        channel_transfer_target_model_dict = ChannelTransferTarget.from_dict(channel_transfer_target_model_json).__dict__
        channel_transfer_target_model2 = ChannelTransferTarget(**channel_transfer_target_model_dict)

        # Verify the model instances are equivalent
        assert channel_transfer_target_model == channel_transfer_target_model2

        # Convert model instance back to dict and verify no loss of data
        channel_transfer_target_model_json2 = channel_transfer_target_model.to_dict()
        assert channel_transfer_target_model_json2 == channel_transfer_target_model_json
+
+
class TestModel_ChannelTransferTargetChat:
    """
    Test Class for ChannelTransferTargetChat
    """

    def test_channel_transfer_target_chat_serialization(self):
        """
        Test serialization/deserialization for ChannelTransferTargetChat
        """

        # Construct a json representation of a ChannelTransferTargetChat model
        channel_transfer_target_chat_model_json = {}
        channel_transfer_target_chat_model_json['url'] = 'testString'

        # Construct a model instance of ChannelTransferTargetChat by calling from_dict on the json representation
        channel_transfer_target_chat_model = ChannelTransferTargetChat.from_dict(channel_transfer_target_chat_model_json)
        # `is not None` instead of `!= False` (PEP 8 E712)
        assert channel_transfer_target_chat_model is not None

        # Round-trip through the instance __dict__ to exercise the keyword constructor
        channel_transfer_target_chat_model_dict = ChannelTransferTargetChat.from_dict(channel_transfer_target_chat_model_json).__dict__
        channel_transfer_target_chat_model2 = ChannelTransferTargetChat(**channel_transfer_target_chat_model_dict)

        # Verify the model instances are equivalent
        assert channel_transfer_target_chat_model == channel_transfer_target_chat_model2

        # Convert model instance back to dict and verify no loss of data
        channel_transfer_target_chat_model_json2 = channel_transfer_target_chat_model.to_dict()
        assert channel_transfer_target_chat_model_json2 == channel_transfer_target_chat_model_json
+
+
class TestModel_ClientAction:
    """
    Test Class for ClientAction
    """

    def test_client_action_serialization(self):
        """
        Test serialization/deserialization for ClientAction
        """

        # Construct a json representation of a ClientAction model
        client_action_model_json = {}
        client_action_model_json['name'] = 'testString'
        client_action_model_json['result_variable'] = 'testString'
        client_action_model_json['type'] = 'testString'
        client_action_model_json['skill'] = 'main skill'
        client_action_model_json['parameters'] = {'anyKey': 'anyValue'}

        # Construct a model instance of ClientAction by calling from_dict on the json representation
        client_action_model = ClientAction.from_dict(client_action_model_json)
        # `is not None` instead of `!= False` (PEP 8 E712)
        assert client_action_model is not None

        # Round-trip through the instance __dict__ to exercise the keyword constructor
        client_action_model_dict = ClientAction.from_dict(client_action_model_json).__dict__
        client_action_model2 = ClientAction(**client_action_model_dict)

        # Verify the model instances are equivalent
        assert client_action_model == client_action_model2

        # Convert model instance back to dict and verify no loss of data
        client_action_model_json2 = client_action_model.to_dict()
        assert client_action_model_json2 == client_action_model_json
+
+
class TestModel_CreateAssistantReleaseImportResponse:
    """
    Test Class for CreateAssistantReleaseImportResponse
    """

    def test_create_assistant_release_import_response_serialization(self):
        """
        Test serialization/deserialization for CreateAssistantReleaseImportResponse
        """

        # Construct a json representation of a CreateAssistantReleaseImportResponse model
        create_assistant_release_import_response_model_json = {}
        create_assistant_release_import_response_model_json['skill_impact_in_draft'] = ['action']

        # Construct a model instance of CreateAssistantReleaseImportResponse by calling from_dict on the json representation
        create_assistant_release_import_response_model = CreateAssistantReleaseImportResponse.from_dict(create_assistant_release_import_response_model_json)
        # `is not None` instead of `!= False` (PEP 8 E712)
        assert create_assistant_release_import_response_model is not None

        # Round-trip through the instance __dict__ to exercise the keyword constructor
        create_assistant_release_import_response_model_dict = CreateAssistantReleaseImportResponse.from_dict(create_assistant_release_import_response_model_json).__dict__
        create_assistant_release_import_response_model2 = CreateAssistantReleaseImportResponse(**create_assistant_release_import_response_model_dict)

        # Verify the model instances are equivalent
        assert create_assistant_release_import_response_model == create_assistant_release_import_response_model2

        # Convert model instance back to dict and verify no loss of data
        create_assistant_release_import_response_model_json2 = create_assistant_release_import_response_model.to_dict()
        assert create_assistant_release_import_response_model_json2 == create_assistant_release_import_response_model_json
+
+
class TestModel_CreateReleaseExportWithStatusErrors:
    """
    Test Class for CreateReleaseExportWithStatusErrors
    """

    def test_create_release_export_with_status_errors_serialization(self):
        """
        Test serialization/deserialization for CreateReleaseExportWithStatusErrors
        """

        # Construct a json representation of a CreateReleaseExportWithStatusErrors model
        # (all properties are optional/read-only, so the JSON is empty)
        create_release_export_with_status_errors_model_json = {}

        # Construct a model instance of CreateReleaseExportWithStatusErrors by calling from_dict on the json representation
        create_release_export_with_status_errors_model = CreateReleaseExportWithStatusErrors.from_dict(create_release_export_with_status_errors_model_json)
        # `is not None` instead of `!= False` (PEP 8 E712)
        assert create_release_export_with_status_errors_model is not None

        # Round-trip through the instance __dict__ to exercise the keyword constructor
        create_release_export_with_status_errors_model_dict = CreateReleaseExportWithStatusErrors.from_dict(create_release_export_with_status_errors_model_json).__dict__
        create_release_export_with_status_errors_model2 = CreateReleaseExportWithStatusErrors(**create_release_export_with_status_errors_model_dict)

        # Verify the model instances are equivalent
        assert create_release_export_with_status_errors_model == create_release_export_with_status_errors_model2

        # Convert model instance back to dict and verify no loss of data
        create_release_export_with_status_errors_model_json2 = create_release_export_with_status_errors_model.to_dict()
        assert create_release_export_with_status_errors_model_json2 == create_release_export_with_status_errors_model_json
+
+
class TestModel_DialogLogMessage:
    """
    Test Class for DialogLogMessage
    """

    def test_dialog_log_message_serialization(self):
        """
        Test serialization/deserialization for DialogLogMessage
        """

        # Construct dict forms of any model objects needed in order to build this model.

        log_message_source_model = {} # LogMessageSourceDialogNode
        log_message_source_model['type'] = 'dialog_node'
        log_message_source_model['dialog_node'] = 'testString'

        # Construct a json representation of a DialogLogMessage model
        dialog_log_message_model_json = {}
        dialog_log_message_model_json['level'] = 'info'
        dialog_log_message_model_json['message'] = 'testString'
        dialog_log_message_model_json['code'] = 'testString'
        dialog_log_message_model_json['source'] = log_message_source_model

        # Construct a model instance of DialogLogMessage by calling from_dict on the json representation
        dialog_log_message_model = DialogLogMessage.from_dict(dialog_log_message_model_json)
        # `is not None` instead of `!= False` (PEP 8 E712)
        assert dialog_log_message_model is not None

        # Round-trip through the instance __dict__ to exercise the keyword constructor
        dialog_log_message_model_dict = DialogLogMessage.from_dict(dialog_log_message_model_json).__dict__
        dialog_log_message_model2 = DialogLogMessage(**dialog_log_message_model_dict)

        # Verify the model instances are equivalent
        assert dialog_log_message_model == dialog_log_message_model2

        # Convert model instance back to dict and verify no loss of data
        dialog_log_message_model_json2 = dialog_log_message_model.to_dict()
        assert dialog_log_message_model_json2 == dialog_log_message_model_json
+
+
class TestModel_DialogNodeAction:
    """
    Test Class for DialogNodeAction
    """

    def test_dialog_node_action_serialization(self):
        """
        Test serialization/deserialization for DialogNodeAction
        """

        # Construct a json representation of a DialogNodeAction model
        dialog_node_action_model_json = {}
        dialog_node_action_model_json['name'] = 'testString'
        dialog_node_action_model_json['type'] = 'client'
        dialog_node_action_model_json['parameters'] = {'anyKey': 'anyValue'}
        dialog_node_action_model_json['result_variable'] = 'testString'
        dialog_node_action_model_json['credentials'] = 'testString'

        # Construct a model instance of DialogNodeAction by calling from_dict on the json representation
        dialog_node_action_model = DialogNodeAction.from_dict(dialog_node_action_model_json)
        # `is not None` instead of `!= False` (PEP 8 E712)
        assert dialog_node_action_model is not None

        # Round-trip through the instance __dict__ to exercise the keyword constructor
        dialog_node_action_model_dict = DialogNodeAction.from_dict(dialog_node_action_model_json).__dict__
        dialog_node_action_model2 = DialogNodeAction(**dialog_node_action_model_dict)

        # Verify the model instances are equivalent
        assert dialog_node_action_model == dialog_node_action_model2

        # Convert model instance back to dict and verify no loss of data
        dialog_node_action_model_json2 = dialog_node_action_model.to_dict()
        assert dialog_node_action_model_json2 == dialog_node_action_model_json
+
+
class TestModel_DialogNodeOutputConnectToAgentTransferInfo:
    """
    Test Class for DialogNodeOutputConnectToAgentTransferInfo
    """

    def test_dialog_node_output_connect_to_agent_transfer_info_serialization(self):
        """
        Test serialization/deserialization for DialogNodeOutputConnectToAgentTransferInfo
        """

        # Construct a json representation of a DialogNodeOutputConnectToAgentTransferInfo model
        dialog_node_output_connect_to_agent_transfer_info_model_json = {}
        dialog_node_output_connect_to_agent_transfer_info_model_json['target'] = {'key1': {'anyKey': 'anyValue'}}

        # Construct a model instance of DialogNodeOutputConnectToAgentTransferInfo by calling from_dict on the json representation
        dialog_node_output_connect_to_agent_transfer_info_model = DialogNodeOutputConnectToAgentTransferInfo.from_dict(dialog_node_output_connect_to_agent_transfer_info_model_json)
        # `is not None` instead of `!= False` (PEP 8 E712)
        assert dialog_node_output_connect_to_agent_transfer_info_model is not None

        # Round-trip through the instance __dict__ to exercise the keyword constructor
        dialog_node_output_connect_to_agent_transfer_info_model_dict = DialogNodeOutputConnectToAgentTransferInfo.from_dict(dialog_node_output_connect_to_agent_transfer_info_model_json).__dict__
        dialog_node_output_connect_to_agent_transfer_info_model2 = DialogNodeOutputConnectToAgentTransferInfo(**dialog_node_output_connect_to_agent_transfer_info_model_dict)

        # Verify the model instances are equivalent
        assert dialog_node_output_connect_to_agent_transfer_info_model == dialog_node_output_connect_to_agent_transfer_info_model2

        # Convert model instance back to dict and verify no loss of data
        dialog_node_output_connect_to_agent_transfer_info_model_json2 = dialog_node_output_connect_to_agent_transfer_info_model.to_dict()
        assert dialog_node_output_connect_to_agent_transfer_info_model_json2 == dialog_node_output_connect_to_agent_transfer_info_model_json
+
+
class TestModel_DialogNodeOutputOptionsElement:
    """
    Test Class for DialogNodeOutputOptionsElement
    """

    def test_dialog_node_output_options_element_serialization(self):
        """
        Test serialization/deserialization for DialogNodeOutputOptionsElement
        """

        # Construct dict forms of any model objects needed in order to build this model.

        runtime_intent_model = {} # RuntimeIntent
        runtime_intent_model['intent'] = 'testString'
        runtime_intent_model['confidence'] = 72.5
        runtime_intent_model['skill'] = 'testString'

        capture_group_model = {} # CaptureGroup
        capture_group_model['group'] = 'testString'
        capture_group_model['location'] = [38]

        runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
        runtime_entity_interpretation_model['calendar_type'] = 'testString'
        runtime_entity_interpretation_model['datetime_link'] = 'testString'
        runtime_entity_interpretation_model['festival'] = 'testString'
        runtime_entity_interpretation_model['granularity'] = 'day'
        runtime_entity_interpretation_model['range_link'] = 'testString'
        runtime_entity_interpretation_model['range_modifier'] = 'testString'
        runtime_entity_interpretation_model['relative_day'] = 72.5
        runtime_entity_interpretation_model['relative_month'] = 72.5
        runtime_entity_interpretation_model['relative_week'] = 72.5
        runtime_entity_interpretation_model['relative_weekend'] = 72.5
        runtime_entity_interpretation_model['relative_year'] = 72.5
        runtime_entity_interpretation_model['specific_day'] = 72.5
        runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
        runtime_entity_interpretation_model['specific_month'] = 72.5
        runtime_entity_interpretation_model['specific_quarter'] = 72.5
        runtime_entity_interpretation_model['specific_year'] = 72.5
        runtime_entity_interpretation_model['numeric_value'] = 72.5
        runtime_entity_interpretation_model['subtype'] = 'testString'
        runtime_entity_interpretation_model['part_of_day'] = 'testString'
        runtime_entity_interpretation_model['relative_hour'] = 72.5
        runtime_entity_interpretation_model['relative_minute'] = 72.5
        runtime_entity_interpretation_model['relative_second'] = 72.5
        runtime_entity_interpretation_model['specific_hour'] = 72.5
        runtime_entity_interpretation_model['specific_minute'] = 72.5
        runtime_entity_interpretation_model['specific_second'] = 72.5
        runtime_entity_interpretation_model['timezone'] = 'testString'

        runtime_entity_alternative_model = {} # RuntimeEntityAlternative
        runtime_entity_alternative_model['value'] = 'testString'
        runtime_entity_alternative_model['confidence'] = 72.5

        runtime_entity_role_model = {} # RuntimeEntityRole
        runtime_entity_role_model['type'] = 'date_from'

        runtime_entity_model = {} # RuntimeEntity
        runtime_entity_model['entity'] = 'testString'
        runtime_entity_model['location'] = [38]
        runtime_entity_model['value'] = 'testString'
        runtime_entity_model['confidence'] = 72.5
        runtime_entity_model['groups'] = [capture_group_model]
        runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
        runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
        runtime_entity_model['role'] = runtime_entity_role_model
        runtime_entity_model['skill'] = 'testString'

        message_input_attachment_model = {} # MessageInputAttachment
        message_input_attachment_model['url'] = 'testString'
        message_input_attachment_model['media_type'] = 'testString'

        request_analytics_model = {} # RequestAnalytics
        request_analytics_model['browser'] = 'testString'
        request_analytics_model['device'] = 'testString'
        request_analytics_model['pageUrl'] = 'testString'

        message_input_options_spelling_model = {} # MessageInputOptionsSpelling
        message_input_options_spelling_model['suggestions'] = True
        message_input_options_spelling_model['auto_correct'] = True

        message_input_options_model = {} # MessageInputOptions
        message_input_options_model['restart'] = False
        message_input_options_model['alternate_intents'] = False
        message_input_options_model['async_callout'] = False
        message_input_options_model['spelling'] = message_input_options_spelling_model
        message_input_options_model['debug'] = False
        message_input_options_model['return_context'] = False
        message_input_options_model['export'] = False

        message_input_model = {} # MessageInput
        message_input_model['message_type'] = 'text'
        message_input_model['text'] = 'testString'
        message_input_model['intents'] = [runtime_intent_model]
        message_input_model['entities'] = [runtime_entity_model]
        message_input_model['suggestion_id'] = 'testString'
        message_input_model['attachments'] = [message_input_attachment_model]
        message_input_model['analytics'] = request_analytics_model
        message_input_model['options'] = message_input_options_model

        dialog_node_output_options_element_value_model = {} # DialogNodeOutputOptionsElementValue
        dialog_node_output_options_element_value_model['input'] = message_input_model

        # Construct a json representation of a DialogNodeOutputOptionsElement model
        dialog_node_output_options_element_model_json = {}
        dialog_node_output_options_element_model_json['label'] = 'testString'
        dialog_node_output_options_element_model_json['value'] = dialog_node_output_options_element_value_model

        # Construct a model instance of DialogNodeOutputOptionsElement by calling from_dict on the json representation
        dialog_node_output_options_element_model = DialogNodeOutputOptionsElement.from_dict(dialog_node_output_options_element_model_json)
        # `is not None` instead of `!= False` (PEP 8 E712)
        assert dialog_node_output_options_element_model is not None

        # Round-trip through the instance __dict__ to exercise the keyword constructor
        dialog_node_output_options_element_model_dict = DialogNodeOutputOptionsElement.from_dict(dialog_node_output_options_element_model_json).__dict__
        dialog_node_output_options_element_model2 = DialogNodeOutputOptionsElement(**dialog_node_output_options_element_model_dict)

        # Verify the model instances are equivalent
        assert dialog_node_output_options_element_model == dialog_node_output_options_element_model2

        # Convert model instance back to dict and verify no loss of data
        dialog_node_output_options_element_model_json2 = dialog_node_output_options_element_model.to_dict()
        assert dialog_node_output_options_element_model_json2 == dialog_node_output_options_element_model_json
+
+
class TestModel_DialogNodeOutputOptionsElementValue:
    """
    Test Class for DialogNodeOutputOptionsElementValue
    """

    def test_dialog_node_output_options_element_value_serialization(self):
        """
        Test serialization/deserialization for DialogNodeOutputOptionsElementValue
        """

        # Construct dict forms of any model objects needed in order to build this model.

        runtime_intent_model = {} # RuntimeIntent
        runtime_intent_model['intent'] = 'testString'
        runtime_intent_model['confidence'] = 72.5
        runtime_intent_model['skill'] = 'testString'

        capture_group_model = {} # CaptureGroup
        capture_group_model['group'] = 'testString'
        capture_group_model['location'] = [38]

        runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
        runtime_entity_interpretation_model['calendar_type'] = 'testString'
        runtime_entity_interpretation_model['datetime_link'] = 'testString'
        runtime_entity_interpretation_model['festival'] = 'testString'
        runtime_entity_interpretation_model['granularity'] = 'day'
        runtime_entity_interpretation_model['range_link'] = 'testString'
        runtime_entity_interpretation_model['range_modifier'] = 'testString'
        runtime_entity_interpretation_model['relative_day'] = 72.5
        runtime_entity_interpretation_model['relative_month'] = 72.5
        runtime_entity_interpretation_model['relative_week'] = 72.5
        runtime_entity_interpretation_model['relative_weekend'] = 72.5
        runtime_entity_interpretation_model['relative_year'] = 72.5
        runtime_entity_interpretation_model['specific_day'] = 72.5
        runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
        runtime_entity_interpretation_model['specific_month'] = 72.5
        runtime_entity_interpretation_model['specific_quarter'] = 72.5
        runtime_entity_interpretation_model['specific_year'] = 72.5
        runtime_entity_interpretation_model['numeric_value'] = 72.5
        runtime_entity_interpretation_model['subtype'] = 'testString'
        runtime_entity_interpretation_model['part_of_day'] = 'testString'
        runtime_entity_interpretation_model['relative_hour'] = 72.5
        runtime_entity_interpretation_model['relative_minute'] = 72.5
        runtime_entity_interpretation_model['relative_second'] = 72.5
        runtime_entity_interpretation_model['specific_hour'] = 72.5
        runtime_entity_interpretation_model['specific_minute'] = 72.5
        runtime_entity_interpretation_model['specific_second'] = 72.5
        runtime_entity_interpretation_model['timezone'] = 'testString'

        runtime_entity_alternative_model = {} # RuntimeEntityAlternative
        runtime_entity_alternative_model['value'] = 'testString'
        runtime_entity_alternative_model['confidence'] = 72.5

        runtime_entity_role_model = {} # RuntimeEntityRole
        runtime_entity_role_model['type'] = 'date_from'

        runtime_entity_model = {} # RuntimeEntity
        runtime_entity_model['entity'] = 'testString'
        runtime_entity_model['location'] = [38]
        runtime_entity_model['value'] = 'testString'
        runtime_entity_model['confidence'] = 72.5
        runtime_entity_model['groups'] = [capture_group_model]
        runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
        runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
        runtime_entity_model['role'] = runtime_entity_role_model
        runtime_entity_model['skill'] = 'testString'

        message_input_attachment_model = {} # MessageInputAttachment
        message_input_attachment_model['url'] = 'testString'
        message_input_attachment_model['media_type'] = 'testString'

        request_analytics_model = {} # RequestAnalytics
        request_analytics_model['browser'] = 'testString'
        request_analytics_model['device'] = 'testString'
        request_analytics_model['pageUrl'] = 'testString'

        message_input_options_spelling_model = {} # MessageInputOptionsSpelling
        message_input_options_spelling_model['suggestions'] = True
        message_input_options_spelling_model['auto_correct'] = True

        message_input_options_model = {} # MessageInputOptions
        message_input_options_model['restart'] = False
        message_input_options_model['alternate_intents'] = False
        message_input_options_model['async_callout'] = False
        message_input_options_model['spelling'] = message_input_options_spelling_model
        message_input_options_model['debug'] = False
        message_input_options_model['return_context'] = False
        message_input_options_model['export'] = False

        message_input_model = {} # MessageInput
        message_input_model['message_type'] = 'text'
        message_input_model['text'] = 'testString'
        message_input_model['intents'] = [runtime_intent_model]
        message_input_model['entities'] = [runtime_entity_model]
        message_input_model['suggestion_id'] = 'testString'
        message_input_model['attachments'] = [message_input_attachment_model]
        message_input_model['analytics'] = request_analytics_model
        message_input_model['options'] = message_input_options_model

        # Construct a json representation of a DialogNodeOutputOptionsElementValue model
        dialog_node_output_options_element_value_model_json = {}
        dialog_node_output_options_element_value_model_json['input'] = message_input_model

        # Construct a model instance of DialogNodeOutputOptionsElementValue by calling from_dict on the json representation
        dialog_node_output_options_element_value_model = DialogNodeOutputOptionsElementValue.from_dict(dialog_node_output_options_element_value_model_json)
        # `is not None` instead of `!= False` (PEP 8 E712)
        assert dialog_node_output_options_element_value_model is not None

        # Round-trip through the instance __dict__ to exercise the keyword constructor
        dialog_node_output_options_element_value_model_dict = DialogNodeOutputOptionsElementValue.from_dict(dialog_node_output_options_element_value_model_json).__dict__
        dialog_node_output_options_element_value_model2 = DialogNodeOutputOptionsElementValue(**dialog_node_output_options_element_value_model_dict)

        # Verify the model instances are equivalent
        assert dialog_node_output_options_element_value_model == dialog_node_output_options_element_value_model2

        # Convert model instance back to dict and verify no loss of data
        dialog_node_output_options_element_value_model_json2 = dialog_node_output_options_element_value_model.to_dict()
        assert dialog_node_output_options_element_value_model_json2 == dialog_node_output_options_element_value_model_json
+
+
class TestModel_DialogNodeVisited:
    """
    Test Class for DialogNodeVisited
    """

    def test_dialog_node_visited_serialization(self):
        """
        Test serialization/deserialization for DialogNodeVisited
        """

        # Construct a json representation of a DialogNodeVisited model
        dialog_node_visited_model_json = {}
        dialog_node_visited_model_json['dialog_node'] = 'testString'
        dialog_node_visited_model_json['title'] = 'testString'
        dialog_node_visited_model_json['conditions'] = 'testString'

        # Construct a model instance of DialogNodeVisited by calling from_dict on the json representation
        dialog_node_visited_model = DialogNodeVisited.from_dict(dialog_node_visited_model_json)
        # `is not None` instead of `!= False` (PEP 8 E712)
        assert dialog_node_visited_model is not None

        # Round-trip through the instance __dict__ to exercise the keyword constructor
        dialog_node_visited_model_dict = DialogNodeVisited.from_dict(dialog_node_visited_model_json).__dict__
        dialog_node_visited_model2 = DialogNodeVisited(**dialog_node_visited_model_dict)

        # Verify the model instances are equivalent
        assert dialog_node_visited_model == dialog_node_visited_model2

        # Convert model instance back to dict and verify no loss of data
        dialog_node_visited_model_json2 = dialog_node_visited_model.to_dict()
        assert dialog_node_visited_model_json2 == dialog_node_visited_model_json
+
+
class TestModel_DialogSuggestion:
    """
    Test Class for DialogSuggestion
    """

    def test_dialog_suggestion_serialization(self):
        """
        Test serialization/deserialization for DialogSuggestion
        """

        # Dict forms of the nested models, written as literals.
        runtime_intent = {  # RuntimeIntent
            'intent': 'testString',
            'confidence': 72.5,
            'skill': 'testString',
        }

        capture_group = {  # CaptureGroup
            'group': 'testString',
            'location': [38],
        }

        entity_interpretation = {  # RuntimeEntityInterpretation
            'calendar_type': 'testString',
            'datetime_link': 'testString',
            'festival': 'testString',
            'granularity': 'day',
            'range_link': 'testString',
            'range_modifier': 'testString',
            'relative_day': 72.5,
            'relative_month': 72.5,
            'relative_week': 72.5,
            'relative_weekend': 72.5,
            'relative_year': 72.5,
            'specific_day': 72.5,
            'specific_day_of_week': 'testString',
            'specific_month': 72.5,
            'specific_quarter': 72.5,
            'specific_year': 72.5,
            'numeric_value': 72.5,
            'subtype': 'testString',
            'part_of_day': 'testString',
            'relative_hour': 72.5,
            'relative_minute': 72.5,
            'relative_second': 72.5,
            'specific_hour': 72.5,
            'specific_minute': 72.5,
            'specific_second': 72.5,
            'timezone': 'testString',
        }

        entity_alternative = {  # RuntimeEntityAlternative
            'value': 'testString',
            'confidence': 72.5,
        }

        entity_role = {'type': 'date_from'}  # RuntimeEntityRole

        runtime_entity = {  # RuntimeEntity
            'entity': 'testString',
            'location': [38],
            'value': 'testString',
            'confidence': 72.5,
            'groups': [capture_group],
            'interpretation': entity_interpretation,
            'alternatives': [entity_alternative],
            'role': entity_role,
            'skill': 'testString',
        }

        attachment = {  # MessageInputAttachment
            'url': 'testString',
            'media_type': 'testString',
        }

        analytics = {  # RequestAnalytics
            'browser': 'testString',
            'device': 'testString',
            'pageUrl': 'testString',
        }

        spelling = {  # MessageInputOptionsSpelling
            'suggestions': True,
            'auto_correct': True,
        }

        input_options = {  # MessageInputOptions
            'restart': False,
            'alternate_intents': False,
            'async_callout': False,
            'spelling': spelling,
            'debug': False,
            'return_context': False,
            'export': False,
        }

        message_input = {  # MessageInput
            'message_type': 'text',
            'text': 'testString',
            'intents': [runtime_intent],
            'entities': [runtime_entity],
            'suggestion_id': 'testString',
            'attachments': [attachment],
            'analytics': analytics,
            'options': input_options,
        }

        suggestion_value = {'input': message_input}  # DialogSuggestionValue

        # JSON representation of the DialogSuggestion model itself.
        source_json = {
            'label': 'testString',
            'value': suggestion_value,
            'output': {'anyKey': 'anyValue'},
        }

        # Deserialize: dict -> model via from_dict.
        model = DialogSuggestion.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first model's attribute dict.
        model2 = DialogSuggestion(**DialogSuggestion.from_dict(source_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == source_json
+
+
class TestModel_DialogSuggestionValue:
    """
    Test Class for DialogSuggestionValue
    """

    def test_dialog_suggestion_value_serialization(self):
        """
        Test serialization/deserialization for DialogSuggestionValue
        """

        # Dict forms of the nested models, written as literals.
        runtime_intent = {  # RuntimeIntent
            'intent': 'testString',
            'confidence': 72.5,
            'skill': 'testString',
        }

        capture_group = {  # CaptureGroup
            'group': 'testString',
            'location': [38],
        }

        entity_interpretation = {  # RuntimeEntityInterpretation
            'calendar_type': 'testString',
            'datetime_link': 'testString',
            'festival': 'testString',
            'granularity': 'day',
            'range_link': 'testString',
            'range_modifier': 'testString',
            'relative_day': 72.5,
            'relative_month': 72.5,
            'relative_week': 72.5,
            'relative_weekend': 72.5,
            'relative_year': 72.5,
            'specific_day': 72.5,
            'specific_day_of_week': 'testString',
            'specific_month': 72.5,
            'specific_quarter': 72.5,
            'specific_year': 72.5,
            'numeric_value': 72.5,
            'subtype': 'testString',
            'part_of_day': 'testString',
            'relative_hour': 72.5,
            'relative_minute': 72.5,
            'relative_second': 72.5,
            'specific_hour': 72.5,
            'specific_minute': 72.5,
            'specific_second': 72.5,
            'timezone': 'testString',
        }

        entity_alternative = {  # RuntimeEntityAlternative
            'value': 'testString',
            'confidence': 72.5,
        }

        entity_role = {'type': 'date_from'}  # RuntimeEntityRole

        runtime_entity = {  # RuntimeEntity
            'entity': 'testString',
            'location': [38],
            'value': 'testString',
            'confidence': 72.5,
            'groups': [capture_group],
            'interpretation': entity_interpretation,
            'alternatives': [entity_alternative],
            'role': entity_role,
            'skill': 'testString',
        }

        attachment = {  # MessageInputAttachment
            'url': 'testString',
            'media_type': 'testString',
        }

        analytics = {  # RequestAnalytics
            'browser': 'testString',
            'device': 'testString',
            'pageUrl': 'testString',
        }

        spelling = {  # MessageInputOptionsSpelling
            'suggestions': True,
            'auto_correct': True,
        }

        input_options = {  # MessageInputOptions
            'restart': False,
            'alternate_intents': False,
            'async_callout': False,
            'spelling': spelling,
            'debug': False,
            'return_context': False,
            'export': False,
        }

        message_input = {  # MessageInput
            'message_type': 'text',
            'text': 'testString',
            'intents': [runtime_intent],
            'entities': [runtime_entity],
            'suggestion_id': 'testString',
            'attachments': [attachment],
            'analytics': analytics,
            'options': input_options,
        }

        # JSON representation of the DialogSuggestionValue model itself.
        source_json = {'input': message_input}

        # Deserialize: dict -> model via from_dict.
        model = DialogSuggestionValue.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first model's attribute dict.
        model2 = DialogSuggestionValue(**DialogSuggestionValue.from_dict(source_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == source_json
+
+
class TestModel_DtmfCommandInfo:
    """
    Test Class for DtmfCommandInfo
    """

    def test_dtmf_command_info_serialization(self):
        """
        Test serialization/deserialization for DtmfCommandInfo
        """

        # JSON representation of a DtmfCommandInfo model, as one literal.
        source_json = {
            'type': 'collect',
            'parameters': {'anyKey': 'anyValue'},
        }

        # Deserialize: dict -> model via from_dict.
        model = DtmfCommandInfo.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first model's attribute dict.
        model2 = DtmfCommandInfo(**DtmfCommandInfo.from_dict(source_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == source_json
+
+
class TestModel_Environment:
    """
    Test Class for Environment
    """

    def test_environment_serialization(self):
        """
        Test serialization/deserialization for Environment
        """

        # Dict forms of the nested models, written as literals.
        orchestration = {  # BaseEnvironmentOrchestration
            'search_skill_fallback': True,
        }

        skill_ref = {  # EnvironmentSkill
            'skill_id': 'testString',
            'type': 'dialog',
            'disabled': True,
            'snapshot': 'testString',
            'skill_reference': 'testString',
        }

        # JSON representation of the Environment model itself.
        source_json = {
            'name': 'testString',
            'description': 'testString',
            'orchestration': orchestration,
            'session_timeout': 10,
            'skill_references': [skill_ref],
        }

        # Deserialize: dict -> model via from_dict.
        model = Environment.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first model's attribute dict.
        model2 = Environment(**Environment.from_dict(source_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == source_json
+
+
class TestModel_EnvironmentCollection:
    """
    Test Class for EnvironmentCollection
    """

    def test_environment_collection_serialization(self):
        """
        Test serialization/deserialization for EnvironmentCollection
        """

        # Dict forms of the nested models, written as literals.
        orchestration = {  # BaseEnvironmentOrchestration
            'search_skill_fallback': True,
        }

        skill_ref = {  # EnvironmentSkill
            'skill_id': 'testString',
            'type': 'dialog',
            'disabled': True,
            'snapshot': 'testString',
            'skill_reference': 'testString',
        }

        environment = {  # Environment
            'name': 'testString',
            'description': 'testString',
            'orchestration': orchestration,
            'session_timeout': 10,
            'skill_references': [skill_ref],
        }

        pagination = {  # Pagination
            'refresh_url': 'testString',
            'next_url': 'testString',
            'total': 38,
            'matched': 38,
            'refresh_cursor': 'testString',
            'next_cursor': 'testString',
        }

        # JSON representation of the EnvironmentCollection model itself.
        source_json = {
            'environments': [environment],
            'pagination': pagination,
        }

        # Deserialize: dict -> model via from_dict.
        model = EnvironmentCollection.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first model's attribute dict.
        model2 = EnvironmentCollection(**EnvironmentCollection.from_dict(source_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == source_json
+
+
class TestModel_EnvironmentReference:
    """
    Test Class for EnvironmentReference
    """

    def test_environment_reference_serialization(self):
        """
        Test serialization/deserialization for EnvironmentReference
        """

        # JSON representation of an EnvironmentReference model.
        source_json = {'name': 'testString'}

        # Deserialize: dict -> model via from_dict.
        model = EnvironmentReference.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first model's attribute dict.
        model2 = EnvironmentReference(**EnvironmentReference.from_dict(source_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == source_json
+
+
class TestModel_EnvironmentSkill:
    """
    Test Class for EnvironmentSkill
    """

    def test_environment_skill_serialization(self):
        """
        Test serialization/deserialization for EnvironmentSkill
        """

        # JSON representation of an EnvironmentSkill model, as one literal.
        source_json = {
            'skill_id': 'testString',
            'type': 'dialog',
            'disabled': True,
            'snapshot': 'testString',
            'skill_reference': 'testString',
        }

        # Deserialize: dict -> model via from_dict.
        model = EnvironmentSkill.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first model's attribute dict.
        model2 = EnvironmentSkill(**EnvironmentSkill.from_dict(source_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == source_json
+
+
+class TestModel_FinalResponse:
+ """
+ Test Class for FinalResponse
+ """
+
+ def test_final_response_serialization(self):
+ """
+ Test serialization/deserialization for FinalResponse
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem
+ response_generic_citation_ranges_item_model['start'] = 38
+ response_generic_citation_ranges_item_model['end'] = 38
+
+ response_generic_citation_model = {} # ResponseGenericCitation
+ response_generic_citation_model['title'] = 'testString'
+ response_generic_citation_model['text'] = 'testString'
+ response_generic_citation_model['body'] = 'testString'
+ response_generic_citation_model['search_result_index'] = 38
+ response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model]
+
+ response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores
+ response_generic_confidence_scores_model['threshold'] = 72.5
+ response_generic_confidence_scores_model['pre_gen'] = 72.5
+ response_generic_confidence_scores_model['post_gen'] = 72.5
+ response_generic_confidence_scores_model['extractiveness'] = 72.5
+
+ search_results_result_metadata_model = {} # SearchResultsResultMetadata
+ search_results_result_metadata_model['document_retrieval_source'] = 'testString'
+ search_results_result_metadata_model['score'] = 38
+
+ search_results_model = {} # SearchResults
+ search_results_model['result_metadata'] = search_results_result_metadata_model
+ search_results_model['id'] = 'testString'
+ search_results_model['title'] = 'testString'
+ search_results_model['body'] = 'testString'
+
+ runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
+ runtime_response_generic_model['response_type'] = 'conversation_search'
+ runtime_response_generic_model['text'] = 'testString'
+ runtime_response_generic_model['citations_title'] = 'testString'
+ runtime_response_generic_model['citations'] = [response_generic_citation_model]
+ runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model
+ runtime_response_generic_model['response_length_option'] = 'testString'
+ runtime_response_generic_model['search_results'] = [search_results_model]
+ runtime_response_generic_model['disclaimer'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ dialog_node_action_model = {} # DialogNodeAction
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ dialog_node_visited_model = {} # DialogNodeVisited
+ dialog_node_visited_model['dialog_node'] = 'testString'
+ dialog_node_visited_model['title'] = 'testString'
+ dialog_node_visited_model['conditions'] = 'testString'
+
+ log_message_source_model = {} # LogMessageSourceDialogNode
+ log_message_source_model['type'] = 'dialog_node'
+ log_message_source_model['dialog_node'] = 'testString'
+
+ dialog_log_message_model = {} # DialogLogMessage
+ dialog_log_message_model['level'] = 'info'
+ dialog_log_message_model['message'] = 'testString'
+ dialog_log_message_model['code'] = 'testString'
+ dialog_log_message_model['source'] = log_message_source_model
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited
+ message_output_debug_turn_event_model['event'] = 'action_visited'
+ message_output_debug_turn_event_model['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_model['action_start_time'] = 'testString'
+ message_output_debug_turn_event_model['condition_type'] = 'user_defined'
+ message_output_debug_turn_event_model['reason'] = 'intent'
+ message_output_debug_turn_event_model['result_variable'] = 'testString'
+
+ message_output_debug_model = {} # MessageOutputDebug
+ message_output_debug_model['nodes_visited'] = [dialog_node_visited_model]
+ message_output_debug_model['log_messages'] = [dialog_log_message_model]
+ message_output_debug_model['branch_exited'] = True
+ message_output_debug_model['branch_exited_reason'] = 'completed'
+ message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model]
+
+ message_output_spelling_model = {} # MessageOutputSpelling
+ message_output_spelling_model['text'] = 'testString'
+ message_output_spelling_model['original_text'] = 'testString'
+ message_output_spelling_model['suggested_text'] = 'testString'
+
+ message_output_llm_metadata_model = {} # MessageOutputLLMMetadata
+ message_output_llm_metadata_model['task'] = 'testString'
+ message_output_llm_metadata_model['model_id'] = 'testString'
+
+ metadata_model = {} # Metadata
+ metadata_model['id'] = 38
+
+ message_stream_metadata_model = {} # MessageStreamMetadata
+ message_stream_metadata_model['streaming_metadata'] = metadata_model
+
+ final_response_output_model = {} # FinalResponseOutput
+ final_response_output_model['generic'] = [runtime_response_generic_model]
+ final_response_output_model['intents'] = [runtime_intent_model]
+ final_response_output_model['entities'] = [runtime_entity_model]
+ final_response_output_model['actions'] = [dialog_node_action_model]
+ final_response_output_model['debug'] = message_output_debug_model
+ final_response_output_model['user_defined'] = {'anyKey': 'anyValue'}
+ final_response_output_model['spelling'] = message_output_spelling_model
+ final_response_output_model['llm_metadata'] = [message_output_llm_metadata_model]
+ final_response_output_model['streaming_metadata'] = message_stream_metadata_model
+
+ message_context_global_system_model = {} # MessageContextGlobalSystem
+ message_context_global_system_model['timezone'] = 'testString'
+ message_context_global_system_model['user_id'] = 'testString'
+ message_context_global_system_model['turn_count'] = 38
+ message_context_global_system_model['locale'] = 'en-us'
+ message_context_global_system_model['reference_time'] = 'testString'
+ message_context_global_system_model['session_start_time'] = 'testString'
+ message_context_global_system_model['state'] = 'testString'
+ message_context_global_system_model['skip_user_input'] = True
+
+ message_context_global_model = {} # MessageContextGlobal
+ message_context_global_model['system'] = message_context_global_system_model
+
+ message_context_skill_system_model = {} # MessageContextSkillSystem
+ message_context_skill_system_model['state'] = 'testString'
+ message_context_skill_system_model['foo'] = 'testString'
+
+ message_context_dialog_skill_model = {} # MessageContextDialogSkill
+ message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_dialog_skill_model['system'] = message_context_skill_system_model
+
+ message_context_action_skill_model = {} # MessageContextActionSkill
+ message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_action_skill_model['system'] = message_context_skill_system_model
+ message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'}
+ message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'}
+
+ message_context_skills_model = {} # MessageContextSkills
+ message_context_skills_model['main skill'] = message_context_dialog_skill_model
+ message_context_skills_model['actions skill'] = message_context_action_skill_model
+
+ message_context_model = {} # MessageContext
+ message_context_model['global'] = message_context_global_model
+ message_context_model['skills'] = message_context_skills_model
+ message_context_model['integrations'] = {'anyKey': 'anyValue'}
+
+ message_output_model = {} # MessageOutput
+ message_output_model['generic'] = [runtime_response_generic_model]
+ message_output_model['intents'] = [runtime_intent_model]
+ message_output_model['entities'] = [runtime_entity_model]
+ message_output_model['actions'] = [dialog_node_action_model]
+ message_output_model['debug'] = message_output_debug_model
+ message_output_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_output_model['spelling'] = message_output_spelling_model
+ message_output_model['llm_metadata'] = [message_output_llm_metadata_model]
+
+ message_input_attachment_model = {} # MessageInputAttachment
+ message_input_attachment_model['url'] = 'testString'
+ message_input_attachment_model['media_type'] = 'testString'
+
+ request_analytics_model = {} # RequestAnalytics
+ request_analytics_model['browser'] = 'testString'
+ request_analytics_model['device'] = 'testString'
+ request_analytics_model['pageUrl'] = 'testString'
+
+ message_input_options_spelling_model = {} # MessageInputOptionsSpelling
+ message_input_options_spelling_model['suggestions'] = True
+ message_input_options_spelling_model['auto_correct'] = True
+
+ message_input_options_model = {} # MessageInputOptions
+ message_input_options_model['restart'] = False
+ message_input_options_model['alternate_intents'] = False
+ message_input_options_model['async_callout'] = False
+ message_input_options_model['spelling'] = message_input_options_spelling_model
+ message_input_options_model['debug'] = False
+ message_input_options_model['return_context'] = False
+ message_input_options_model['export'] = False
+
+ message_input_model = {} # MessageInput
+ message_input_model['message_type'] = 'text'
+ message_input_model['text'] = 'testString'
+ message_input_model['intents'] = [runtime_intent_model]
+ message_input_model['entities'] = [runtime_entity_model]
+ message_input_model['suggestion_id'] = 'testString'
+ message_input_model['attachments'] = [message_input_attachment_model]
+ message_input_model['analytics'] = request_analytics_model
+ message_input_model['options'] = message_input_options_model
+
+ # Construct a json representation of a FinalResponse model
+ final_response_model_json = {}
+ final_response_model_json['output'] = final_response_output_model
+ final_response_model_json['context'] = message_context_model
+ final_response_model_json['user_id'] = 'testString'
+ final_response_model_json['masked_output'] = message_output_model
+ final_response_model_json['masked_input'] = message_input_model
+
+ # Construct a model instance of FinalResponse by calling from_dict on the json representation
+ final_response_model = FinalResponse.from_dict(final_response_model_json)
+ assert final_response_model != False
+
+ # Construct a model instance of FinalResponse by calling from_dict on the json representation
+ final_response_model_dict = FinalResponse.from_dict(final_response_model_json).__dict__
+ final_response_model2 = FinalResponse(**final_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert final_response_model == final_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ final_response_model_json2 = final_response_model.to_dict()
+ assert final_response_model_json2 == final_response_model_json
+
+
+class TestModel_FinalResponseOutput:
+ """
+ Test Class for FinalResponseOutput
+ """
+
+ def test_final_response_output_serialization(self):
+ """
+ Test serialization/deserialization for FinalResponseOutput
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+ # The nested dicts are composed bottom-up; the class named in each trailing
+ # comment is the model the dict is intended to deserialize into.
+
+ response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem
+ response_generic_citation_ranges_item_model['start'] = 38
+ response_generic_citation_ranges_item_model['end'] = 38
+
+ response_generic_citation_model = {} # ResponseGenericCitation
+ response_generic_citation_model['title'] = 'testString'
+ response_generic_citation_model['text'] = 'testString'
+ response_generic_citation_model['body'] = 'testString'
+ response_generic_citation_model['search_result_index'] = 38
+ response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model]
+
+ response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores
+ response_generic_confidence_scores_model['threshold'] = 72.5
+ response_generic_confidence_scores_model['pre_gen'] = 72.5
+ response_generic_confidence_scores_model['post_gen'] = 72.5
+ response_generic_confidence_scores_model['extractiveness'] = 72.5
+
+ search_results_result_metadata_model = {} # SearchResultsResultMetadata
+ search_results_result_metadata_model['document_retrieval_source'] = 'testString'
+ search_results_result_metadata_model['score'] = 38
+
+ search_results_model = {} # SearchResults
+ search_results_model['result_metadata'] = search_results_result_metadata_model
+ search_results_model['id'] = 'testString'
+ search_results_model['title'] = 'testString'
+ search_results_model['body'] = 'testString'
+
+ runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
+ runtime_response_generic_model['response_type'] = 'conversation_search'
+ runtime_response_generic_model['text'] = 'testString'
+ runtime_response_generic_model['citations_title'] = 'testString'
+ runtime_response_generic_model['citations'] = [response_generic_citation_model]
+ runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model
+ runtime_response_generic_model['response_length_option'] = 'testString'
+ runtime_response_generic_model['search_results'] = [search_results_model]
+ runtime_response_generic_model['disclaimer'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ dialog_node_action_model = {} # DialogNodeAction
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ dialog_node_visited_model = {} # DialogNodeVisited
+ dialog_node_visited_model['dialog_node'] = 'testString'
+ dialog_node_visited_model['title'] = 'testString'
+ dialog_node_visited_model['conditions'] = 'testString'
+
+ log_message_source_model = {} # LogMessageSourceDialogNode
+ log_message_source_model['type'] = 'dialog_node'
+ log_message_source_model['dialog_node'] = 'testString'
+
+ dialog_log_message_model = {} # DialogLogMessage
+ dialog_log_message_model['level'] = 'info'
+ dialog_log_message_model['message'] = 'testString'
+ dialog_log_message_model['code'] = 'testString'
+ dialog_log_message_model['source'] = log_message_source_model
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited
+ message_output_debug_turn_event_model['event'] = 'action_visited'
+ message_output_debug_turn_event_model['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_model['action_start_time'] = 'testString'
+ message_output_debug_turn_event_model['condition_type'] = 'user_defined'
+ message_output_debug_turn_event_model['reason'] = 'intent'
+ message_output_debug_turn_event_model['result_variable'] = 'testString'
+
+ message_output_debug_model = {} # MessageOutputDebug
+ message_output_debug_model['nodes_visited'] = [dialog_node_visited_model]
+ message_output_debug_model['log_messages'] = [dialog_log_message_model]
+ message_output_debug_model['branch_exited'] = True
+ message_output_debug_model['branch_exited_reason'] = 'completed'
+ message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model]
+
+ message_output_spelling_model = {} # MessageOutputSpelling
+ message_output_spelling_model['text'] = 'testString'
+ message_output_spelling_model['original_text'] = 'testString'
+ message_output_spelling_model['suggested_text'] = 'testString'
+
+ message_output_llm_metadata_model = {} # MessageOutputLLMMetadata
+ message_output_llm_metadata_model['task'] = 'testString'
+ message_output_llm_metadata_model['model_id'] = 'testString'
+
+ metadata_model = {} # Metadata
+ metadata_model['id'] = 38
+
+ message_stream_metadata_model = {} # MessageStreamMetadata
+ message_stream_metadata_model['streaming_metadata'] = metadata_model
+
+ # Construct a json representation of a FinalResponseOutput model
+ final_response_output_model_json = {}
+ final_response_output_model_json['generic'] = [runtime_response_generic_model]
+ final_response_output_model_json['intents'] = [runtime_intent_model]
+ final_response_output_model_json['entities'] = [runtime_entity_model]
+ final_response_output_model_json['actions'] = [dialog_node_action_model]
+ final_response_output_model_json['debug'] = message_output_debug_model
+ final_response_output_model_json['user_defined'] = {'anyKey': 'anyValue'}
+ final_response_output_model_json['spelling'] = message_output_spelling_model
+ final_response_output_model_json['llm_metadata'] = [message_output_llm_metadata_model]
+ final_response_output_model_json['streaming_metadata'] = message_stream_metadata_model
+
+ # Construct a model instance of FinalResponseOutput by calling from_dict on the json representation
+ final_response_output_model = FinalResponseOutput.from_dict(final_response_output_model_json)
+ # NOTE(review): generated boilerplate — from_dict presumably returns a model
+ # instance, so `!= False` only guards against a falsy failure sentinel.
+ assert final_response_output_model != False
+
+ # Construct a model instance of FinalResponseOutput by calling from_dict on the json representation
+ final_response_output_model_dict = FinalResponseOutput.from_dict(final_response_output_model_json).__dict__
+ final_response_output_model2 = FinalResponseOutput(**final_response_output_model_dict)
+
+ # Verify the model instances are equivalent
+ assert final_response_output_model == final_response_output_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ # (round trip: to_dict() of the parsed model must equal the source JSON).
+ final_response_output_model_json2 = final_response_output_model.to_dict()
+ assert final_response_output_model_json2 == final_response_output_model_json
+
+
+class TestModel_GenerativeAITaskConfidenceScores:
+ """
+ Test Class for GenerativeAITaskConfidenceScores
+ """
+
+ def test_generative_ai_task_confidence_scores_serialization(self):
+ """
+ Test serialization/deserialization for GenerativeAITaskConfidenceScores
+ """
+
+ # Construct a json representation of a GenerativeAITaskConfidenceScores model
+ generative_ai_task_confidence_scores_model_json = {}
+ generative_ai_task_confidence_scores_model_json['pre_gen'] = 72.5
+ generative_ai_task_confidence_scores_model_json['pre_gen_threshold'] = 72.5
+ generative_ai_task_confidence_scores_model_json['post_gen'] = 72.5
+ generative_ai_task_confidence_scores_model_json['post_gen_threshold'] = 72.5
+
+ # Construct a model instance of GenerativeAITaskConfidenceScores by calling from_dict on the json representation
+ generative_ai_task_confidence_scores_model = GenerativeAITaskConfidenceScores.from_dict(generative_ai_task_confidence_scores_model_json)
+ # NOTE(review): generated boilerplate — from_dict presumably returns a model
+ # instance, so `!= False` only guards against a falsy failure sentinel.
+ assert generative_ai_task_confidence_scores_model != False
+
+ # Construct a model instance of GenerativeAITaskConfidenceScores by calling from_dict on the json representation
+ generative_ai_task_confidence_scores_model_dict = GenerativeAITaskConfidenceScores.from_dict(generative_ai_task_confidence_scores_model_json).__dict__
+ generative_ai_task_confidence_scores_model2 = GenerativeAITaskConfidenceScores(**generative_ai_task_confidence_scores_model_dict)
+
+ # Verify the model instances are equivalent
+ assert generative_ai_task_confidence_scores_model == generative_ai_task_confidence_scores_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ # (round trip: to_dict() of the parsed model must equal the source JSON).
+ generative_ai_task_confidence_scores_model_json2 = generative_ai_task_confidence_scores_model.to_dict()
+ assert generative_ai_task_confidence_scores_model_json2 == generative_ai_task_confidence_scores_model_json
+
+
+class TestModel_IntegrationReference:
+ """
+ Test Class for IntegrationReference
+ """
+
+ def test_integration_reference_serialization(self):
+ """
+ Test serialization/deserialization for IntegrationReference
+ """
+
+ # Construct a json representation of a IntegrationReference model
+ integration_reference_model_json = {}
+ integration_reference_model_json['integration_id'] = 'testString'
+ integration_reference_model_json['type'] = 'testString'
+
+ # Construct a model instance of IntegrationReference by calling from_dict on the json representation
+ integration_reference_model = IntegrationReference.from_dict(integration_reference_model_json)
+ # NOTE(review): generated boilerplate — from_dict presumably returns a model
+ # instance, so `!= False` only guards against a falsy failure sentinel.
+ assert integration_reference_model != False
+
+ # Construct a model instance of IntegrationReference by calling from_dict on the json representation
+ integration_reference_model_dict = IntegrationReference.from_dict(integration_reference_model_json).__dict__
+ integration_reference_model2 = IntegrationReference(**integration_reference_model_dict)
+
+ # Verify the model instances are equivalent
+ assert integration_reference_model == integration_reference_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ # (round trip: to_dict() of the parsed model must equal the source JSON).
+ integration_reference_model_json2 = integration_reference_model.to_dict()
+ assert integration_reference_model_json2 == integration_reference_model_json
+
+
+class TestModel_Log:
+ """
+ Test Class for Log
+ """
+
+ def test_log_serialization(self):
+ """
+ Test serialization/deserialization for Log
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+ # A Log pairs a request (input + context) with a response (output + context);
+ # the nested dicts below are composed bottom-up into both halves.
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ message_input_attachment_model = {} # MessageInputAttachment
+ message_input_attachment_model['url'] = 'testString'
+ message_input_attachment_model['media_type'] = 'testString'
+
+ request_analytics_model = {} # RequestAnalytics
+ request_analytics_model['browser'] = 'testString'
+ request_analytics_model['device'] = 'testString'
+ request_analytics_model['pageUrl'] = 'testString'
+
+ message_input_options_spelling_model = {} # MessageInputOptionsSpelling
+ message_input_options_spelling_model['suggestions'] = True
+ message_input_options_spelling_model['auto_correct'] = True
+
+ message_input_options_model = {} # MessageInputOptions
+ message_input_options_model['restart'] = False
+ message_input_options_model['alternate_intents'] = False
+ message_input_options_model['async_callout'] = False
+ message_input_options_model['spelling'] = message_input_options_spelling_model
+ message_input_options_model['debug'] = False
+ message_input_options_model['return_context'] = False
+ message_input_options_model['export'] = False
+
+ log_request_input_model = {} # LogRequestInput
+ log_request_input_model['message_type'] = 'text'
+ log_request_input_model['text'] = 'testString'
+ log_request_input_model['intents'] = [runtime_intent_model]
+ log_request_input_model['entities'] = [runtime_entity_model]
+ log_request_input_model['suggestion_id'] = 'testString'
+ log_request_input_model['attachments'] = [message_input_attachment_model]
+ log_request_input_model['analytics'] = request_analytics_model
+ log_request_input_model['options'] = message_input_options_model
+
+ message_context_global_system_model = {} # MessageContextGlobalSystem
+ message_context_global_system_model['timezone'] = 'testString'
+ message_context_global_system_model['user_id'] = 'testString'
+ message_context_global_system_model['turn_count'] = 38
+ message_context_global_system_model['locale'] = 'en-us'
+ message_context_global_system_model['reference_time'] = 'testString'
+ message_context_global_system_model['session_start_time'] = 'testString'
+ message_context_global_system_model['state'] = 'testString'
+ message_context_global_system_model['skip_user_input'] = True
+
+ message_context_global_model = {} # MessageContextGlobal
+ message_context_global_model['system'] = message_context_global_system_model
+
+ message_context_skill_system_model = {} # MessageContextSkillSystem
+ message_context_skill_system_model['state'] = 'testString'
+ message_context_skill_system_model['foo'] = 'testString'
+
+ message_context_dialog_skill_model = {} # MessageContextDialogSkill
+ message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_dialog_skill_model['system'] = message_context_skill_system_model
+
+ message_context_action_skill_model = {} # MessageContextActionSkill
+ message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_action_skill_model['system'] = message_context_skill_system_model
+ message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'}
+ message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'}
+
+ message_context_skills_model = {} # MessageContextSkills
+ message_context_skills_model['main skill'] = message_context_dialog_skill_model
+ message_context_skills_model['actions skill'] = message_context_action_skill_model
+
+ # The same context dict is shared by both the request and the response halves below.
+ message_context_model = {} # MessageContext
+ message_context_model['global'] = message_context_global_model
+ message_context_model['skills'] = message_context_skills_model
+ message_context_model['integrations'] = {'anyKey': 'anyValue'}
+
+ log_request_model = {} # LogRequest
+ log_request_model['input'] = log_request_input_model
+ log_request_model['context'] = message_context_model
+ log_request_model['user_id'] = 'testString'
+
+ response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem
+ response_generic_citation_ranges_item_model['start'] = 38
+ response_generic_citation_ranges_item_model['end'] = 38
+
+ response_generic_citation_model = {} # ResponseGenericCitation
+ response_generic_citation_model['title'] = 'testString'
+ response_generic_citation_model['text'] = 'testString'
+ response_generic_citation_model['body'] = 'testString'
+ response_generic_citation_model['search_result_index'] = 38
+ response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model]
+
+ response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores
+ response_generic_confidence_scores_model['threshold'] = 72.5
+ response_generic_confidence_scores_model['pre_gen'] = 72.5
+ response_generic_confidence_scores_model['post_gen'] = 72.5
+ response_generic_confidence_scores_model['extractiveness'] = 72.5
+
+ search_results_result_metadata_model = {} # SearchResultsResultMetadata
+ search_results_result_metadata_model['document_retrieval_source'] = 'testString'
+ search_results_result_metadata_model['score'] = 38
+
+ search_results_model = {} # SearchResults
+ search_results_model['result_metadata'] = search_results_result_metadata_model
+ search_results_model['id'] = 'testString'
+ search_results_model['title'] = 'testString'
+ search_results_model['body'] = 'testString'
+
+ runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
+ runtime_response_generic_model['response_type'] = 'conversation_search'
+ runtime_response_generic_model['text'] = 'testString'
+ runtime_response_generic_model['citations_title'] = 'testString'
+ runtime_response_generic_model['citations'] = [response_generic_citation_model]
+ runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model
+ runtime_response_generic_model['response_length_option'] = 'testString'
+ runtime_response_generic_model['search_results'] = [search_results_model]
+ runtime_response_generic_model['disclaimer'] = 'testString'
+
+ dialog_node_action_model = {} # DialogNodeAction
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ dialog_node_visited_model = {} # DialogNodeVisited
+ dialog_node_visited_model['dialog_node'] = 'testString'
+ dialog_node_visited_model['title'] = 'testString'
+ dialog_node_visited_model['conditions'] = 'testString'
+
+ log_message_source_model = {} # LogMessageSourceDialogNode
+ log_message_source_model['type'] = 'dialog_node'
+ log_message_source_model['dialog_node'] = 'testString'
+
+ dialog_log_message_model = {} # DialogLogMessage
+ dialog_log_message_model['level'] = 'info'
+ dialog_log_message_model['message'] = 'testString'
+ dialog_log_message_model['code'] = 'testString'
+ dialog_log_message_model['source'] = log_message_source_model
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited
+ message_output_debug_turn_event_model['event'] = 'action_visited'
+ message_output_debug_turn_event_model['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_model['action_start_time'] = 'testString'
+ message_output_debug_turn_event_model['condition_type'] = 'user_defined'
+ message_output_debug_turn_event_model['reason'] = 'intent'
+ message_output_debug_turn_event_model['result_variable'] = 'testString'
+
+ message_output_debug_model = {} # MessageOutputDebug
+ message_output_debug_model['nodes_visited'] = [dialog_node_visited_model]
+ message_output_debug_model['log_messages'] = [dialog_log_message_model]
+ message_output_debug_model['branch_exited'] = True
+ message_output_debug_model['branch_exited_reason'] = 'completed'
+ message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model]
+
+ message_output_spelling_model = {} # MessageOutputSpelling
+ message_output_spelling_model['text'] = 'testString'
+ message_output_spelling_model['original_text'] = 'testString'
+ message_output_spelling_model['suggested_text'] = 'testString'
+
+ message_output_llm_metadata_model = {} # MessageOutputLLMMetadata
+ message_output_llm_metadata_model['task'] = 'testString'
+ message_output_llm_metadata_model['model_id'] = 'testString'
+
+ log_response_output_model = {} # LogResponseOutput
+ log_response_output_model['generic'] = [runtime_response_generic_model]
+ log_response_output_model['intents'] = [runtime_intent_model]
+ log_response_output_model['entities'] = [runtime_entity_model]
+ log_response_output_model['actions'] = [dialog_node_action_model]
+ log_response_output_model['debug'] = message_output_debug_model
+ log_response_output_model['user_defined'] = {'anyKey': 'anyValue'}
+ log_response_output_model['spelling'] = message_output_spelling_model
+ log_response_output_model['llm_metadata'] = [message_output_llm_metadata_model]
+
+ log_response_model = {} # LogResponse
+ log_response_model['output'] = log_response_output_model
+ log_response_model['context'] = message_context_model
+ log_response_model['user_id'] = 'testString'
+
+ # Construct a json representation of a Log model
+ log_model_json = {}
+ log_model_json['log_id'] = 'testString'
+ log_model_json['request'] = log_request_model
+ log_model_json['response'] = log_response_model
+ log_model_json['assistant_id'] = 'testString'
+ log_model_json['session_id'] = 'testString'
+ log_model_json['skill_id'] = 'testString'
+ log_model_json['snapshot'] = 'testString'
+ log_model_json['request_timestamp'] = 'testString'
+ log_model_json['response_timestamp'] = 'testString'
+ log_model_json['language'] = 'testString'
+ log_model_json['customer_id'] = 'testString'
+
+ # Construct a model instance of Log by calling from_dict on the json representation
+ log_model = Log.from_dict(log_model_json)
+ # NOTE(review): generated boilerplate — from_dict presumably returns a model
+ # instance, so `!= False` only guards against a falsy failure sentinel.
+ assert log_model != False
+
+ # Construct a model instance of Log by calling from_dict on the json representation
+ log_model_dict = Log.from_dict(log_model_json).__dict__
+ log_model2 = Log(**log_model_dict)
+
+ # Verify the model instances are equivalent
+ assert log_model == log_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ # (round trip: to_dict() of the parsed model must equal the source JSON).
+ log_model_json2 = log_model.to_dict()
+ assert log_model_json2 == log_model_json
+
+
class TestModel_LogCollection:
    """
    Test Class for LogCollection
    """

    def test_log_collection_serialization(self):
        """
        Test serialization/deserialization for LogCollection
        """

        # Build the nested model dicts (innermost first) as literals.

        runtime_intent_model = {  # RuntimeIntent
            'intent': 'testString',
            'confidence': 72.5,
            'skill': 'testString',
        }

        capture_group_model = {  # CaptureGroup
            'group': 'testString',
            'location': [38],
        }

        runtime_entity_interpretation_model = {  # RuntimeEntityInterpretation
            'calendar_type': 'testString',
            'datetime_link': 'testString',
            'festival': 'testString',
            'granularity': 'day',
            'range_link': 'testString',
            'range_modifier': 'testString',
            'relative_day': 72.5,
            'relative_month': 72.5,
            'relative_week': 72.5,
            'relative_weekend': 72.5,
            'relative_year': 72.5,
            'specific_day': 72.5,
            'specific_day_of_week': 'testString',
            'specific_month': 72.5,
            'specific_quarter': 72.5,
            'specific_year': 72.5,
            'numeric_value': 72.5,
            'subtype': 'testString',
            'part_of_day': 'testString',
            'relative_hour': 72.5,
            'relative_minute': 72.5,
            'relative_second': 72.5,
            'specific_hour': 72.5,
            'specific_minute': 72.5,
            'specific_second': 72.5,
            'timezone': 'testString',
        }

        runtime_entity_alternative_model = {  # RuntimeEntityAlternative
            'value': 'testString',
            'confidence': 72.5,
        }

        runtime_entity_role_model = {  # RuntimeEntityRole
            'type': 'date_from',
        }

        runtime_entity_model = {  # RuntimeEntity
            'entity': 'testString',
            'location': [38],
            'value': 'testString',
            'confidence': 72.5,
            'groups': [capture_group_model],
            'interpretation': runtime_entity_interpretation_model,
            'alternatives': [runtime_entity_alternative_model],
            'role': runtime_entity_role_model,
            'skill': 'testString',
        }

        message_input_attachment_model = {  # MessageInputAttachment
            'url': 'testString',
            'media_type': 'testString',
        }

        request_analytics_model = {  # RequestAnalytics
            'browser': 'testString',
            'device': 'testString',
            'pageUrl': 'testString',
        }

        message_input_options_spelling_model = {  # MessageInputOptionsSpelling
            'suggestions': True,
            'auto_correct': True,
        }

        message_input_options_model = {  # MessageInputOptions
            'restart': False,
            'alternate_intents': False,
            'async_callout': False,
            'spelling': message_input_options_spelling_model,
            'debug': False,
            'return_context': False,
            'export': False,
        }

        log_request_input_model = {  # LogRequestInput
            'message_type': 'text',
            'text': 'testString',
            'intents': [runtime_intent_model],
            'entities': [runtime_entity_model],
            'suggestion_id': 'testString',
            'attachments': [message_input_attachment_model],
            'analytics': request_analytics_model,
            'options': message_input_options_model,
        }

        message_context_global_system_model = {  # MessageContextGlobalSystem
            'timezone': 'testString',
            'user_id': 'testString',
            'turn_count': 38,
            'locale': 'en-us',
            'reference_time': 'testString',
            'session_start_time': 'testString',
            'state': 'testString',
            'skip_user_input': True,
        }

        message_context_global_model = {  # MessageContextGlobal
            'system': message_context_global_system_model,
        }

        message_context_skill_system_model = {  # MessageContextSkillSystem
            'state': 'testString',
            'foo': 'testString',
        }

        message_context_dialog_skill_model = {  # MessageContextDialogSkill
            'user_defined': {'anyKey': 'anyValue'},
            'system': message_context_skill_system_model,
        }

        message_context_action_skill_model = {  # MessageContextActionSkill
            'user_defined': {'anyKey': 'anyValue'},
            'system': message_context_skill_system_model,
            'action_variables': {'anyKey': 'anyValue'},
            'skill_variables': {'anyKey': 'anyValue'},
        }

        message_context_skills_model = {  # MessageContextSkills
            'main skill': message_context_dialog_skill_model,
            'actions skill': message_context_action_skill_model,
        }

        message_context_model = {  # MessageContext
            'global': message_context_global_model,
            'skills': message_context_skills_model,
            'integrations': {'anyKey': 'anyValue'},
        }

        log_request_model = {  # LogRequest
            'input': log_request_input_model,
            'context': message_context_model,
            'user_id': 'testString',
        }

        response_generic_citation_ranges_item_model = {  # ResponseGenericCitationRangesItem
            'start': 38,
            'end': 38,
        }

        response_generic_citation_model = {  # ResponseGenericCitation
            'title': 'testString',
            'text': 'testString',
            'body': 'testString',
            'search_result_index': 38,
            'ranges': [response_generic_citation_ranges_item_model],
        }

        response_generic_confidence_scores_model = {  # ResponseGenericConfidenceScores
            'threshold': 72.5,
            'pre_gen': 72.5,
            'post_gen': 72.5,
            'extractiveness': 72.5,
        }

        search_results_result_metadata_model = {  # SearchResultsResultMetadata
            'document_retrieval_source': 'testString',
            'score': 38,
        }

        search_results_model = {  # SearchResults
            'result_metadata': search_results_result_metadata_model,
            'id': 'testString',
            'title': 'testString',
            'body': 'testString',
        }

        runtime_response_generic_model = {  # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
            'response_type': 'conversation_search',
            'text': 'testString',
            'citations_title': 'testString',
            'citations': [response_generic_citation_model],
            'confidence_scores': response_generic_confidence_scores_model,
            'response_length_option': 'testString',
            'search_results': [search_results_model],
            'disclaimer': 'testString',
        }

        dialog_node_action_model = {  # DialogNodeAction
            'name': 'testString',
            'type': 'client',
            'parameters': {'anyKey': 'anyValue'},
            'result_variable': 'testString',
            'credentials': 'testString',
        }

        dialog_node_visited_model = {  # DialogNodeVisited
            'dialog_node': 'testString',
            'title': 'testString',
            'conditions': 'testString',
        }

        log_message_source_model = {  # LogMessageSourceDialogNode
            'type': 'dialog_node',
            'dialog_node': 'testString',
        }

        dialog_log_message_model = {  # DialogLogMessage
            'level': 'info',
            'message': 'testString',
            'code': 'testString',
            'source': log_message_source_model,
        }

        turn_event_action_source_model = {  # TurnEventActionSource
            'type': 'action',
            'action': 'testString',
            'action_title': 'testString',
            'condition': 'testString',
        }

        message_output_debug_turn_event_model = {  # MessageOutputDebugTurnEventTurnEventActionVisited
            'event': 'action_visited',
            'source': turn_event_action_source_model,
            'action_start_time': 'testString',
            'condition_type': 'user_defined',
            'reason': 'intent',
            'result_variable': 'testString',
        }

        message_output_debug_model = {  # MessageOutputDebug
            'nodes_visited': [dialog_node_visited_model],
            'log_messages': [dialog_log_message_model],
            'branch_exited': True,
            'branch_exited_reason': 'completed',
            'turn_events': [message_output_debug_turn_event_model],
        }

        message_output_spelling_model = {  # MessageOutputSpelling
            'text': 'testString',
            'original_text': 'testString',
            'suggested_text': 'testString',
        }

        message_output_llm_metadata_model = {  # MessageOutputLLMMetadata
            'task': 'testString',
            'model_id': 'testString',
        }

        log_response_output_model = {  # LogResponseOutput
            'generic': [runtime_response_generic_model],
            'intents': [runtime_intent_model],
            'entities': [runtime_entity_model],
            'actions': [dialog_node_action_model],
            'debug': message_output_debug_model,
            'user_defined': {'anyKey': 'anyValue'},
            'spelling': message_output_spelling_model,
            'llm_metadata': [message_output_llm_metadata_model],
        }

        log_response_model = {  # LogResponse
            'output': log_response_output_model,
            'context': message_context_model,
            'user_id': 'testString',
        }

        log_model = {  # Log
            'log_id': 'testString',
            'request': log_request_model,
            'response': log_response_model,
            'assistant_id': 'testString',
            'session_id': 'testString',
            'skill_id': 'testString',
            'snapshot': 'testString',
            'request_timestamp': 'testString',
            'response_timestamp': 'testString',
            'language': 'testString',
            'customer_id': 'testString',
        }

        log_pagination_model = {  # LogPagination
            'next_url': 'testString',
            'matched': 38,
            'next_cursor': 'testString',
        }

        # Json representation of a LogCollection model
        log_collection_model_json = {
            'logs': [log_model],
            'pagination': log_pagination_model,
        }

        # Construct a model instance of LogCollection by calling from_dict on the json representation
        log_collection_model = LogCollection.from_dict(log_collection_model_json)
        assert log_collection_model != False

        # Construct a second instance from the first instance's attribute dict
        log_collection_model_dict = LogCollection.from_dict(log_collection_model_json).__dict__
        log_collection_model2 = LogCollection(**log_collection_model_dict)

        # Verify the model instances are equivalent
        assert log_collection_model == log_collection_model2

        # Convert model instance back to dict and verify no loss of data
        log_collection_model_json2 = log_collection_model.to_dict()
        assert log_collection_model_json2 == log_collection_model_json
+
class TestModel_LogPagination:
    """
    Test Class for LogPagination
    """

    def test_log_pagination_serialization(self):
        """
        Test serialization/deserialization for LogPagination
        """

        # Json representation of a LogPagination model
        log_pagination_model_json = {
            'next_url': 'testString',
            'matched': 38,
            'next_cursor': 'testString',
        }

        # Construct a model instance of LogPagination by calling from_dict on the json representation
        log_pagination_model = LogPagination.from_dict(log_pagination_model_json)
        assert log_pagination_model != False

        # Construct a second instance from the first instance's attribute dict
        log_pagination_model_dict = LogPagination.from_dict(log_pagination_model_json).__dict__
        log_pagination_model2 = LogPagination(**log_pagination_model_dict)

        # Verify the model instances are equivalent
        assert log_pagination_model == log_pagination_model2

        # Convert model instance back to dict and verify no loss of data
        log_pagination_model_json2 = log_pagination_model.to_dict()
        assert log_pagination_model_json2 == log_pagination_model_json
+
class TestModel_LogRequest:
    """
    Test Class for LogRequest
    """

    def test_log_request_serialization(self):
        """
        Test serialization/deserialization for LogRequest
        """

        # Build the nested model dicts (innermost first) as literals.

        runtime_intent_model = {  # RuntimeIntent
            'intent': 'testString',
            'confidence': 72.5,
            'skill': 'testString',
        }

        capture_group_model = {  # CaptureGroup
            'group': 'testString',
            'location': [38],
        }

        runtime_entity_interpretation_model = {  # RuntimeEntityInterpretation
            'calendar_type': 'testString',
            'datetime_link': 'testString',
            'festival': 'testString',
            'granularity': 'day',
            'range_link': 'testString',
            'range_modifier': 'testString',
            'relative_day': 72.5,
            'relative_month': 72.5,
            'relative_week': 72.5,
            'relative_weekend': 72.5,
            'relative_year': 72.5,
            'specific_day': 72.5,
            'specific_day_of_week': 'testString',
            'specific_month': 72.5,
            'specific_quarter': 72.5,
            'specific_year': 72.5,
            'numeric_value': 72.5,
            'subtype': 'testString',
            'part_of_day': 'testString',
            'relative_hour': 72.5,
            'relative_minute': 72.5,
            'relative_second': 72.5,
            'specific_hour': 72.5,
            'specific_minute': 72.5,
            'specific_second': 72.5,
            'timezone': 'testString',
        }

        runtime_entity_alternative_model = {  # RuntimeEntityAlternative
            'value': 'testString',
            'confidence': 72.5,
        }

        runtime_entity_role_model = {  # RuntimeEntityRole
            'type': 'date_from',
        }

        runtime_entity_model = {  # RuntimeEntity
            'entity': 'testString',
            'location': [38],
            'value': 'testString',
            'confidence': 72.5,
            'groups': [capture_group_model],
            'interpretation': runtime_entity_interpretation_model,
            'alternatives': [runtime_entity_alternative_model],
            'role': runtime_entity_role_model,
            'skill': 'testString',
        }

        message_input_attachment_model = {  # MessageInputAttachment
            'url': 'testString',
            'media_type': 'testString',
        }

        request_analytics_model = {  # RequestAnalytics
            'browser': 'testString',
            'device': 'testString',
            'pageUrl': 'testString',
        }

        message_input_options_spelling_model = {  # MessageInputOptionsSpelling
            'suggestions': True,
            'auto_correct': True,
        }

        message_input_options_model = {  # MessageInputOptions
            'restart': False,
            'alternate_intents': False,
            'async_callout': False,
            'spelling': message_input_options_spelling_model,
            'debug': False,
            'return_context': True,
            'export': True,
        }

        log_request_input_model = {  # LogRequestInput
            'message_type': 'text',
            'text': 'Hello',
            'intents': [runtime_intent_model],
            'entities': [runtime_entity_model],
            'suggestion_id': 'testString',
            'attachments': [message_input_attachment_model],
            'analytics': request_analytics_model,
            'options': message_input_options_model,
        }

        message_context_global_system_model = {  # MessageContextGlobalSystem
            'timezone': 'testString',
            'user_id': 'my_user_id',
            'turn_count': 38,
            'locale': 'en-us',
            'reference_time': 'testString',
            'session_start_time': 'testString',
            'state': 'testString',
            'skip_user_input': True,
        }

        message_context_global_model = {  # MessageContextGlobal
            'system': message_context_global_system_model,
        }

        message_context_skill_system_model = {  # MessageContextSkillSystem
            'state': 'testString',
            'foo': 'testString',
        }

        message_context_dialog_skill_model = {  # MessageContextDialogSkill
            'user_defined': {'anyKey': 'anyValue'},
            'system': message_context_skill_system_model,
        }

        message_context_action_skill_model = {  # MessageContextActionSkill
            'user_defined': {'anyKey': 'anyValue'},
            'system': message_context_skill_system_model,
            'action_variables': {'anyKey': 'anyValue'},
            'skill_variables': {'anyKey': 'anyValue'},
        }

        message_context_skills_model = {  # MessageContextSkills
            'main skill': message_context_dialog_skill_model,
            'actions skill': message_context_action_skill_model,
        }

        message_context_model = {  # MessageContext
            'global': message_context_global_model,
            'skills': message_context_skills_model,
            'integrations': {'anyKey': 'anyValue'},
        }

        # Json representation of a LogRequest model
        log_request_model_json = {
            'input': log_request_input_model,
            'context': message_context_model,
            'user_id': 'testString',
        }

        # Construct a model instance of LogRequest by calling from_dict on the json representation
        log_request_model = LogRequest.from_dict(log_request_model_json)
        assert log_request_model != False

        # Construct a second instance from the first instance's attribute dict
        log_request_model_dict = LogRequest.from_dict(log_request_model_json).__dict__
        log_request_model2 = LogRequest(**log_request_model_dict)

        # Verify the model instances are equivalent
        assert log_request_model == log_request_model2

        # Convert model instance back to dict and verify no loss of data
        log_request_model_json2 = log_request_model.to_dict()
        assert log_request_model_json2 == log_request_model_json
+
class TestModel_LogRequestInput:
    """
    Test Class for LogRequestInput
    """

    def test_log_request_input_serialization(self):
        """
        Test serialization/deserialization for LogRequestInput
        """

        # Build the nested model dicts (innermost first) as literals.

        runtime_intent_model = {  # RuntimeIntent
            'intent': 'testString',
            'confidence': 72.5,
            'skill': 'testString',
        }

        capture_group_model = {  # CaptureGroup
            'group': 'testString',
            'location': [38],
        }

        runtime_entity_interpretation_model = {  # RuntimeEntityInterpretation
            'calendar_type': 'testString',
            'datetime_link': 'testString',
            'festival': 'testString',
            'granularity': 'day',
            'range_link': 'testString',
            'range_modifier': 'testString',
            'relative_day': 72.5,
            'relative_month': 72.5,
            'relative_week': 72.5,
            'relative_weekend': 72.5,
            'relative_year': 72.5,
            'specific_day': 72.5,
            'specific_day_of_week': 'testString',
            'specific_month': 72.5,
            'specific_quarter': 72.5,
            'specific_year': 72.5,
            'numeric_value': 72.5,
            'subtype': 'testString',
            'part_of_day': 'testString',
            'relative_hour': 72.5,
            'relative_minute': 72.5,
            'relative_second': 72.5,
            'specific_hour': 72.5,
            'specific_minute': 72.5,
            'specific_second': 72.5,
            'timezone': 'testString',
        }

        runtime_entity_alternative_model = {  # RuntimeEntityAlternative
            'value': 'testString',
            'confidence': 72.5,
        }

        runtime_entity_role_model = {  # RuntimeEntityRole
            'type': 'date_from',
        }

        runtime_entity_model = {  # RuntimeEntity
            'entity': 'testString',
            'location': [38],
            'value': 'testString',
            'confidence': 72.5,
            'groups': [capture_group_model],
            'interpretation': runtime_entity_interpretation_model,
            'alternatives': [runtime_entity_alternative_model],
            'role': runtime_entity_role_model,
            'skill': 'testString',
        }

        message_input_attachment_model = {  # MessageInputAttachment
            'url': 'testString',
            'media_type': 'testString',
        }

        request_analytics_model = {  # RequestAnalytics
            'browser': 'testString',
            'device': 'testString',
            'pageUrl': 'testString',
        }

        message_input_options_spelling_model = {  # MessageInputOptionsSpelling
            'suggestions': True,
            'auto_correct': True,
        }

        message_input_options_model = {  # MessageInputOptions
            'restart': False,
            'alternate_intents': False,
            'async_callout': False,
            'spelling': message_input_options_spelling_model,
            'debug': False,
            'return_context': False,
            'export': False,
        }

        # Json representation of a LogRequestInput model
        log_request_input_model_json = {
            'message_type': 'text',
            'text': 'testString',
            'intents': [runtime_intent_model],
            'entities': [runtime_entity_model],
            'suggestion_id': 'testString',
            'attachments': [message_input_attachment_model],
            'analytics': request_analytics_model,
            'options': message_input_options_model,
        }

        # Construct a model instance of LogRequestInput by calling from_dict on the json representation
        log_request_input_model = LogRequestInput.from_dict(log_request_input_model_json)
        assert log_request_input_model != False

        # Construct a second instance from the first instance's attribute dict
        log_request_input_model_dict = LogRequestInput.from_dict(log_request_input_model_json).__dict__
        log_request_input_model2 = LogRequestInput(**log_request_input_model_dict)

        # Verify the model instances are equivalent
        assert log_request_input_model == log_request_input_model2

        # Convert model instance back to dict and verify no loss of data
        log_request_input_model_json2 = log_request_input_model.to_dict()
        assert log_request_input_model_json2 == log_request_input_model_json
+
+class TestModel_LogResponse:
+ """
+ Test Class for LogResponse
+ """
+
+ def test_log_response_serialization(self):
+ """
+ Test serialization/deserialization for LogResponse
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem
+ response_generic_citation_ranges_item_model['start'] = 38
+ response_generic_citation_ranges_item_model['end'] = 38
+
+ response_generic_citation_model = {} # ResponseGenericCitation
+ response_generic_citation_model['title'] = 'testString'
+ response_generic_citation_model['text'] = 'testString'
+ response_generic_citation_model['body'] = 'testString'
+ response_generic_citation_model['search_result_index'] = 38
+ response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model]
+
+ response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores
+ response_generic_confidence_scores_model['threshold'] = 72.5
+ response_generic_confidence_scores_model['pre_gen'] = 72.5
+ response_generic_confidence_scores_model['post_gen'] = 72.5
+ response_generic_confidence_scores_model['extractiveness'] = 72.5
+
+ search_results_result_metadata_model = {} # SearchResultsResultMetadata
+ search_results_result_metadata_model['document_retrieval_source'] = 'testString'
+ search_results_result_metadata_model['score'] = 38
+
+ search_results_model = {} # SearchResults
+ search_results_model['result_metadata'] = search_results_result_metadata_model
+ search_results_model['id'] = 'testString'
+ search_results_model['title'] = 'testString'
+ search_results_model['body'] = 'testString'
+
+ runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
+ runtime_response_generic_model['response_type'] = 'conversation_search'
+ runtime_response_generic_model['text'] = 'testString'
+ runtime_response_generic_model['citations_title'] = 'testString'
+ runtime_response_generic_model['citations'] = [response_generic_citation_model]
+ runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model
+ runtime_response_generic_model['response_length_option'] = 'testString'
+ runtime_response_generic_model['search_results'] = [search_results_model]
+ runtime_response_generic_model['disclaimer'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ dialog_node_action_model = {} # DialogNodeAction
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ dialog_node_visited_model = {} # DialogNodeVisited
+ dialog_node_visited_model['dialog_node'] = 'testString'
+ dialog_node_visited_model['title'] = 'testString'
+ dialog_node_visited_model['conditions'] = 'testString'
+
+ log_message_source_model = {} # LogMessageSourceDialogNode
+ log_message_source_model['type'] = 'dialog_node'
+ log_message_source_model['dialog_node'] = 'testString'
+
+ dialog_log_message_model = {} # DialogLogMessage
+ dialog_log_message_model['level'] = 'info'
+ dialog_log_message_model['message'] = 'testString'
+ dialog_log_message_model['code'] = 'testString'
+ dialog_log_message_model['source'] = log_message_source_model
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited
+ message_output_debug_turn_event_model['event'] = 'action_visited'
+ message_output_debug_turn_event_model['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_model['action_start_time'] = 'testString'
+ message_output_debug_turn_event_model['condition_type'] = 'user_defined'
+ message_output_debug_turn_event_model['reason'] = 'intent'
+ message_output_debug_turn_event_model['result_variable'] = 'testString'
+
+ message_output_debug_model = {} # MessageOutputDebug
+ message_output_debug_model['nodes_visited'] = [dialog_node_visited_model]
+ message_output_debug_model['log_messages'] = [dialog_log_message_model]
+ message_output_debug_model['branch_exited'] = True
+ message_output_debug_model['branch_exited_reason'] = 'completed'
+ message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model]
+
+ message_output_spelling_model = {} # MessageOutputSpelling
+ message_output_spelling_model['text'] = 'testString'
+ message_output_spelling_model['original_text'] = 'testString'
+ message_output_spelling_model['suggested_text'] = 'testString'
+
+ message_output_llm_metadata_model = {} # MessageOutputLLMMetadata
+ message_output_llm_metadata_model['task'] = 'testString'
+ message_output_llm_metadata_model['model_id'] = 'testString'
+
+ log_response_output_model = {} # LogResponseOutput
+ log_response_output_model['generic'] = [runtime_response_generic_model]
+ log_response_output_model['intents'] = [runtime_intent_model]
+ log_response_output_model['entities'] = [runtime_entity_model]
+ log_response_output_model['actions'] = [dialog_node_action_model]
+ log_response_output_model['debug'] = message_output_debug_model
+ log_response_output_model['user_defined'] = {'anyKey': 'anyValue'}
+ log_response_output_model['spelling'] = message_output_spelling_model
+ log_response_output_model['llm_metadata'] = [message_output_llm_metadata_model]
+
+ message_context_global_system_model = {} # MessageContextGlobalSystem
+ message_context_global_system_model['timezone'] = 'testString'
+ message_context_global_system_model['user_id'] = 'testString'
+ message_context_global_system_model['turn_count'] = 38
+ message_context_global_system_model['locale'] = 'en-us'
+ message_context_global_system_model['reference_time'] = 'testString'
+ message_context_global_system_model['session_start_time'] = 'testString'
+ message_context_global_system_model['state'] = 'testString'
+ message_context_global_system_model['skip_user_input'] = True
+
+ message_context_global_model = {} # MessageContextGlobal
+ message_context_global_model['system'] = message_context_global_system_model
+
+ message_context_skill_system_model = {} # MessageContextSkillSystem
+ message_context_skill_system_model['state'] = 'testString'
+ message_context_skill_system_model['foo'] = 'testString'
+
+ message_context_dialog_skill_model = {} # MessageContextDialogSkill
+ message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_dialog_skill_model['system'] = message_context_skill_system_model
+
+ message_context_action_skill_model = {} # MessageContextActionSkill
+ message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_action_skill_model['system'] = message_context_skill_system_model
+ message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'}
+ message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'}
+
+ message_context_skills_model = {} # MessageContextSkills
+ message_context_skills_model['main skill'] = message_context_dialog_skill_model
+ message_context_skills_model['actions skill'] = message_context_action_skill_model
+
+ message_context_model = {} # MessageContext
+ message_context_model['global'] = message_context_global_model
+ message_context_model['skills'] = message_context_skills_model
+ message_context_model['integrations'] = {'anyKey': 'anyValue'}
+
+ # Construct a json representation of a LogResponse model
+ log_response_model_json = {}
+ log_response_model_json['output'] = log_response_output_model
+ log_response_model_json['context'] = message_context_model
+ log_response_model_json['user_id'] = 'testString'
+
+ # Construct a model instance of LogResponse by calling from_dict on the json representation
+ log_response_model = LogResponse.from_dict(log_response_model_json)
+ assert log_response_model != False
+
+ # Construct a model instance of LogResponse by calling from_dict on the json representation
+ log_response_model_dict = LogResponse.from_dict(log_response_model_json).__dict__
+ log_response_model2 = LogResponse(**log_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert log_response_model == log_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ log_response_model_json2 = log_response_model.to_dict()
+ assert log_response_model_json2 == log_response_model_json
+
+
class TestModel_LogResponseOutput:
    """
    Test Class for LogResponseOutput
    """

    def test_log_response_output_serialization(self):
        """
        Test serialization/deserialization for LogResponseOutput
        """

        # Dict forms of the nested model objects, written as literals.

        response_generic_citation_ranges_item_model = {  # ResponseGenericCitationRangesItem
            'start': 38,
            'end': 38,
        }

        response_generic_citation_model = {  # ResponseGenericCitation
            'title': 'testString',
            'text': 'testString',
            'body': 'testString',
            'search_result_index': 38,
            'ranges': [response_generic_citation_ranges_item_model],
        }

        response_generic_confidence_scores_model = {  # ResponseGenericConfidenceScores
            'threshold': 72.5,
            'pre_gen': 72.5,
            'post_gen': 72.5,
            'extractiveness': 72.5,
        }

        search_results_result_metadata_model = {  # SearchResultsResultMetadata
            'document_retrieval_source': 'testString',
            'score': 38,
        }

        search_results_model = {  # SearchResults
            'result_metadata': search_results_result_metadata_model,
            'id': 'testString',
            'title': 'testString',
            'body': 'testString',
        }

        runtime_response_generic_model = {  # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
            'response_type': 'conversation_search',
            'text': 'testString',
            'citations_title': 'testString',
            'citations': [response_generic_citation_model],
            'confidence_scores': response_generic_confidence_scores_model,
            'response_length_option': 'testString',
            'search_results': [search_results_model],
            'disclaimer': 'testString',
        }

        runtime_intent_model = {  # RuntimeIntent
            'intent': 'testString',
            'confidence': 72.5,
            'skill': 'testString',
        }

        capture_group_model = {  # CaptureGroup
            'group': 'testString',
            'location': [38],
        }

        runtime_entity_interpretation_model = {  # RuntimeEntityInterpretation
            'calendar_type': 'testString',
            'datetime_link': 'testString',
            'festival': 'testString',
            'granularity': 'day',
            'range_link': 'testString',
            'range_modifier': 'testString',
            'relative_day': 72.5,
            'relative_month': 72.5,
            'relative_week': 72.5,
            'relative_weekend': 72.5,
            'relative_year': 72.5,
            'specific_day': 72.5,
            'specific_day_of_week': 'testString',
            'specific_month': 72.5,
            'specific_quarter': 72.5,
            'specific_year': 72.5,
            'numeric_value': 72.5,
            'subtype': 'testString',
            'part_of_day': 'testString',
            'relative_hour': 72.5,
            'relative_minute': 72.5,
            'relative_second': 72.5,
            'specific_hour': 72.5,
            'specific_minute': 72.5,
            'specific_second': 72.5,
            'timezone': 'testString',
        }

        runtime_entity_alternative_model = {  # RuntimeEntityAlternative
            'value': 'testString',
            'confidence': 72.5,
        }

        runtime_entity_role_model = {  # RuntimeEntityRole
            'type': 'date_from',
        }

        runtime_entity_model = {  # RuntimeEntity
            'entity': 'testString',
            'location': [38],
            'value': 'testString',
            'confidence': 72.5,
            'groups': [capture_group_model],
            'interpretation': runtime_entity_interpretation_model,
            'alternatives': [runtime_entity_alternative_model],
            'role': runtime_entity_role_model,
            'skill': 'testString',
        }

        dialog_node_action_model = {  # DialogNodeAction
            'name': 'testString',
            'type': 'client',
            'parameters': {'anyKey': 'anyValue'},
            'result_variable': 'testString',
            'credentials': 'testString',
        }

        dialog_node_visited_model = {  # DialogNodeVisited
            'dialog_node': 'testString',
            'title': 'testString',
            'conditions': 'testString',
        }

        log_message_source_model = {  # LogMessageSourceDialogNode
            'type': 'dialog_node',
            'dialog_node': 'testString',
        }

        dialog_log_message_model = {  # DialogLogMessage
            'level': 'info',
            'message': 'testString',
            'code': 'testString',
            'source': log_message_source_model,
        }

        turn_event_action_source_model = {  # TurnEventActionSource
            'type': 'action',
            'action': 'testString',
            'action_title': 'testString',
            'condition': 'testString',
        }

        message_output_debug_turn_event_model = {  # MessageOutputDebugTurnEventTurnEventActionVisited
            'event': 'action_visited',
            'source': turn_event_action_source_model,
            'action_start_time': 'testString',
            'condition_type': 'user_defined',
            'reason': 'intent',
            'result_variable': 'testString',
        }

        message_output_debug_model = {  # MessageOutputDebug
            'nodes_visited': [dialog_node_visited_model],
            'log_messages': [dialog_log_message_model],
            'branch_exited': True,
            'branch_exited_reason': 'completed',
            'turn_events': [message_output_debug_turn_event_model],
        }

        message_output_spelling_model = {  # MessageOutputSpelling
            'text': 'testString',
            'original_text': 'testString',
            'suggested_text': 'testString',
        }

        message_output_llm_metadata_model = {  # MessageOutputLLMMetadata
            'task': 'testString',
            'model_id': 'testString',
        }

        # json representation of a LogResponseOutput model
        log_response_output_model_json = {
            'generic': [runtime_response_generic_model],
            'intents': [runtime_intent_model],
            'entities': [runtime_entity_model],
            'actions': [dialog_node_action_model],
            'debug': message_output_debug_model,
            'user_defined': {'anyKey': 'anyValue'},
            'spelling': message_output_spelling_model,
            'llm_metadata': [message_output_llm_metadata_model],
        }

        # Round-trip: json dict -> model instance
        log_response_output_model = LogResponseOutput.from_dict(log_response_output_model_json)
        assert log_response_output_model != False

        # A second instance built from the first instance's attributes
        log_response_output_model_dict = LogResponseOutput.from_dict(log_response_output_model_json).__dict__
        log_response_output_model2 = LogResponseOutput(**log_response_output_model_dict)

        # The two instances compare equal
        assert log_response_output_model == log_response_output_model2

        # model -> dict round-trip loses no data
        log_response_output_model_json2 = log_response_output_model.to_dict()
        assert log_response_output_model_json2 == log_response_output_model_json
+
+
class TestModel_MessageContext:
    """
    Test Class for MessageContext
    """

    def test_message_context_serialization(self):
        """
        Test serialization/deserialization for MessageContext
        """

        # Dict forms of the nested model objects, written as literals.

        message_context_global_system_model = {  # MessageContextGlobalSystem
            'timezone': 'testString',
            'user_id': 'testString',
            'turn_count': 38,
            'locale': 'en-us',
            'reference_time': 'testString',
            'session_start_time': 'testString',
            'state': 'testString',
            'skip_user_input': True,
        }

        message_context_global_model = {  # MessageContextGlobal
            'system': message_context_global_system_model,
        }

        message_context_skill_system_model = {  # MessageContextSkillSystem
            'state': 'testString',
            'foo': 'testString',
        }

        message_context_dialog_skill_model = {  # MessageContextDialogSkill
            'user_defined': {'anyKey': 'anyValue'},
            'system': message_context_skill_system_model,
        }

        message_context_action_skill_model = {  # MessageContextActionSkill
            'user_defined': {'anyKey': 'anyValue'},
            'system': message_context_skill_system_model,
            'action_variables': {'anyKey': 'anyValue'},
            'skill_variables': {'anyKey': 'anyValue'},
        }

        message_context_skills_model = {  # MessageContextSkills
            'main skill': message_context_dialog_skill_model,
            'actions skill': message_context_action_skill_model,
        }

        # json representation of a MessageContext model
        message_context_model_json = {
            'global': message_context_global_model,
            'skills': message_context_skills_model,
            'integrations': {'anyKey': 'anyValue'},
        }

        # Round-trip: json dict -> model instance
        message_context_model = MessageContext.from_dict(message_context_model_json)
        assert message_context_model != False

        # A second instance built from the first instance's attributes
        message_context_model_dict = MessageContext.from_dict(message_context_model_json).__dict__
        message_context_model2 = MessageContext(**message_context_model_dict)

        # The two instances compare equal
        assert message_context_model == message_context_model2

        # model -> dict round-trip loses no data
        message_context_model_json2 = message_context_model.to_dict()
        assert message_context_model_json2 == message_context_model_json
+
+
class TestModel_MessageContextActionSkill:
    """
    Test Class for MessageContextActionSkill
    """

    def test_message_context_action_skill_serialization(self):
        """
        Test serialization/deserialization for MessageContextActionSkill
        """

        # Dict form of the nested skill-system model, written as a literal.
        message_context_skill_system_model = {  # MessageContextSkillSystem
            'state': 'testString',
            'foo': 'testString',
        }

        # json representation of a MessageContextActionSkill model
        message_context_action_skill_model_json = {
            'user_defined': {'anyKey': 'anyValue'},
            'system': message_context_skill_system_model,
            'action_variables': {'anyKey': 'anyValue'},
            'skill_variables': {'anyKey': 'anyValue'},
        }

        # Round-trip: json dict -> model instance
        message_context_action_skill_model = MessageContextActionSkill.from_dict(message_context_action_skill_model_json)
        assert message_context_action_skill_model != False

        # A second instance built from the first instance's attributes
        message_context_action_skill_model_dict = MessageContextActionSkill.from_dict(message_context_action_skill_model_json).__dict__
        message_context_action_skill_model2 = MessageContextActionSkill(**message_context_action_skill_model_dict)

        # The two instances compare equal
        assert message_context_action_skill_model == message_context_action_skill_model2

        # model -> dict round-trip loses no data
        message_context_action_skill_model_json2 = message_context_action_skill_model.to_dict()
        assert message_context_action_skill_model_json2 == message_context_action_skill_model_json
+
+
class TestModel_MessageContextDialogSkill:
    """
    Test Class for MessageContextDialogSkill
    """

    def test_message_context_dialog_skill_serialization(self):
        """
        Test serialization/deserialization for MessageContextDialogSkill
        """

        # Dict form of the nested skill-system model, written as a literal.
        message_context_skill_system_model = {  # MessageContextSkillSystem
            'state': 'testString',
            'foo': 'testString',
        }

        # json representation of a MessageContextDialogSkill model
        message_context_dialog_skill_model_json = {
            'user_defined': {'anyKey': 'anyValue'},
            'system': message_context_skill_system_model,
        }

        # Round-trip: json dict -> model instance
        message_context_dialog_skill_model = MessageContextDialogSkill.from_dict(message_context_dialog_skill_model_json)
        assert message_context_dialog_skill_model != False

        # A second instance built from the first instance's attributes
        message_context_dialog_skill_model_dict = MessageContextDialogSkill.from_dict(message_context_dialog_skill_model_json).__dict__
        message_context_dialog_skill_model2 = MessageContextDialogSkill(**message_context_dialog_skill_model_dict)

        # The two instances compare equal
        assert message_context_dialog_skill_model == message_context_dialog_skill_model2

        # model -> dict round-trip loses no data
        message_context_dialog_skill_model_json2 = message_context_dialog_skill_model.to_dict()
        assert message_context_dialog_skill_model_json2 == message_context_dialog_skill_model_json
+
+
class TestModel_MessageContextGlobal:
    """
    Test Class for MessageContextGlobal
    """

    def test_message_context_global_serialization(self):
        """
        Test serialization/deserialization for MessageContextGlobal
        """

        # Dict form of the nested global-system model, written as a literal.
        message_context_global_system_model = {  # MessageContextGlobalSystem
            'timezone': 'testString',
            'user_id': 'testString',
            'turn_count': 38,
            'locale': 'en-us',
            'reference_time': 'testString',
            'session_start_time': 'testString',
            'state': 'testString',
            'skip_user_input': True,
        }

        # json representation of a MessageContextGlobal model
        message_context_global_model_json = {
            'system': message_context_global_system_model,
        }

        # Round-trip: json dict -> model instance
        message_context_global_model = MessageContextGlobal.from_dict(message_context_global_model_json)
        assert message_context_global_model != False

        # A second instance built from the first instance's attributes
        message_context_global_model_dict = MessageContextGlobal.from_dict(message_context_global_model_json).__dict__
        message_context_global_model2 = MessageContextGlobal(**message_context_global_model_dict)

        # The two instances compare equal
        assert message_context_global_model == message_context_global_model2

        # model -> dict round-trip loses no data
        message_context_global_model_json2 = message_context_global_model.to_dict()
        assert message_context_global_model_json2 == message_context_global_model_json
+
+
class TestModel_MessageContextGlobalSystem:
    """
    Test Class for MessageContextGlobalSystem
    """

    def test_message_context_global_system_serialization(self):
        """
        Test serialization/deserialization for MessageContextGlobalSystem
        """

        # json representation of a MessageContextGlobalSystem model,
        # written as a literal.
        message_context_global_system_model_json = {
            'timezone': 'testString',
            'user_id': 'testString',
            'turn_count': 38,
            'locale': 'en-us',
            'reference_time': 'testString',
            'session_start_time': 'testString',
            'state': 'testString',
            'skip_user_input': True,
        }

        # Round-trip: json dict -> model instance
        message_context_global_system_model = MessageContextGlobalSystem.from_dict(message_context_global_system_model_json)
        assert message_context_global_system_model != False

        # A second instance built from the first instance's attributes
        message_context_global_system_model_dict = MessageContextGlobalSystem.from_dict(message_context_global_system_model_json).__dict__
        message_context_global_system_model2 = MessageContextGlobalSystem(**message_context_global_system_model_dict)

        # The two instances compare equal
        assert message_context_global_system_model == message_context_global_system_model2

        # model -> dict round-trip loses no data
        message_context_global_system_model_json2 = message_context_global_system_model.to_dict()
        assert message_context_global_system_model_json2 == message_context_global_system_model_json
+
+
class TestModel_MessageContextSkillSystem:
    """
    Test Class for MessageContextSkillSystem
    """

    def test_message_context_skill_system_serialization(self):
        """
        Test serialization/deserialization for MessageContextSkillSystem
        """

        # json representation of a MessageContextSkillSystem model,
        # written as a literal ('foo' is an arbitrary additional property).
        message_context_skill_system_model_json = {
            'state': 'testString',
            'foo': 'testString',
        }

        # Round-trip: json dict -> model instance
        message_context_skill_system_model = MessageContextSkillSystem.from_dict(message_context_skill_system_model_json)
        assert message_context_skill_system_model != False

        # A second instance built from the first instance's attributes
        message_context_skill_system_model_dict = MessageContextSkillSystem.from_dict(message_context_skill_system_model_json).__dict__
        message_context_skill_system_model2 = MessageContextSkillSystem(**message_context_skill_system_model_dict)

        # The two instances compare equal
        assert message_context_skill_system_model == message_context_skill_system_model2

        # model -> dict round-trip loses no data
        message_context_skill_system_model_json2 = message_context_skill_system_model.to_dict()
        assert message_context_skill_system_model_json2 == message_context_skill_system_model_json

        # Exercise the get_properties / set_properties accessors for
        # the model's arbitrary additional properties.
        message_context_skill_system_model.set_properties({})
        actual_dict = message_context_skill_system_model.get_properties()
        assert actual_dict == {}

        expected_dict = {'foo': 'testString'}
        message_context_skill_system_model.set_properties(expected_dict)
        actual_dict = message_context_skill_system_model.get_properties()
        assert actual_dict.keys() == expected_dict.keys()
+
+
class TestModel_MessageContextSkills:
    """
    Test Class for MessageContextSkills
    """

    def test_message_context_skills_serialization(self):
        """
        Test serialization/deserialization for MessageContextSkills
        """

        # Dict forms of the nested model objects, written as literals.

        message_context_skill_system_model = {  # MessageContextSkillSystem
            'state': 'testString',
            'foo': 'testString',
        }

        message_context_dialog_skill_model = {  # MessageContextDialogSkill
            'user_defined': {'anyKey': 'anyValue'},
            'system': message_context_skill_system_model,
        }

        message_context_action_skill_model = {  # MessageContextActionSkill
            'user_defined': {'anyKey': 'anyValue'},
            'system': message_context_skill_system_model,
            'action_variables': {'anyKey': 'anyValue'},
            'skill_variables': {'anyKey': 'anyValue'},
        }

        # json representation of a MessageContextSkills model
        message_context_skills_model_json = {
            'main skill': message_context_dialog_skill_model,
            'actions skill': message_context_action_skill_model,
        }

        # Round-trip: json dict -> model instance
        message_context_skills_model = MessageContextSkills.from_dict(message_context_skills_model_json)
        assert message_context_skills_model != False

        # A second instance built from the first instance's attributes
        message_context_skills_model_dict = MessageContextSkills.from_dict(message_context_skills_model_json).__dict__
        message_context_skills_model2 = MessageContextSkills(**message_context_skills_model_dict)

        # The two instances compare equal
        assert message_context_skills_model == message_context_skills_model2

        # model -> dict round-trip loses no data
        message_context_skills_model_json2 = message_context_skills_model.to_dict()
        assert message_context_skills_model_json2 == message_context_skills_model_json
+
+
+class TestModel_MessageInput:
+ """
+ Test Class for MessageInput
+ """
+
+ def test_message_input_serialization(self):
+ """
+ Test serialization/deserialization for MessageInput
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ message_input_attachment_model = {} # MessageInputAttachment
+ message_input_attachment_model['url'] = 'testString'
+ message_input_attachment_model['media_type'] = 'testString'
+
+ request_analytics_model = {} # RequestAnalytics
+ request_analytics_model['browser'] = 'testString'
+ request_analytics_model['device'] = 'testString'
+ request_analytics_model['pageUrl'] = 'testString'
+
+ message_input_options_spelling_model = {} # MessageInputOptionsSpelling
+ message_input_options_spelling_model['suggestions'] = True
+ message_input_options_spelling_model['auto_correct'] = True
+
+ message_input_options_model = {} # MessageInputOptions
+ message_input_options_model['restart'] = False
+ message_input_options_model['alternate_intents'] = False
+ message_input_options_model['async_callout'] = False
+ message_input_options_model['spelling'] = message_input_options_spelling_model
+ message_input_options_model['debug'] = False
+ message_input_options_model['return_context'] = False
+ message_input_options_model['export'] = False
+
+ # Construct a json representation of a MessageInput model
+ message_input_model_json = {}
+ message_input_model_json['message_type'] = 'text'
+ message_input_model_json['text'] = 'testString'
+ message_input_model_json['intents'] = [runtime_intent_model]
+ message_input_model_json['entities'] = [runtime_entity_model]
+ message_input_model_json['suggestion_id'] = 'testString'
+ message_input_model_json['attachments'] = [message_input_attachment_model]
+ message_input_model_json['analytics'] = request_analytics_model
+ message_input_model_json['options'] = message_input_options_model
+
+ # Construct a model instance of MessageInput by calling from_dict on the json representation
+ message_input_model = MessageInput.from_dict(message_input_model_json)
+ assert message_input_model != False
+
+ # Construct a second MessageInput instance from the first instance's attributes (constructor round-trip via __dict__)
+ message_input_model_dict = MessageInput.from_dict(message_input_model_json).__dict__
+ message_input_model2 = MessageInput(**message_input_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_input_model == message_input_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_input_model_json2 = message_input_model.to_dict()
+ assert message_input_model_json2 == message_input_model_json
+
+
+class TestModel_MessageInputAttachment:
+ """
+ Test Class for MessageInputAttachment
+ """
+
+ def test_message_input_attachment_serialization(self):
+ """
+ Test serialization/deserialization for MessageInputAttachment
+ """
+
+ # Construct a json representation of a MessageInputAttachment model
+ message_input_attachment_model_json = {}
+ message_input_attachment_model_json['url'] = 'testString'
+ message_input_attachment_model_json['media_type'] = 'testString'
+
+ # Construct a model instance of MessageInputAttachment by calling from_dict on the json representation
+ message_input_attachment_model = MessageInputAttachment.from_dict(message_input_attachment_model_json)
+ assert message_input_attachment_model != False
+
+ # Construct a second MessageInputAttachment instance from the first instance's attributes (constructor round-trip via __dict__)
+ message_input_attachment_model_dict = MessageInputAttachment.from_dict(message_input_attachment_model_json).__dict__
+ message_input_attachment_model2 = MessageInputAttachment(**message_input_attachment_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_input_attachment_model == message_input_attachment_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_input_attachment_model_json2 = message_input_attachment_model.to_dict()
+ assert message_input_attachment_model_json2 == message_input_attachment_model_json
+
+
+class TestModel_MessageInputOptions:
+ """
+ Test Class for MessageInputOptions
+ """
+
+ def test_message_input_options_serialization(self):
+ """
+ Test serialization/deserialization for MessageInputOptions
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ message_input_options_spelling_model = {} # MessageInputOptionsSpelling
+ message_input_options_spelling_model['suggestions'] = True
+ message_input_options_spelling_model['auto_correct'] = True
+
+ # Construct a json representation of a MessageInputOptions model
+ message_input_options_model_json = {}
+ message_input_options_model_json['restart'] = False
+ message_input_options_model_json['alternate_intents'] = False
+ message_input_options_model_json['async_callout'] = False
+ message_input_options_model_json['spelling'] = message_input_options_spelling_model
+ message_input_options_model_json['debug'] = False
+ message_input_options_model_json['return_context'] = False
+ message_input_options_model_json['export'] = False
+
+ # Construct a model instance of MessageInputOptions by calling from_dict on the json representation
+ message_input_options_model = MessageInputOptions.from_dict(message_input_options_model_json)
+ assert message_input_options_model != False
+
+ # Construct a second MessageInputOptions instance from the first instance's attributes (constructor round-trip via __dict__)
+ message_input_options_model_dict = MessageInputOptions.from_dict(message_input_options_model_json).__dict__
+ message_input_options_model2 = MessageInputOptions(**message_input_options_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_input_options_model == message_input_options_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_input_options_model_json2 = message_input_options_model.to_dict()
+ assert message_input_options_model_json2 == message_input_options_model_json
+
+
+class TestModel_MessageInputOptionsSpelling:
+ """
+ Test Class for MessageInputOptionsSpelling
+ """
+
+ def test_message_input_options_spelling_serialization(self):
+ """
+ Test serialization/deserialization for MessageInputOptionsSpelling
+ """
+
+ # Construct a json representation of a MessageInputOptionsSpelling model
+ message_input_options_spelling_model_json = {}
+ message_input_options_spelling_model_json['suggestions'] = True
+ message_input_options_spelling_model_json['auto_correct'] = True
+
+ # Construct a model instance of MessageInputOptionsSpelling by calling from_dict on the json representation
+ message_input_options_spelling_model = MessageInputOptionsSpelling.from_dict(message_input_options_spelling_model_json)
+ assert message_input_options_spelling_model != False
+
+ # Construct a second MessageInputOptionsSpelling instance from the first instance's attributes (constructor round-trip via __dict__)
+ message_input_options_spelling_model_dict = MessageInputOptionsSpelling.from_dict(message_input_options_spelling_model_json).__dict__
+ message_input_options_spelling_model2 = MessageInputOptionsSpelling(**message_input_options_spelling_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_input_options_spelling_model == message_input_options_spelling_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_input_options_spelling_model_json2 = message_input_options_spelling_model.to_dict()
+ assert message_input_options_spelling_model_json2 == message_input_options_spelling_model_json
+
+
+class TestModel_MessageOutput:
+ """
+ Test Class for MessageOutput
+ """
+
+ def test_message_output_serialization(self):
+ """
+ Test serialization/deserialization for MessageOutput
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem
+ response_generic_citation_ranges_item_model['start'] = 38
+ response_generic_citation_ranges_item_model['end'] = 38
+
+ response_generic_citation_model = {} # ResponseGenericCitation
+ response_generic_citation_model['title'] = 'testString'
+ response_generic_citation_model['text'] = 'testString'
+ response_generic_citation_model['body'] = 'testString'
+ response_generic_citation_model['search_result_index'] = 38
+ response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model]
+
+ response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores
+ response_generic_confidence_scores_model['threshold'] = 72.5
+ response_generic_confidence_scores_model['pre_gen'] = 72.5
+ response_generic_confidence_scores_model['post_gen'] = 72.5
+ response_generic_confidence_scores_model['extractiveness'] = 72.5
+
+ search_results_result_metadata_model = {} # SearchResultsResultMetadata
+ search_results_result_metadata_model['document_retrieval_source'] = 'testString'
+ search_results_result_metadata_model['score'] = 38
+
+ search_results_model = {} # SearchResults
+ search_results_model['result_metadata'] = search_results_result_metadata_model
+ search_results_model['id'] = 'testString'
+ search_results_model['title'] = 'testString'
+ search_results_model['body'] = 'testString'
+
+ runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
+ runtime_response_generic_model['response_type'] = 'conversation_search' # NOTE(review): model class name says "ConversationalSearch" — confirm this discriminator value against the generated SDK enum
+ runtime_response_generic_model['text'] = 'testString'
+ runtime_response_generic_model['citations_title'] = 'testString'
+ runtime_response_generic_model['citations'] = [response_generic_citation_model]
+ runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model
+ runtime_response_generic_model['response_length_option'] = 'testString'
+ runtime_response_generic_model['search_results'] = [search_results_model]
+ runtime_response_generic_model['disclaimer'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ dialog_node_action_model = {} # DialogNodeAction
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ dialog_node_visited_model = {} # DialogNodeVisited
+ dialog_node_visited_model['dialog_node'] = 'testString'
+ dialog_node_visited_model['title'] = 'testString'
+ dialog_node_visited_model['conditions'] = 'testString'
+
+ log_message_source_model = {} # LogMessageSourceDialogNode
+ log_message_source_model['type'] = 'dialog_node'
+ log_message_source_model['dialog_node'] = 'testString'
+
+ dialog_log_message_model = {} # DialogLogMessage
+ dialog_log_message_model['level'] = 'info'
+ dialog_log_message_model['message'] = 'testString'
+ dialog_log_message_model['code'] = 'testString'
+ dialog_log_message_model['source'] = log_message_source_model
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited
+ message_output_debug_turn_event_model['event'] = 'action_visited'
+ message_output_debug_turn_event_model['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_model['action_start_time'] = 'testString'
+ message_output_debug_turn_event_model['condition_type'] = 'user_defined'
+ message_output_debug_turn_event_model['reason'] = 'intent'
+ message_output_debug_turn_event_model['result_variable'] = 'testString'
+
+ message_output_debug_model = {} # MessageOutputDebug
+ message_output_debug_model['nodes_visited'] = [dialog_node_visited_model]
+ message_output_debug_model['log_messages'] = [dialog_log_message_model]
+ message_output_debug_model['branch_exited'] = True
+ message_output_debug_model['branch_exited_reason'] = 'completed'
+ message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model]
+
+ message_output_spelling_model = {} # MessageOutputSpelling
+ message_output_spelling_model['text'] = 'testString'
+ message_output_spelling_model['original_text'] = 'testString'
+ message_output_spelling_model['suggested_text'] = 'testString'
+
+ message_output_llm_metadata_model = {} # MessageOutputLLMMetadata
+ message_output_llm_metadata_model['task'] = 'testString'
+ message_output_llm_metadata_model['model_id'] = 'testString'
+
+ # Construct a json representation of a MessageOutput model
+ message_output_model_json = {}
+ message_output_model_json['generic'] = [runtime_response_generic_model]
+ message_output_model_json['intents'] = [runtime_intent_model]
+ message_output_model_json['entities'] = [runtime_entity_model]
+ message_output_model_json['actions'] = [dialog_node_action_model]
+ message_output_model_json['debug'] = message_output_debug_model
+ message_output_model_json['user_defined'] = {'anyKey': 'anyValue'}
+ message_output_model_json['spelling'] = message_output_spelling_model
+ message_output_model_json['llm_metadata'] = [message_output_llm_metadata_model]
+
+ # Construct a model instance of MessageOutput by calling from_dict on the json representation
+ message_output_model = MessageOutput.from_dict(message_output_model_json)
+ assert message_output_model != False
+
+ # Construct a second MessageOutput instance from the first instance's attributes (constructor round-trip via __dict__)
+ message_output_model_dict = MessageOutput.from_dict(message_output_model_json).__dict__
+ message_output_model2 = MessageOutput(**message_output_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_output_model == message_output_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_output_model_json2 = message_output_model.to_dict()
+ assert message_output_model_json2 == message_output_model_json
+
+
+class TestModel_MessageOutputDebug:
+ """
+ Test Class for MessageOutputDebug
+ """
+
+ def test_message_output_debug_serialization(self):
+ """
+ Test serialization/deserialization for MessageOutputDebug
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ dialog_node_visited_model = {} # DialogNodeVisited
+ dialog_node_visited_model['dialog_node'] = 'testString'
+ dialog_node_visited_model['title'] = 'testString'
+ dialog_node_visited_model['conditions'] = 'testString'
+
+ log_message_source_model = {} # LogMessageSourceDialogNode
+ log_message_source_model['type'] = 'dialog_node'
+ log_message_source_model['dialog_node'] = 'testString'
+
+ dialog_log_message_model = {} # DialogLogMessage
+ dialog_log_message_model['level'] = 'info'
+ dialog_log_message_model['message'] = 'testString'
+ dialog_log_message_model['code'] = 'testString'
+ dialog_log_message_model['source'] = log_message_source_model
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited
+ message_output_debug_turn_event_model['event'] = 'action_visited'
+ message_output_debug_turn_event_model['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_model['action_start_time'] = 'testString'
+ message_output_debug_turn_event_model['condition_type'] = 'user_defined'
+ message_output_debug_turn_event_model['reason'] = 'intent'
+ message_output_debug_turn_event_model['result_variable'] = 'testString'
+
+ # Construct a json representation of a MessageOutputDebug model
+ message_output_debug_model_json = {}
+ message_output_debug_model_json['nodes_visited'] = [dialog_node_visited_model]
+ message_output_debug_model_json['log_messages'] = [dialog_log_message_model]
+ message_output_debug_model_json['branch_exited'] = True
+ message_output_debug_model_json['branch_exited_reason'] = 'completed'
+ message_output_debug_model_json['turn_events'] = [message_output_debug_turn_event_model]
+
+ # Construct a model instance of MessageOutputDebug by calling from_dict on the json representation
+ message_output_debug_model = MessageOutputDebug.from_dict(message_output_debug_model_json)
+ assert message_output_debug_model != False
+
+ # Construct a second MessageOutputDebug instance from the first instance's attributes (constructor round-trip via __dict__)
+ message_output_debug_model_dict = MessageOutputDebug.from_dict(message_output_debug_model_json).__dict__
+ message_output_debug_model2 = MessageOutputDebug(**message_output_debug_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_output_debug_model == message_output_debug_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_output_debug_model_json2 = message_output_debug_model.to_dict()
+ assert message_output_debug_model_json2 == message_output_debug_model_json
+
+
+class TestModel_MessageOutputLLMMetadata:
+ """
+ Test Class for MessageOutputLLMMetadata
+ """
+
+ def test_message_output_llm_metadata_serialization(self):
+ """
+ Test serialization/deserialization for MessageOutputLLMMetadata
+ """
+
+ # Construct a json representation of a MessageOutputLLMMetadata model
+ message_output_llm_metadata_model_json = {}
+ message_output_llm_metadata_model_json['task'] = 'testString'
+ message_output_llm_metadata_model_json['model_id'] = 'testString'
+
+ # Construct a model instance of MessageOutputLLMMetadata by calling from_dict on the json representation
+ message_output_llm_metadata_model = MessageOutputLLMMetadata.from_dict(message_output_llm_metadata_model_json)
+ assert message_output_llm_metadata_model != False
+
+ # Construct a second MessageOutputLLMMetadata instance from the first instance's attributes (constructor round-trip via __dict__)
+ message_output_llm_metadata_model_dict = MessageOutputLLMMetadata.from_dict(message_output_llm_metadata_model_json).__dict__
+ message_output_llm_metadata_model2 = MessageOutputLLMMetadata(**message_output_llm_metadata_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_output_llm_metadata_model == message_output_llm_metadata_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_output_llm_metadata_model_json2 = message_output_llm_metadata_model.to_dict()
+ assert message_output_llm_metadata_model_json2 == message_output_llm_metadata_model_json
+
+
+class TestModel_MessageOutputSpelling:
+ """
+ Test Class for MessageOutputSpelling
+ """
+
+ def test_message_output_spelling_serialization(self):
+ """
+ Test serialization/deserialization for MessageOutputSpelling
+ """
+
+ # Construct a json representation of a MessageOutputSpelling model
+ message_output_spelling_model_json = {}
+ message_output_spelling_model_json['text'] = 'testString'
+ message_output_spelling_model_json['original_text'] = 'testString'
+ message_output_spelling_model_json['suggested_text'] = 'testString'
+
+ # Construct a model instance of MessageOutputSpelling by calling from_dict on the json representation
+ message_output_spelling_model = MessageOutputSpelling.from_dict(message_output_spelling_model_json)
+ assert message_output_spelling_model != False
+
+ # Construct a second MessageOutputSpelling instance from the first instance's attributes (constructor round-trip via __dict__)
+ message_output_spelling_model_dict = MessageOutputSpelling.from_dict(message_output_spelling_model_json).__dict__
+ message_output_spelling_model2 = MessageOutputSpelling(**message_output_spelling_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_output_spelling_model == message_output_spelling_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_output_spelling_model_json2 = message_output_spelling_model.to_dict()
+ assert message_output_spelling_model_json2 == message_output_spelling_model_json
+
+
+class TestModel_MessageStreamMetadata:
+ """
+ Test Class for MessageStreamMetadata
+ """
+
+ def test_message_stream_metadata_serialization(self):
+ """
+ Test serialization/deserialization for MessageStreamMetadata
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ metadata_model = {} # Metadata
+ metadata_model['id'] = 38
+
+ # Construct a json representation of a MessageStreamMetadata model
+ message_stream_metadata_model_json = {}
+ message_stream_metadata_model_json['streaming_metadata'] = metadata_model
+
+ # Construct a model instance of MessageStreamMetadata by calling from_dict on the json representation
+ message_stream_metadata_model = MessageStreamMetadata.from_dict(message_stream_metadata_model_json)
+ assert message_stream_metadata_model != False
+
+ # Construct a second MessageStreamMetadata instance from the first instance's attributes (constructor round-trip via __dict__)
+ message_stream_metadata_model_dict = MessageStreamMetadata.from_dict(message_stream_metadata_model_json).__dict__
+ message_stream_metadata_model2 = MessageStreamMetadata(**message_stream_metadata_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_stream_metadata_model == message_stream_metadata_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_stream_metadata_model_json2 = message_stream_metadata_model.to_dict()
+ assert message_stream_metadata_model_json2 == message_stream_metadata_model_json
+
+
+class TestModel_Metadata:
+ """
+ Test Class for Metadata
+ """
+
+ def test_metadata_serialization(self):
+ """
+ Test serialization/deserialization for Metadata
+ """
+
+ # Construct a json representation of a Metadata model
+ metadata_model_json = {}
+ metadata_model_json['id'] = 38
+
+ # Construct a model instance of Metadata by calling from_dict on the json representation
+ metadata_model = Metadata.from_dict(metadata_model_json)
+ assert metadata_model != False
+
+ # Construct a second Metadata instance from the first instance's attributes (constructor round-trip via __dict__)
+ metadata_model_dict = Metadata.from_dict(metadata_model_json).__dict__
+ metadata_model2 = Metadata(**metadata_model_dict)
+
+ # Verify the model instances are equivalent
+ assert metadata_model == metadata_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ metadata_model_json2 = metadata_model.to_dict()
+ assert metadata_model_json2 == metadata_model_json
+
+
+class TestModel_MonitorAssistantReleaseImportArtifactResponse:
+ """
+ Test Class for MonitorAssistantReleaseImportArtifactResponse
+ """
+
+ def test_monitor_assistant_release_import_artifact_response_serialization(self):
+ """
+ Test serialization/deserialization for MonitorAssistantReleaseImportArtifactResponse
+ """
+
+ # Construct a json representation of a MonitorAssistantReleaseImportArtifactResponse model
+ monitor_assistant_release_import_artifact_response_model_json = {}
+ monitor_assistant_release_import_artifact_response_model_json['skill_impact_in_draft'] = ['action']
+
+ # Construct a model instance of MonitorAssistantReleaseImportArtifactResponse by calling from_dict on the json representation
+ monitor_assistant_release_import_artifact_response_model = MonitorAssistantReleaseImportArtifactResponse.from_dict(monitor_assistant_release_import_artifact_response_model_json)
+ assert monitor_assistant_release_import_artifact_response_model != False
+
+ # Construct a second MonitorAssistantReleaseImportArtifactResponse instance from the first instance's attributes (constructor round-trip via __dict__)
+ monitor_assistant_release_import_artifact_response_model_dict = MonitorAssistantReleaseImportArtifactResponse.from_dict(monitor_assistant_release_import_artifact_response_model_json).__dict__
+ monitor_assistant_release_import_artifact_response_model2 = MonitorAssistantReleaseImportArtifactResponse(**monitor_assistant_release_import_artifact_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert monitor_assistant_release_import_artifact_response_model == monitor_assistant_release_import_artifact_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ monitor_assistant_release_import_artifact_response_model_json2 = monitor_assistant_release_import_artifact_response_model.to_dict()
+ assert monitor_assistant_release_import_artifact_response_model_json2 == monitor_assistant_release_import_artifact_response_model_json
+
+
+class TestModel_Pagination:
+ """
+ Test Class for Pagination
+ """
+
+ def test_pagination_serialization(self):
+ """
+ Test serialization/deserialization for Pagination
+ """
+
+ # Construct a json representation of a Pagination model
+ pagination_model_json = {}
+ pagination_model_json['refresh_url'] = 'testString'
+ pagination_model_json['next_url'] = 'testString'
+ pagination_model_json['total'] = 38
+ pagination_model_json['matched'] = 38
+ pagination_model_json['refresh_cursor'] = 'testString'
+ pagination_model_json['next_cursor'] = 'testString'
+
+ # Construct a model instance of Pagination by calling from_dict on the json representation
+ pagination_model = Pagination.from_dict(pagination_model_json)
+ assert pagination_model != False
+
+ # Construct a second Pagination instance from the first instance's attributes (constructor round-trip via __dict__)
+ pagination_model_dict = Pagination.from_dict(pagination_model_json).__dict__
+ pagination_model2 = Pagination(**pagination_model_dict)
+
+ # Verify the model instances are equivalent
+ assert pagination_model == pagination_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ pagination_model_json2 = pagination_model.to_dict()
+ assert pagination_model_json2 == pagination_model_json
+
+
+class TestModel_PartialItem:
+ """
+ Test Class for PartialItem
+ """
+
+ def test_partial_item_serialization(self):
+ """
+ Test serialization/deserialization for PartialItem
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ metadata_model = {} # Metadata
+ metadata_model['id'] = 38
+
+ # Construct a json representation of a PartialItem model
+ partial_item_model_json = {}
+ partial_item_model_json['response_type'] = 'testString'
+ partial_item_model_json['text'] = 'testString'
+ partial_item_model_json['streaming_metadata'] = metadata_model
+
+ # Construct a model instance of PartialItem by calling from_dict on the json representation
+ partial_item_model = PartialItem.from_dict(partial_item_model_json)
+ assert partial_item_model != False
+
+ # Construct a second PartialItem instance from the first instance's attributes (constructor round-trip via __dict__)
+ partial_item_model_dict = PartialItem.from_dict(partial_item_model_json).__dict__
+ partial_item_model2 = PartialItem(**partial_item_model_dict)
+
+ # Verify the model instances are equivalent
+ assert partial_item_model == partial_item_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ partial_item_model_json2 = partial_item_model.to_dict()
+ assert partial_item_model_json2 == partial_item_model_json
+
+
+class TestModel_ProviderAuthenticationOAuth2:
+ """
+ Test Class for ProviderAuthenticationOAuth2
+ """
+
+ def test_provider_authentication_o_auth2_serialization(self):
+ """
+ Test serialization/deserialization for ProviderAuthenticationOAuth2
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername
+ provider_authentication_o_auth2_password_username_model['type'] = 'value'
+ provider_authentication_o_auth2_password_username_model['value'] = 'testString'
+
+ provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password
+ provider_authentication_o_auth2_flows_model['token_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body'
+ provider_authentication_o_auth2_flows_model['content_type'] = 'testString'
+ provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString'
+ provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model
+
+ # Construct a json representation of a ProviderAuthenticationOAuth2 model
+ provider_authentication_o_auth2_model_json = {}
+ provider_authentication_o_auth2_model_json['preferred_flow'] = 'password'
+ provider_authentication_o_auth2_model_json['flows'] = provider_authentication_o_auth2_flows_model
+
+ # Construct a model instance of ProviderAuthenticationOAuth2 by calling from_dict on the json representation
+ provider_authentication_o_auth2_model = ProviderAuthenticationOAuth2.from_dict(provider_authentication_o_auth2_model_json)
+ assert provider_authentication_o_auth2_model != False
+
+ # Construct a second ProviderAuthenticationOAuth2 instance by passing the first instance's attribute dict to the constructor
+ provider_authentication_o_auth2_model_dict = ProviderAuthenticationOAuth2.from_dict(provider_authentication_o_auth2_model_json).__dict__
+ provider_authentication_o_auth2_model2 = ProviderAuthenticationOAuth2(**provider_authentication_o_auth2_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_authentication_o_auth2_model == provider_authentication_o_auth2_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_authentication_o_auth2_model_json2 = provider_authentication_o_auth2_model.to_dict()
+ assert provider_authentication_o_auth2_model_json2 == provider_authentication_o_auth2_model_json
+
+
+class TestModel_ProviderAuthenticationOAuth2PasswordUsername:
+ """
+ Test Class for ProviderAuthenticationOAuth2PasswordUsername
+ """
+
+ def test_provider_authentication_o_auth2_password_username_serialization(self):
+ """
+ Test serialization/deserialization for ProviderAuthenticationOAuth2PasswordUsername
+ """
+
+ # Construct a json representation of a ProviderAuthenticationOAuth2PasswordUsername model
+ provider_authentication_o_auth2_password_username_model_json = {}
+ provider_authentication_o_auth2_password_username_model_json['type'] = 'value'
+ provider_authentication_o_auth2_password_username_model_json['value'] = 'testString'
+
+ # Construct a model instance of ProviderAuthenticationOAuth2PasswordUsername by calling from_dict on the json representation
+ provider_authentication_o_auth2_password_username_model = ProviderAuthenticationOAuth2PasswordUsername.from_dict(provider_authentication_o_auth2_password_username_model_json)
+ assert provider_authentication_o_auth2_password_username_model != False
+
+ # Construct a second ProviderAuthenticationOAuth2PasswordUsername instance by passing the first instance's attribute dict to the constructor
+ provider_authentication_o_auth2_password_username_model_dict = ProviderAuthenticationOAuth2PasswordUsername.from_dict(provider_authentication_o_auth2_password_username_model_json).__dict__
+ provider_authentication_o_auth2_password_username_model2 = ProviderAuthenticationOAuth2PasswordUsername(**provider_authentication_o_auth2_password_username_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_authentication_o_auth2_password_username_model == provider_authentication_o_auth2_password_username_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_authentication_o_auth2_password_username_model_json2 = provider_authentication_o_auth2_password_username_model.to_dict()
+ assert provider_authentication_o_auth2_password_username_model_json2 == provider_authentication_o_auth2_password_username_model_json
+
+
+class TestModel_ProviderAuthenticationTypeAndValue:
+ """
+ Test Class for ProviderAuthenticationTypeAndValue
+ """
+
+ def test_provider_authentication_type_and_value_serialization(self):
+ """
+ Test serialization/deserialization for ProviderAuthenticationTypeAndValue
+ """
+
+ # Construct a json representation of a ProviderAuthenticationTypeAndValue model
+ provider_authentication_type_and_value_model_json = {}
+ provider_authentication_type_and_value_model_json['type'] = 'value'
+ provider_authentication_type_and_value_model_json['value'] = 'testString'
+
+ # Construct a model instance of ProviderAuthenticationTypeAndValue by calling from_dict on the json representation
+ provider_authentication_type_and_value_model = ProviderAuthenticationTypeAndValue.from_dict(provider_authentication_type_and_value_model_json)
+ assert provider_authentication_type_and_value_model != False
+
+ # Construct a second ProviderAuthenticationTypeAndValue instance by passing the first instance's attribute dict to the constructor
+ provider_authentication_type_and_value_model_dict = ProviderAuthenticationTypeAndValue.from_dict(provider_authentication_type_and_value_model_json).__dict__
+ provider_authentication_type_and_value_model2 = ProviderAuthenticationTypeAndValue(**provider_authentication_type_and_value_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_authentication_type_and_value_model == provider_authentication_type_and_value_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_authentication_type_and_value_model_json2 = provider_authentication_type_and_value_model.to_dict()
+ assert provider_authentication_type_and_value_model_json2 == provider_authentication_type_and_value_model_json
+
+
+class TestModel_ProviderCollection:
+ """
+ Test Class for ProviderCollection
+ """
+
+ def test_provider_collection_serialization(self):
+ """
+ Test serialization/deserialization for ProviderCollection
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ provider_response_specification_servers_item_model = {} # ProviderResponseSpecificationServersItem
+ provider_response_specification_servers_item_model['url'] = 'testString'
+
+ provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ provider_response_specification_components_security_schemes_basic_model = {} # ProviderResponseSpecificationComponentsSecuritySchemesBasic
+ provider_response_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model
+
+ provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername
+ provider_authentication_o_auth2_password_username_model['type'] = 'value'
+ provider_authentication_o_auth2_password_username_model['value'] = 'testString'
+
+ provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password
+ provider_authentication_o_auth2_flows_model['token_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body'
+ provider_authentication_o_auth2_flows_model['content_type'] = 'testString'
+ provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString'
+ provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model
+
+ provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2
+ provider_authentication_o_auth2_model['preferred_flow'] = 'password'
+ provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model
+
+ provider_response_specification_components_security_schemes_model = {} # ProviderResponseSpecificationComponentsSecuritySchemes
+ provider_response_specification_components_security_schemes_model['authentication_method'] = 'basic'
+ provider_response_specification_components_security_schemes_model['basic'] = provider_response_specification_components_security_schemes_basic_model
+ provider_response_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model
+
+ provider_response_specification_components_model = {} # ProviderResponseSpecificationComponents
+ provider_response_specification_components_model['securitySchemes'] = provider_response_specification_components_security_schemes_model
+
+ provider_response_specification_model = {} # ProviderResponseSpecification
+ provider_response_specification_model['servers'] = [provider_response_specification_servers_item_model]
+ provider_response_specification_model['components'] = provider_response_specification_components_model
+
+ provider_response_model = {} # ProviderResponse
+ provider_response_model['provider_id'] = 'testString'
+ provider_response_model['specification'] = provider_response_specification_model
+
+ pagination_model = {} # Pagination
+ pagination_model['refresh_url'] = 'testString'
+ pagination_model['next_url'] = 'testString'
+ pagination_model['total'] = 38
+ pagination_model['matched'] = 38
+ pagination_model['refresh_cursor'] = 'testString'
+ pagination_model['next_cursor'] = 'testString'
+
+ # Construct a json representation of a ProviderCollection model
+ provider_collection_model_json = {}
+ provider_collection_model_json['conversational_skill_providers'] = [provider_response_model]
+ provider_collection_model_json['pagination'] = pagination_model
+
+ # Construct a model instance of ProviderCollection by calling from_dict on the json representation
+ provider_collection_model = ProviderCollection.from_dict(provider_collection_model_json)
+ assert provider_collection_model != False
+
+ # Construct a second ProviderCollection instance by passing the first instance's attribute dict to the constructor
+ provider_collection_model_dict = ProviderCollection.from_dict(provider_collection_model_json).__dict__
+ provider_collection_model2 = ProviderCollection(**provider_collection_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_collection_model == provider_collection_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_collection_model_json2 = provider_collection_model.to_dict()
+ assert provider_collection_model_json2 == provider_collection_model_json
+
+
+class TestModel_ProviderPrivate:
+ """
+ Test Class for ProviderPrivate
+ """
+
+ def test_provider_private_serialization(self):
+ """
+ Test serialization/deserialization for ProviderPrivate
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ provider_private_authentication_model = {} # ProviderPrivateAuthenticationBearerFlow
+ provider_private_authentication_model['token'] = provider_authentication_type_and_value_model
+
+ # Construct a json representation of a ProviderPrivate model
+ provider_private_model_json = {}
+ provider_private_model_json['authentication'] = provider_private_authentication_model
+
+ # Construct a model instance of ProviderPrivate by calling from_dict on the json representation
+ provider_private_model = ProviderPrivate.from_dict(provider_private_model_json)
+ assert provider_private_model != False
+
+ # Construct a second ProviderPrivate instance by passing the first instance's attribute dict to the constructor
+ provider_private_model_dict = ProviderPrivate.from_dict(provider_private_model_json).__dict__
+ provider_private_model2 = ProviderPrivate(**provider_private_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_private_model == provider_private_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_private_model_json2 = provider_private_model.to_dict()
+ assert provider_private_model_json2 == provider_private_model_json
+
+
+class TestModel_ProviderPrivateAuthenticationOAuth2PasswordPassword:
+ """
+ Test Class for ProviderPrivateAuthenticationOAuth2PasswordPassword
+ """
+
+ def test_provider_private_authentication_o_auth2_password_password_serialization(self):
+ """
+ Test serialization/deserialization for ProviderPrivateAuthenticationOAuth2PasswordPassword
+ """
+
+ # Construct a json representation of a ProviderPrivateAuthenticationOAuth2PasswordPassword model
+ provider_private_authentication_o_auth2_password_password_model_json = {}
+ provider_private_authentication_o_auth2_password_password_model_json['type'] = 'value'
+ provider_private_authentication_o_auth2_password_password_model_json['value'] = 'testString'
+
+ # Construct a model instance of ProviderPrivateAuthenticationOAuth2PasswordPassword by calling from_dict on the json representation
+ provider_private_authentication_o_auth2_password_password_model = ProviderPrivateAuthenticationOAuth2PasswordPassword.from_dict(provider_private_authentication_o_auth2_password_password_model_json)
+ assert provider_private_authentication_o_auth2_password_password_model != False
+
+ # Construct a second ProviderPrivateAuthenticationOAuth2PasswordPassword instance by passing the first instance's attribute dict to the constructor
+ provider_private_authentication_o_auth2_password_password_model_dict = ProviderPrivateAuthenticationOAuth2PasswordPassword.from_dict(provider_private_authentication_o_auth2_password_password_model_json).__dict__
+ provider_private_authentication_o_auth2_password_password_model2 = ProviderPrivateAuthenticationOAuth2PasswordPassword(**provider_private_authentication_o_auth2_password_password_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_private_authentication_o_auth2_password_password_model == provider_private_authentication_o_auth2_password_password_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_private_authentication_o_auth2_password_password_model_json2 = provider_private_authentication_o_auth2_password_password_model.to_dict()
+ assert provider_private_authentication_o_auth2_password_password_model_json2 == provider_private_authentication_o_auth2_password_password_model_json
+
+
+class TestModel_ProviderResponse:
+ """
+ Test Class for ProviderResponse
+ """
+
+ def test_provider_response_serialization(self):
+ """
+ Test serialization/deserialization for ProviderResponse
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ provider_response_specification_servers_item_model = {} # ProviderResponseSpecificationServersItem
+ provider_response_specification_servers_item_model['url'] = 'testString'
+
+ provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ provider_response_specification_components_security_schemes_basic_model = {} # ProviderResponseSpecificationComponentsSecuritySchemesBasic
+ provider_response_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model
+
+ provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername
+ provider_authentication_o_auth2_password_username_model['type'] = 'value'
+ provider_authentication_o_auth2_password_username_model['value'] = 'testString'
+
+ provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password
+ provider_authentication_o_auth2_flows_model['token_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body'
+ provider_authentication_o_auth2_flows_model['content_type'] = 'testString'
+ provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString'
+ provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model
+
+ provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2
+ provider_authentication_o_auth2_model['preferred_flow'] = 'password'
+ provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model
+
+ provider_response_specification_components_security_schemes_model = {} # ProviderResponseSpecificationComponentsSecuritySchemes
+ provider_response_specification_components_security_schemes_model['authentication_method'] = 'basic'
+ provider_response_specification_components_security_schemes_model['basic'] = provider_response_specification_components_security_schemes_basic_model
+ provider_response_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model
+
+ provider_response_specification_components_model = {} # ProviderResponseSpecificationComponents
+ provider_response_specification_components_model['securitySchemes'] = provider_response_specification_components_security_schemes_model
+
+ provider_response_specification_model = {} # ProviderResponseSpecification
+ provider_response_specification_model['servers'] = [provider_response_specification_servers_item_model]
+ provider_response_specification_model['components'] = provider_response_specification_components_model
+
+ # Construct a json representation of a ProviderResponse model
+ provider_response_model_json = {}
+ provider_response_model_json['provider_id'] = 'testString'
+ provider_response_model_json['specification'] = provider_response_specification_model
+
+ # Construct a model instance of ProviderResponse by calling from_dict on the json representation
+ provider_response_model = ProviderResponse.from_dict(provider_response_model_json)
+ assert provider_response_model != False
+
+ # Construct a second ProviderResponse instance by passing the first instance's attribute dict to the constructor
+ provider_response_model_dict = ProviderResponse.from_dict(provider_response_model_json).__dict__
+ provider_response_model2 = ProviderResponse(**provider_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_response_model == provider_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_response_model_json2 = provider_response_model.to_dict()
+ assert provider_response_model_json2 == provider_response_model_json
+
+
+class TestModel_ProviderResponseSpecification:
+ """
+ Test Class for ProviderResponseSpecification
+ """
+
+ def test_provider_response_specification_serialization(self):
+ """
+ Test serialization/deserialization for ProviderResponseSpecification
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ provider_response_specification_servers_item_model = {} # ProviderResponseSpecificationServersItem
+ provider_response_specification_servers_item_model['url'] = 'testString'
+
+ provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ provider_response_specification_components_security_schemes_basic_model = {} # ProviderResponseSpecificationComponentsSecuritySchemesBasic
+ provider_response_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model
+
+ provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername
+ provider_authentication_o_auth2_password_username_model['type'] = 'value'
+ provider_authentication_o_auth2_password_username_model['value'] = 'testString'
+
+ provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password
+ provider_authentication_o_auth2_flows_model['token_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body'
+ provider_authentication_o_auth2_flows_model['content_type'] = 'testString'
+ provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString'
+ provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model
+
+ provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2
+ provider_authentication_o_auth2_model['preferred_flow'] = 'password'
+ provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model
+
+ provider_response_specification_components_security_schemes_model = {} # ProviderResponseSpecificationComponentsSecuritySchemes
+ provider_response_specification_components_security_schemes_model['authentication_method'] = 'basic'
+ provider_response_specification_components_security_schemes_model['basic'] = provider_response_specification_components_security_schemes_basic_model
+ provider_response_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model
+
+ provider_response_specification_components_model = {} # ProviderResponseSpecificationComponents
+ provider_response_specification_components_model['securitySchemes'] = provider_response_specification_components_security_schemes_model
+
+ # Construct a json representation of a ProviderResponseSpecification model
+ provider_response_specification_model_json = {}
+ provider_response_specification_model_json['servers'] = [provider_response_specification_servers_item_model]
+ provider_response_specification_model_json['components'] = provider_response_specification_components_model
+
+ # Construct a model instance of ProviderResponseSpecification by calling from_dict on the json representation
+ provider_response_specification_model = ProviderResponseSpecification.from_dict(provider_response_specification_model_json)
+ assert provider_response_specification_model != False
+
+ # Construct a second ProviderResponseSpecification instance by passing the first instance's attribute dict to the constructor
+ provider_response_specification_model_dict = ProviderResponseSpecification.from_dict(provider_response_specification_model_json).__dict__
+ provider_response_specification_model2 = ProviderResponseSpecification(**provider_response_specification_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_response_specification_model == provider_response_specification_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_response_specification_model_json2 = provider_response_specification_model.to_dict()
+ assert provider_response_specification_model_json2 == provider_response_specification_model_json
+
+
+class TestModel_ProviderResponseSpecificationComponents:
+ """
+ Test Class for ProviderResponseSpecificationComponents
+ """
+
+ def test_provider_response_specification_components_serialization(self):
+ """
+ Test serialization/deserialization for ProviderResponseSpecificationComponents
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ provider_response_specification_components_security_schemes_basic_model = {} # ProviderResponseSpecificationComponentsSecuritySchemesBasic
+ provider_response_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model
+
+ provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername
+ provider_authentication_o_auth2_password_username_model['type'] = 'value'
+ provider_authentication_o_auth2_password_username_model['value'] = 'testString'
+
+ provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password
+ provider_authentication_o_auth2_flows_model['token_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body'
+ provider_authentication_o_auth2_flows_model['content_type'] = 'testString'
+ provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString'
+ provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model
+
+ provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2
+ provider_authentication_o_auth2_model['preferred_flow'] = 'password'
+ provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model
+
+ provider_response_specification_components_security_schemes_model = {} # ProviderResponseSpecificationComponentsSecuritySchemes
+ provider_response_specification_components_security_schemes_model['authentication_method'] = 'basic'
+ provider_response_specification_components_security_schemes_model['basic'] = provider_response_specification_components_security_schemes_basic_model
+ provider_response_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model
+
+ # Construct a json representation of a ProviderResponseSpecificationComponents model
+ provider_response_specification_components_model_json = {}
+ provider_response_specification_components_model_json['securitySchemes'] = provider_response_specification_components_security_schemes_model
+
+ # Construct a model instance of ProviderResponseSpecificationComponents by calling from_dict on the json representation
+ provider_response_specification_components_model = ProviderResponseSpecificationComponents.from_dict(provider_response_specification_components_model_json)
+ assert provider_response_specification_components_model != False
+
+ # Construct a second ProviderResponseSpecificationComponents instance by passing the first instance's attribute dict to the constructor
+ provider_response_specification_components_model_dict = ProviderResponseSpecificationComponents.from_dict(provider_response_specification_components_model_json).__dict__
+ provider_response_specification_components_model2 = ProviderResponseSpecificationComponents(**provider_response_specification_components_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_response_specification_components_model == provider_response_specification_components_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_response_specification_components_model_json2 = provider_response_specification_components_model.to_dict()
+ assert provider_response_specification_components_model_json2 == provider_response_specification_components_model_json
+
+
+class TestModel_ProviderResponseSpecificationComponentsSecuritySchemes:
+ """
+ Test Class for ProviderResponseSpecificationComponentsSecuritySchemes
+ """
+
+ def test_provider_response_specification_components_security_schemes_serialization(self):
+ """
+ Test serialization/deserialization for ProviderResponseSpecificationComponentsSecuritySchemes
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ provider_response_specification_components_security_schemes_basic_model = {} # ProviderResponseSpecificationComponentsSecuritySchemesBasic
+ provider_response_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model
+
+ provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername
+ provider_authentication_o_auth2_password_username_model['type'] = 'value'
+ provider_authentication_o_auth2_password_username_model['value'] = 'testString'
+
+ provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password
+ provider_authentication_o_auth2_flows_model['token_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body'
+ provider_authentication_o_auth2_flows_model['content_type'] = 'testString'
+ provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString'
+ provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model
+
+ provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2
+ provider_authentication_o_auth2_model['preferred_flow'] = 'password'
+ provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model
+
+ # Construct a json representation of a ProviderResponseSpecificationComponentsSecuritySchemes model
+ provider_response_specification_components_security_schemes_model_json = {}
+ provider_response_specification_components_security_schemes_model_json['authentication_method'] = 'basic'
+ provider_response_specification_components_security_schemes_model_json['basic'] = provider_response_specification_components_security_schemes_basic_model
+ provider_response_specification_components_security_schemes_model_json['oauth2'] = provider_authentication_o_auth2_model
+
+ # Construct a model instance of ProviderResponseSpecificationComponentsSecuritySchemes by calling from_dict on the json representation
+ provider_response_specification_components_security_schemes_model = ProviderResponseSpecificationComponentsSecuritySchemes.from_dict(provider_response_specification_components_security_schemes_model_json)
+ assert provider_response_specification_components_security_schemes_model != False
+
+ # Construct a second ProviderResponseSpecificationComponentsSecuritySchemes instance by passing the first instance's attribute dict to the constructor
+ provider_response_specification_components_security_schemes_model_dict = ProviderResponseSpecificationComponentsSecuritySchemes.from_dict(provider_response_specification_components_security_schemes_model_json).__dict__
+ provider_response_specification_components_security_schemes_model2 = ProviderResponseSpecificationComponentsSecuritySchemes(**provider_response_specification_components_security_schemes_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_response_specification_components_security_schemes_model == provider_response_specification_components_security_schemes_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_response_specification_components_security_schemes_model_json2 = provider_response_specification_components_security_schemes_model.to_dict()
+ assert provider_response_specification_components_security_schemes_model_json2 == provider_response_specification_components_security_schemes_model_json
+
+
+class TestModel_ProviderResponseSpecificationComponentsSecuritySchemesBasic:
+ """
+ Test Class for ProviderResponseSpecificationComponentsSecuritySchemesBasic
+ """
+
+ def test_provider_response_specification_components_security_schemes_basic_serialization(self):
+ """
+ Test serialization/deserialization for ProviderResponseSpecificationComponentsSecuritySchemesBasic
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ # Construct a json representation of a ProviderResponseSpecificationComponentsSecuritySchemesBasic model
+ provider_response_specification_components_security_schemes_basic_model_json = {}
+ provider_response_specification_components_security_schemes_basic_model_json['username'] = provider_authentication_type_and_value_model
+
+ # Construct a model instance of ProviderResponseSpecificationComponentsSecuritySchemesBasic by calling from_dict on the json representation
+ provider_response_specification_components_security_schemes_basic_model = ProviderResponseSpecificationComponentsSecuritySchemesBasic.from_dict(provider_response_specification_components_security_schemes_basic_model_json)
+ assert provider_response_specification_components_security_schemes_basic_model != False
+
+ # Construct a model instance of ProviderResponseSpecificationComponentsSecuritySchemesBasic by calling from_dict on the json representation
+ provider_response_specification_components_security_schemes_basic_model_dict = ProviderResponseSpecificationComponentsSecuritySchemesBasic.from_dict(provider_response_specification_components_security_schemes_basic_model_json).__dict__
+ provider_response_specification_components_security_schemes_basic_model2 = ProviderResponseSpecificationComponentsSecuritySchemesBasic(**provider_response_specification_components_security_schemes_basic_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_response_specification_components_security_schemes_basic_model == provider_response_specification_components_security_schemes_basic_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_response_specification_components_security_schemes_basic_model_json2 = provider_response_specification_components_security_schemes_basic_model.to_dict()
+ assert provider_response_specification_components_security_schemes_basic_model_json2 == provider_response_specification_components_security_schemes_basic_model_json
+
+
+class TestModel_ProviderResponseSpecificationServersItem:
+ """
+ Test Class for ProviderResponseSpecificationServersItem
+ """
+
+ def test_provider_response_specification_servers_item_serialization(self):
+ """
+ Test serialization/deserialization for ProviderResponseSpecificationServersItem
+ """
+
+ # Construct a json representation of a ProviderResponseSpecificationServersItem model
+ provider_response_specification_servers_item_model_json = {}
+ provider_response_specification_servers_item_model_json['url'] = 'testString'
+
+ # Construct a model instance of ProviderResponseSpecificationServersItem by calling from_dict on the json representation
+ provider_response_specification_servers_item_model = ProviderResponseSpecificationServersItem.from_dict(provider_response_specification_servers_item_model_json)
+ assert provider_response_specification_servers_item_model != False
+
+ # Construct a model instance of ProviderResponseSpecificationServersItem by calling from_dict on the json representation
+ provider_response_specification_servers_item_model_dict = ProviderResponseSpecificationServersItem.from_dict(provider_response_specification_servers_item_model_json).__dict__
+ provider_response_specification_servers_item_model2 = ProviderResponseSpecificationServersItem(**provider_response_specification_servers_item_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_response_specification_servers_item_model == provider_response_specification_servers_item_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_response_specification_servers_item_model_json2 = provider_response_specification_servers_item_model.to_dict()
+ assert provider_response_specification_servers_item_model_json2 == provider_response_specification_servers_item_model_json
+
+
+class TestModel_ProviderSpecification:
+ """
+ Test Class for ProviderSpecification
+ """
+
+ def test_provider_specification_serialization(self):
+ """
+ Test serialization/deserialization for ProviderSpecification
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ provider_specification_servers_item_model = {} # ProviderSpecificationServersItem
+ provider_specification_servers_item_model['url'] = 'testString'
+
+ provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ provider_specification_components_security_schemes_basic_model = {} # ProviderSpecificationComponentsSecuritySchemesBasic
+ provider_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model
+
+ provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername
+ provider_authentication_o_auth2_password_username_model['type'] = 'value'
+ provider_authentication_o_auth2_password_username_model['value'] = 'testString'
+
+ provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password
+ provider_authentication_o_auth2_flows_model['token_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body'
+ provider_authentication_o_auth2_flows_model['content_type'] = 'testString'
+ provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString'
+ provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model
+
+ provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2
+ provider_authentication_o_auth2_model['preferred_flow'] = 'password'
+ provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model
+
+ provider_specification_components_security_schemes_model = {} # ProviderSpecificationComponentsSecuritySchemes
+ provider_specification_components_security_schemes_model['authentication_method'] = 'basic'
+ provider_specification_components_security_schemes_model['basic'] = provider_specification_components_security_schemes_basic_model
+ provider_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model
+
+ provider_specification_components_model = {} # ProviderSpecificationComponents
+ provider_specification_components_model['securitySchemes'] = provider_specification_components_security_schemes_model
+
+ # Construct a json representation of a ProviderSpecification model
+ provider_specification_model_json = {}
+ provider_specification_model_json['servers'] = [provider_specification_servers_item_model]
+ provider_specification_model_json['components'] = provider_specification_components_model
+
+ # Construct a model instance of ProviderSpecification by calling from_dict on the json representation
+ provider_specification_model = ProviderSpecification.from_dict(provider_specification_model_json)
+ assert provider_specification_model != False
+
+ # Construct a model instance of ProviderSpecification by calling from_dict on the json representation
+ provider_specification_model_dict = ProviderSpecification.from_dict(provider_specification_model_json).__dict__
+ provider_specification_model2 = ProviderSpecification(**provider_specification_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_specification_model == provider_specification_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_specification_model_json2 = provider_specification_model.to_dict()
+ assert provider_specification_model_json2 == provider_specification_model_json
+
+
+class TestModel_ProviderSpecificationComponents:
+ """
+ Test Class for ProviderSpecificationComponents
+ """
+
+ def test_provider_specification_components_serialization(self):
+ """
+ Test serialization/deserialization for ProviderSpecificationComponents
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ provider_specification_components_security_schemes_basic_model = {} # ProviderSpecificationComponentsSecuritySchemesBasic
+ provider_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model
+
+ provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername
+ provider_authentication_o_auth2_password_username_model['type'] = 'value'
+ provider_authentication_o_auth2_password_username_model['value'] = 'testString'
+
+ provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password
+ provider_authentication_o_auth2_flows_model['token_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body'
+ provider_authentication_o_auth2_flows_model['content_type'] = 'testString'
+ provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString'
+ provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model
+
+ provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2
+ provider_authentication_o_auth2_model['preferred_flow'] = 'password'
+ provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model
+
+ provider_specification_components_security_schemes_model = {} # ProviderSpecificationComponentsSecuritySchemes
+ provider_specification_components_security_schemes_model['authentication_method'] = 'basic'
+ provider_specification_components_security_schemes_model['basic'] = provider_specification_components_security_schemes_basic_model
+ provider_specification_components_security_schemes_model['oauth2'] = provider_authentication_o_auth2_model
+
+ # Construct a json representation of a ProviderSpecificationComponents model
+ provider_specification_components_model_json = {}
+ provider_specification_components_model_json['securitySchemes'] = provider_specification_components_security_schemes_model
+
+ # Construct a model instance of ProviderSpecificationComponents by calling from_dict on the json representation
+ provider_specification_components_model = ProviderSpecificationComponents.from_dict(provider_specification_components_model_json)
+ assert provider_specification_components_model != False
+
+ # Construct a model instance of ProviderSpecificationComponents by calling from_dict on the json representation
+ provider_specification_components_model_dict = ProviderSpecificationComponents.from_dict(provider_specification_components_model_json).__dict__
+ provider_specification_components_model2 = ProviderSpecificationComponents(**provider_specification_components_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_specification_components_model == provider_specification_components_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_specification_components_model_json2 = provider_specification_components_model.to_dict()
+ assert provider_specification_components_model_json2 == provider_specification_components_model_json
+
+
+class TestModel_ProviderSpecificationComponentsSecuritySchemes:
+ """
+ Test Class for ProviderSpecificationComponentsSecuritySchemes
+ """
+
+ def test_provider_specification_components_security_schemes_serialization(self):
+ """
+ Test serialization/deserialization for ProviderSpecificationComponentsSecuritySchemes
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ provider_specification_components_security_schemes_basic_model = {} # ProviderSpecificationComponentsSecuritySchemesBasic
+ provider_specification_components_security_schemes_basic_model['username'] = provider_authentication_type_and_value_model
+
+ provider_authentication_o_auth2_password_username_model = {} # ProviderAuthenticationOAuth2PasswordUsername
+ provider_authentication_o_auth2_password_username_model['type'] = 'value'
+ provider_authentication_o_auth2_password_username_model['value'] = 'testString'
+
+ provider_authentication_o_auth2_flows_model = {} # ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password
+ provider_authentication_o_auth2_flows_model['token_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['refresh_url'] = 'testString'
+ provider_authentication_o_auth2_flows_model['client_auth_type'] = 'Body'
+ provider_authentication_o_auth2_flows_model['content_type'] = 'testString'
+ provider_authentication_o_auth2_flows_model['header_prefix'] = 'testString'
+ provider_authentication_o_auth2_flows_model['username'] = provider_authentication_o_auth2_password_username_model
+
+ provider_authentication_o_auth2_model = {} # ProviderAuthenticationOAuth2
+ provider_authentication_o_auth2_model['preferred_flow'] = 'password'
+ provider_authentication_o_auth2_model['flows'] = provider_authentication_o_auth2_flows_model
+
+ # Construct a json representation of a ProviderSpecificationComponentsSecuritySchemes model
+ provider_specification_components_security_schemes_model_json = {}
+ provider_specification_components_security_schemes_model_json['authentication_method'] = 'basic'
+ provider_specification_components_security_schemes_model_json['basic'] = provider_specification_components_security_schemes_basic_model
+ provider_specification_components_security_schemes_model_json['oauth2'] = provider_authentication_o_auth2_model
+
+ # Construct a model instance of ProviderSpecificationComponentsSecuritySchemes by calling from_dict on the json representation
+ provider_specification_components_security_schemes_model = ProviderSpecificationComponentsSecuritySchemes.from_dict(provider_specification_components_security_schemes_model_json)
+ assert provider_specification_components_security_schemes_model != False
+
+ # Construct a model instance of ProviderSpecificationComponentsSecuritySchemes by calling from_dict on the json representation
+ provider_specification_components_security_schemes_model_dict = ProviderSpecificationComponentsSecuritySchemes.from_dict(provider_specification_components_security_schemes_model_json).__dict__
+ provider_specification_components_security_schemes_model2 = ProviderSpecificationComponentsSecuritySchemes(**provider_specification_components_security_schemes_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_specification_components_security_schemes_model == provider_specification_components_security_schemes_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_specification_components_security_schemes_model_json2 = provider_specification_components_security_schemes_model.to_dict()
+ assert provider_specification_components_security_schemes_model_json2 == provider_specification_components_security_schemes_model_json
+
+
+class TestModel_ProviderSpecificationComponentsSecuritySchemesBasic:
+ """
+ Test Class for ProviderSpecificationComponentsSecuritySchemesBasic
+ """
+
+ def test_provider_specification_components_security_schemes_basic_serialization(self):
+ """
+ Test serialization/deserialization for ProviderSpecificationComponentsSecuritySchemesBasic
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ provider_authentication_type_and_value_model = {} # ProviderAuthenticationTypeAndValue
+ provider_authentication_type_and_value_model['type'] = 'value'
+ provider_authentication_type_and_value_model['value'] = 'testString'
+
+ # Construct a json representation of a ProviderSpecificationComponentsSecuritySchemesBasic model
+ provider_specification_components_security_schemes_basic_model_json = {}
+ provider_specification_components_security_schemes_basic_model_json['username'] = provider_authentication_type_and_value_model
+
+ # Construct a model instance of ProviderSpecificationComponentsSecuritySchemesBasic by calling from_dict on the json representation
+ provider_specification_components_security_schemes_basic_model = ProviderSpecificationComponentsSecuritySchemesBasic.from_dict(provider_specification_components_security_schemes_basic_model_json)
+ assert provider_specification_components_security_schemes_basic_model != False
+
+ # Construct a model instance of ProviderSpecificationComponentsSecuritySchemesBasic by calling from_dict on the json representation
+ provider_specification_components_security_schemes_basic_model_dict = ProviderSpecificationComponentsSecuritySchemesBasic.from_dict(provider_specification_components_security_schemes_basic_model_json).__dict__
+ provider_specification_components_security_schemes_basic_model2 = ProviderSpecificationComponentsSecuritySchemesBasic(**provider_specification_components_security_schemes_basic_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_specification_components_security_schemes_basic_model == provider_specification_components_security_schemes_basic_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_specification_components_security_schemes_basic_model_json2 = provider_specification_components_security_schemes_basic_model.to_dict()
+ assert provider_specification_components_security_schemes_basic_model_json2 == provider_specification_components_security_schemes_basic_model_json
+
+
+class TestModel_ProviderSpecificationServersItem:
+ """
+ Test Class for ProviderSpecificationServersItem
+ """
+
+ def test_provider_specification_servers_item_serialization(self):
+ """
+ Test serialization/deserialization for ProviderSpecificationServersItem
+ """
+
+ # Construct a json representation of a ProviderSpecificationServersItem model
+ provider_specification_servers_item_model_json = {}
+ provider_specification_servers_item_model_json['url'] = 'testString'
+
+ # Construct a model instance of ProviderSpecificationServersItem by calling from_dict on the json representation
+ provider_specification_servers_item_model = ProviderSpecificationServersItem.from_dict(provider_specification_servers_item_model_json)
+ assert provider_specification_servers_item_model != False
+
+ # Construct a model instance of ProviderSpecificationServersItem by calling from_dict on the json representation
+ provider_specification_servers_item_model_dict = ProviderSpecificationServersItem.from_dict(provider_specification_servers_item_model_json).__dict__
+ provider_specification_servers_item_model2 = ProviderSpecificationServersItem(**provider_specification_servers_item_model_dict)
+
+ # Verify the model instances are equivalent
+ assert provider_specification_servers_item_model == provider_specification_servers_item_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ provider_specification_servers_item_model_json2 = provider_specification_servers_item_model.to_dict()
+ assert provider_specification_servers_item_model_json2 == provider_specification_servers_item_model_json
+
+
+class TestModel_Release:
+ """
+ Test Class for Release
+ """
+
+ def test_release_serialization(self):
+ """
+ Test serialization/deserialization for Release
+ """
+
+ # Construct a json representation of a Release model
+ release_model_json = {}
+ release_model_json['description'] = 'testString'
+
+ # Construct a model instance of Release by calling from_dict on the json representation
+ release_model = Release.from_dict(release_model_json)
+ assert release_model != False
+
+ # Construct a model instance of Release by calling from_dict on the json representation
+ release_model_dict = Release.from_dict(release_model_json).__dict__
+ release_model2 = Release(**release_model_dict)
+
+ # Verify the model instances are equivalent
+ assert release_model == release_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ release_model_json2 = release_model.to_dict()
+ assert release_model_json2 == release_model_json
+
+
+class TestModel_ReleaseCollection:
+ """
+ Test Class for ReleaseCollection
+ """
+
+ def test_release_collection_serialization(self):
+ """
+ Test serialization/deserialization for ReleaseCollection
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ release_model = {} # Release
+ release_model['description'] = 'testString'
+
+ pagination_model = {} # Pagination
+ pagination_model['refresh_url'] = 'testString'
+ pagination_model['next_url'] = 'testString'
+ pagination_model['total'] = 38
+ pagination_model['matched'] = 38
+ pagination_model['refresh_cursor'] = 'testString'
+ pagination_model['next_cursor'] = 'testString'
+
+ # Construct a json representation of a ReleaseCollection model
+ release_collection_model_json = {}
+ release_collection_model_json['releases'] = [release_model]
+ release_collection_model_json['pagination'] = pagination_model
+
+ # Construct a model instance of ReleaseCollection by calling from_dict on the json representation
+ release_collection_model = ReleaseCollection.from_dict(release_collection_model_json)
+ assert release_collection_model != False
+
+ # Construct a model instance of ReleaseCollection by calling from_dict on the json representation
+ release_collection_model_dict = ReleaseCollection.from_dict(release_collection_model_json).__dict__
+ release_collection_model2 = ReleaseCollection(**release_collection_model_dict)
+
+ # Verify the model instances are equivalent
+ assert release_collection_model == release_collection_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ release_collection_model_json2 = release_collection_model.to_dict()
+ assert release_collection_model_json2 == release_collection_model_json
+
+
+class TestModel_ReleaseContent:
+ """
+ Test Class for ReleaseContent
+ """
+
+ def test_release_content_serialization(self):
+ """
+ Test serialization/deserialization for ReleaseContent
+ """
+
+ # Construct a json representation of a ReleaseContent model
+ release_content_model_json = {}
+
+ # Construct a model instance of ReleaseContent by calling from_dict on the json representation
+ release_content_model = ReleaseContent.from_dict(release_content_model_json)
+ assert release_content_model != False
+
+ # Construct a model instance of ReleaseContent by calling from_dict on the json representation
+ release_content_model_dict = ReleaseContent.from_dict(release_content_model_json).__dict__
+ release_content_model2 = ReleaseContent(**release_content_model_dict)
+
+ # Verify the model instances are equivalent
+ assert release_content_model == release_content_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ release_content_model_json2 = release_content_model.to_dict()
+ assert release_content_model_json2 == release_content_model_json
+
+
+class TestModel_ReleaseSkill:
+ """
+ Test Class for ReleaseSkill
+ """
+
+ def test_release_skill_serialization(self):
+ """
+ Test serialization/deserialization for ReleaseSkill
+ """
+
+ # Construct a json representation of a ReleaseSkill model
+ release_skill_model_json = {}
+ release_skill_model_json['skill_id'] = 'testString'
+ release_skill_model_json['type'] = 'dialog'
+ release_skill_model_json['snapshot'] = 'testString'
+
+ # Construct a model instance of ReleaseSkill by calling from_dict on the json representation
+ release_skill_model = ReleaseSkill.from_dict(release_skill_model_json)
+ assert release_skill_model != False
+
+ # Construct a model instance of ReleaseSkill by calling from_dict on the json representation
+ release_skill_model_dict = ReleaseSkill.from_dict(release_skill_model_json).__dict__
+ release_skill_model2 = ReleaseSkill(**release_skill_model_dict)
+
+ # Verify the model instances are equivalent
+ assert release_skill_model == release_skill_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ release_skill_model_json2 = release_skill_model.to_dict()
+ assert release_skill_model_json2 == release_skill_model_json
+
+
+class TestModel_RequestAnalytics:
+ """
+ Test Class for RequestAnalytics
+ """
+
+ def test_request_analytics_serialization(self):
+ """
+ Test serialization/deserialization for RequestAnalytics
+ """
+
+ # Construct a json representation of a RequestAnalytics model
+ request_analytics_model_json = {}
+ request_analytics_model_json['browser'] = 'testString'
+ request_analytics_model_json['device'] = 'testString'
+ request_analytics_model_json['pageUrl'] = 'testString'
+
+ # Construct a model instance of RequestAnalytics by calling from_dict on the json representation
+ request_analytics_model = RequestAnalytics.from_dict(request_analytics_model_json)
+ assert request_analytics_model != False
+
+ # Construct a model instance of RequestAnalytics by calling from_dict on the json representation
+ request_analytics_model_dict = RequestAnalytics.from_dict(request_analytics_model_json).__dict__
+ request_analytics_model2 = RequestAnalytics(**request_analytics_model_dict)
+
+ # Verify the model instances are equivalent
+ assert request_analytics_model == request_analytics_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ request_analytics_model_json2 = request_analytics_model.to_dict()
+ assert request_analytics_model_json2 == request_analytics_model_json
+
+
+class TestModel_ResponseGenericChannel:
+ """
+ Test Class for ResponseGenericChannel
+ """
+
+ def test_response_generic_channel_serialization(self):
+ """
+ Test serialization/deserialization for ResponseGenericChannel
+ """
+
+ # Construct a json representation of a ResponseGenericChannel model
+ response_generic_channel_model_json = {}
+ response_generic_channel_model_json['channel'] = 'testString'
+
+ # Construct a model instance of ResponseGenericChannel by calling from_dict on the json representation
+ response_generic_channel_model = ResponseGenericChannel.from_dict(response_generic_channel_model_json)
+ assert response_generic_channel_model != False
+
+ # Construct a model instance of ResponseGenericChannel by calling from_dict on the json representation
+ response_generic_channel_model_dict = ResponseGenericChannel.from_dict(response_generic_channel_model_json).__dict__
+ response_generic_channel_model2 = ResponseGenericChannel(**response_generic_channel_model_dict)
+
+ # Verify the model instances are equivalent
+ assert response_generic_channel_model == response_generic_channel_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ response_generic_channel_model_json2 = response_generic_channel_model.to_dict()
+ assert response_generic_channel_model_json2 == response_generic_channel_model_json
+
+
+class TestModel_ResponseGenericCitation:
+ """
+ Test Class for ResponseGenericCitation
+ """
+
+ def test_response_generic_citation_serialization(self):
+ """
+ Test serialization/deserialization for ResponseGenericCitation
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem
+ response_generic_citation_ranges_item_model['start'] = 38
+ response_generic_citation_ranges_item_model['end'] = 38
+
+ # Construct a json representation of a ResponseGenericCitation model
+ response_generic_citation_model_json = {}
+ response_generic_citation_model_json['title'] = 'testString'
+ response_generic_citation_model_json['text'] = 'testString'
+ response_generic_citation_model_json['body'] = 'testString'
+ response_generic_citation_model_json['search_result_index'] = 38
+ response_generic_citation_model_json['ranges'] = [response_generic_citation_ranges_item_model]
+
+ # Construct a model instance of ResponseGenericCitation by calling from_dict on the json representation
+ response_generic_citation_model = ResponseGenericCitation.from_dict(response_generic_citation_model_json)
+ assert response_generic_citation_model != False
+
+ # Construct a model instance of ResponseGenericCitation by calling from_dict on the json representation
+ response_generic_citation_model_dict = ResponseGenericCitation.from_dict(response_generic_citation_model_json).__dict__
+ response_generic_citation_model2 = ResponseGenericCitation(**response_generic_citation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert response_generic_citation_model == response_generic_citation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ response_generic_citation_model_json2 = response_generic_citation_model.to_dict()
+ assert response_generic_citation_model_json2 == response_generic_citation_model_json
+
+
+class TestModel_ResponseGenericCitationRangesItem:
+ """
+ Test Class for ResponseGenericCitationRangesItem
+ """
+
+ def test_response_generic_citation_ranges_item_serialization(self):
+ """
+ Test serialization/deserialization for ResponseGenericCitationRangesItem
+ """
+
+ # Construct a json representation of a ResponseGenericCitationRangesItem model
+ response_generic_citation_ranges_item_model_json = {}
+ response_generic_citation_ranges_item_model_json['start'] = 38
+ response_generic_citation_ranges_item_model_json['end'] = 38
+
+ # Construct a model instance of ResponseGenericCitationRangesItem by calling from_dict on the json representation
+ response_generic_citation_ranges_item_model = ResponseGenericCitationRangesItem.from_dict(response_generic_citation_ranges_item_model_json)
+ assert response_generic_citation_ranges_item_model != False
+
+ # Construct a model instance of ResponseGenericCitationRangesItem by calling from_dict on the json representation
+ response_generic_citation_ranges_item_model_dict = ResponseGenericCitationRangesItem.from_dict(response_generic_citation_ranges_item_model_json).__dict__
+ response_generic_citation_ranges_item_model2 = ResponseGenericCitationRangesItem(**response_generic_citation_ranges_item_model_dict)
+
+ # Verify the model instances are equivalent
+ assert response_generic_citation_ranges_item_model == response_generic_citation_ranges_item_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ response_generic_citation_ranges_item_model_json2 = response_generic_citation_ranges_item_model.to_dict()
+ assert response_generic_citation_ranges_item_model_json2 == response_generic_citation_ranges_item_model_json
+
+
+class TestModel_ResponseGenericConfidenceScores:
+ """
+ Test Class for ResponseGenericConfidenceScores
+ """
+
+ def test_response_generic_confidence_scores_serialization(self):
+ """
+ Test serialization/deserialization for ResponseGenericConfidenceScores
+ """
+
+ # Construct a json representation of a ResponseGenericConfidenceScores model
+ response_generic_confidence_scores_model_json = {}
+ response_generic_confidence_scores_model_json['threshold'] = 72.5
+ response_generic_confidence_scores_model_json['pre_gen'] = 72.5
+ response_generic_confidence_scores_model_json['post_gen'] = 72.5
+ response_generic_confidence_scores_model_json['extractiveness'] = 72.5
+
+ # Construct a model instance of ResponseGenericConfidenceScores by calling from_dict on the json representation
+ response_generic_confidence_scores_model = ResponseGenericConfidenceScores.from_dict(response_generic_confidence_scores_model_json)
+ assert response_generic_confidence_scores_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ response_generic_confidence_scores_model_dict = ResponseGenericConfidenceScores.from_dict(response_generic_confidence_scores_model_json).__dict__
+ response_generic_confidence_scores_model2 = ResponseGenericConfidenceScores(**response_generic_confidence_scores_model_dict)
+
+ # Verify the model instances are equivalent
+ assert response_generic_confidence_scores_model == response_generic_confidence_scores_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ response_generic_confidence_scores_model_json2 = response_generic_confidence_scores_model.to_dict()
+ assert response_generic_confidence_scores_model_json2 == response_generic_confidence_scores_model_json
+
+
+class TestModel_RuntimeEntity:
+ """
+ Test Class for RuntimeEntity
+ """
+
+ def test_runtime_entity_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeEntity
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ # Construct a json representation of a RuntimeEntity model
+ runtime_entity_model_json = {}
+ runtime_entity_model_json['entity'] = 'testString'
+ runtime_entity_model_json['location'] = [38]
+ runtime_entity_model_json['value'] = 'testString'
+ runtime_entity_model_json['confidence'] = 72.5
+ runtime_entity_model_json['groups'] = [capture_group_model]
+ runtime_entity_model_json['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model_json['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model_json['role'] = runtime_entity_role_model
+ runtime_entity_model_json['skill'] = 'testString'
+
+ # Construct a model instance of RuntimeEntity by calling from_dict on the json representation
+ runtime_entity_model = RuntimeEntity.from_dict(runtime_entity_model_json)
+ assert runtime_entity_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ runtime_entity_model_dict = RuntimeEntity.from_dict(runtime_entity_model_json).__dict__
+ runtime_entity_model2 = RuntimeEntity(**runtime_entity_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_entity_model == runtime_entity_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_entity_model_json2 = runtime_entity_model.to_dict()
+ assert runtime_entity_model_json2 == runtime_entity_model_json
+
+
+class TestModel_RuntimeEntityAlternative:
+ """
+ Test Class for RuntimeEntityAlternative
+ """
+
+ def test_runtime_entity_alternative_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeEntityAlternative
+ """
+
+ # Construct a json representation of a RuntimeEntityAlternative model
+ runtime_entity_alternative_model_json = {}
+ runtime_entity_alternative_model_json['value'] = 'testString'
+ runtime_entity_alternative_model_json['confidence'] = 72.5
+
+ # Construct a model instance of RuntimeEntityAlternative by calling from_dict on the json representation
+ runtime_entity_alternative_model = RuntimeEntityAlternative.from_dict(runtime_entity_alternative_model_json)
+ assert runtime_entity_alternative_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ runtime_entity_alternative_model_dict = RuntimeEntityAlternative.from_dict(runtime_entity_alternative_model_json).__dict__
+ runtime_entity_alternative_model2 = RuntimeEntityAlternative(**runtime_entity_alternative_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_entity_alternative_model == runtime_entity_alternative_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_entity_alternative_model_json2 = runtime_entity_alternative_model.to_dict()
+ assert runtime_entity_alternative_model_json2 == runtime_entity_alternative_model_json
+
+
+class TestModel_RuntimeEntityInterpretation:
+ """
+ Test Class for RuntimeEntityInterpretation
+ """
+
+ def test_runtime_entity_interpretation_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeEntityInterpretation
+ """
+
+ # Construct a json representation of a RuntimeEntityInterpretation model
+ runtime_entity_interpretation_model_json = {}
+ runtime_entity_interpretation_model_json['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model_json['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model_json['festival'] = 'testString'
+ runtime_entity_interpretation_model_json['granularity'] = 'day'
+ runtime_entity_interpretation_model_json['range_link'] = 'testString'
+ runtime_entity_interpretation_model_json['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model_json['relative_day'] = 72.5
+ runtime_entity_interpretation_model_json['relative_month'] = 72.5
+ runtime_entity_interpretation_model_json['relative_week'] = 72.5
+ runtime_entity_interpretation_model_json['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model_json['relative_year'] = 72.5
+ runtime_entity_interpretation_model_json['specific_day'] = 72.5
+ runtime_entity_interpretation_model_json['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model_json['specific_month'] = 72.5
+ runtime_entity_interpretation_model_json['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model_json['specific_year'] = 72.5
+ runtime_entity_interpretation_model_json['numeric_value'] = 72.5
+ runtime_entity_interpretation_model_json['subtype'] = 'testString'
+ runtime_entity_interpretation_model_json['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model_json['relative_hour'] = 72.5
+ runtime_entity_interpretation_model_json['relative_minute'] = 72.5
+ runtime_entity_interpretation_model_json['relative_second'] = 72.5
+ runtime_entity_interpretation_model_json['specific_hour'] = 72.5
+ runtime_entity_interpretation_model_json['specific_minute'] = 72.5
+ runtime_entity_interpretation_model_json['specific_second'] = 72.5
+ runtime_entity_interpretation_model_json['timezone'] = 'testString'
+
+ # Construct a model instance of RuntimeEntityInterpretation by calling from_dict on the json representation
+ runtime_entity_interpretation_model = RuntimeEntityInterpretation.from_dict(runtime_entity_interpretation_model_json)
+ assert runtime_entity_interpretation_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ runtime_entity_interpretation_model_dict = RuntimeEntityInterpretation.from_dict(runtime_entity_interpretation_model_json).__dict__
+ runtime_entity_interpretation_model2 = RuntimeEntityInterpretation(**runtime_entity_interpretation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_entity_interpretation_model == runtime_entity_interpretation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_entity_interpretation_model_json2 = runtime_entity_interpretation_model.to_dict()
+ assert runtime_entity_interpretation_model_json2 == runtime_entity_interpretation_model_json
+
+
+class TestModel_RuntimeEntityRole:
+ """
+ Test Class for RuntimeEntityRole
+ """
+
+ def test_runtime_entity_role_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeEntityRole
+ """
+
+ # Construct a json representation of a RuntimeEntityRole model
+ runtime_entity_role_model_json = {}
+ runtime_entity_role_model_json['type'] = 'date_from'
+
+ # Construct a model instance of RuntimeEntityRole by calling from_dict on the json representation
+ runtime_entity_role_model = RuntimeEntityRole.from_dict(runtime_entity_role_model_json)
+ assert runtime_entity_role_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ runtime_entity_role_model_dict = RuntimeEntityRole.from_dict(runtime_entity_role_model_json).__dict__
+ runtime_entity_role_model2 = RuntimeEntityRole(**runtime_entity_role_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_entity_role_model == runtime_entity_role_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_entity_role_model_json2 = runtime_entity_role_model.to_dict()
+ assert runtime_entity_role_model_json2 == runtime_entity_role_model_json
+
+
+class TestModel_RuntimeIntent:
+ """
+ Test Class for RuntimeIntent
+ """
+
+ def test_runtime_intent_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeIntent
+ """
+
+ # Construct a json representation of a RuntimeIntent model
+ runtime_intent_model_json = {}
+ runtime_intent_model_json['intent'] = 'testString'
+ runtime_intent_model_json['confidence'] = 72.5
+ runtime_intent_model_json['skill'] = 'testString'
+
+ # Construct a model instance of RuntimeIntent by calling from_dict on the json representation
+ runtime_intent_model = RuntimeIntent.from_dict(runtime_intent_model_json)
+ assert runtime_intent_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ runtime_intent_model_dict = RuntimeIntent.from_dict(runtime_intent_model_json).__dict__
+ runtime_intent_model2 = RuntimeIntent(**runtime_intent_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_intent_model == runtime_intent_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_intent_model_json2 = runtime_intent_model.to_dict()
+ assert runtime_intent_model_json2 == runtime_intent_model_json
+
+
+class TestModel_SearchResult:
+ """
+ Test Class for SearchResult
+ """
+
+ def test_search_result_serialization(self):
+ """
+ Test serialization/deserialization for SearchResult
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ search_result_metadata_model = {} # SearchResultMetadata
+ search_result_metadata_model['confidence'] = 72.5
+ search_result_metadata_model['score'] = 72.5
+
+ search_result_highlight_model = {} # SearchResultHighlight
+ search_result_highlight_model['body'] = ['testString']
+ search_result_highlight_model['title'] = ['testString']
+ search_result_highlight_model['url'] = ['testString']
+ search_result_highlight_model['foo'] = ['testString']
+
+ search_result_answer_model = {} # SearchResultAnswer
+ search_result_answer_model['text'] = 'testString'
+ search_result_answer_model['confidence'] = 0
+
+ # Construct a json representation of a SearchResult model
+ search_result_model_json = {}
+ search_result_model_json['id'] = 'testString'
+ search_result_model_json['result_metadata'] = search_result_metadata_model
+ search_result_model_json['body'] = 'testString'
+ search_result_model_json['title'] = 'testString'
+ search_result_model_json['url'] = 'testString'
+ search_result_model_json['highlight'] = search_result_highlight_model
+ search_result_model_json['answers'] = [search_result_answer_model]
+
+ # Construct a model instance of SearchResult by calling from_dict on the json representation
+ search_result_model = SearchResult.from_dict(search_result_model_json)
+ assert search_result_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ search_result_model_dict = SearchResult.from_dict(search_result_model_json).__dict__
+ search_result_model2 = SearchResult(**search_result_model_dict)
+
+ # Verify the model instances are equivalent
+ assert search_result_model == search_result_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ search_result_model_json2 = search_result_model.to_dict()
+ assert search_result_model_json2 == search_result_model_json
+
+
+class TestModel_SearchResultAnswer:
+ """
+ Test Class for SearchResultAnswer
+ """
+
+ def test_search_result_answer_serialization(self):
+ """
+ Test serialization/deserialization for SearchResultAnswer
+ """
+
+ # Construct a json representation of a SearchResultAnswer model
+ search_result_answer_model_json = {}
+ search_result_answer_model_json['text'] = 'testString'
+ search_result_answer_model_json['confidence'] = 0
+
+ # Construct a model instance of SearchResultAnswer by calling from_dict on the json representation
+ search_result_answer_model = SearchResultAnswer.from_dict(search_result_answer_model_json)
+ assert search_result_answer_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ search_result_answer_model_dict = SearchResultAnswer.from_dict(search_result_answer_model_json).__dict__
+ search_result_answer_model2 = SearchResultAnswer(**search_result_answer_model_dict)
+
+ # Verify the model instances are equivalent
+ assert search_result_answer_model == search_result_answer_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ search_result_answer_model_json2 = search_result_answer_model.to_dict()
+ assert search_result_answer_model_json2 == search_result_answer_model_json
+
+
+class TestModel_SearchResultHighlight:
+ """
+ Test Class for SearchResultHighlight
+ """
+
+ def test_search_result_highlight_serialization(self):
+ """
+ Test serialization/deserialization for SearchResultHighlight
+ """
+
+ # Construct a json representation of a SearchResultHighlight model
+ search_result_highlight_model_json = {}
+ search_result_highlight_model_json['body'] = ['testString']
+ search_result_highlight_model_json['title'] = ['testString']
+ search_result_highlight_model_json['url'] = ['testString']
+ search_result_highlight_model_json['foo'] = ['testString']
+
+ # Construct a model instance of SearchResultHighlight by calling from_dict on the json representation
+ search_result_highlight_model = SearchResultHighlight.from_dict(search_result_highlight_model_json)
+ assert search_result_highlight_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ search_result_highlight_model_dict = SearchResultHighlight.from_dict(search_result_highlight_model_json).__dict__
+ search_result_highlight_model2 = SearchResultHighlight(**search_result_highlight_model_dict)
+
+ # Verify the model instances are equivalent
+ assert search_result_highlight_model == search_result_highlight_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ search_result_highlight_model_json2 = search_result_highlight_model.to_dict()
+ assert search_result_highlight_model_json2 == search_result_highlight_model_json
+
+ # Test get_properties and set_properties methods.
+ search_result_highlight_model.set_properties({})
+ actual_dict = search_result_highlight_model.get_properties()
+ assert actual_dict == {}
+
+ expected_dict = {'foo': ['testString']}
+ search_result_highlight_model.set_properties(expected_dict)
+ actual_dict = search_result_highlight_model.get_properties()
+ assert actual_dict.keys() == expected_dict.keys()
+
+
+class TestModel_SearchResultMetadata:
+ """
+ Test Class for SearchResultMetadata
+ """
+
+ def test_search_result_metadata_serialization(self):
+ """
+ Test serialization/deserialization for SearchResultMetadata
+ """
+
+ # Construct a json representation of a SearchResultMetadata model
+ search_result_metadata_model_json = {}
+ search_result_metadata_model_json['confidence'] = 72.5
+ search_result_metadata_model_json['score'] = 72.5
+
+ # Construct a model instance of SearchResultMetadata by calling from_dict on the json representation
+ search_result_metadata_model = SearchResultMetadata.from_dict(search_result_metadata_model_json)
+ assert search_result_metadata_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ search_result_metadata_model_dict = SearchResultMetadata.from_dict(search_result_metadata_model_json).__dict__
+ search_result_metadata_model2 = SearchResultMetadata(**search_result_metadata_model_dict)
+
+ # Verify the model instances are equivalent
+ assert search_result_metadata_model == search_result_metadata_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ search_result_metadata_model_json2 = search_result_metadata_model.to_dict()
+ assert search_result_metadata_model_json2 == search_result_metadata_model_json
+
+
+class TestModel_SearchResults:
+ """
+ Test Class for SearchResults
+ """
+
+ def test_search_results_serialization(self):
+ """
+ Test serialization/deserialization for SearchResults
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ search_results_result_metadata_model = {} # SearchResultsResultMetadata
+ search_results_result_metadata_model['document_retrieval_source'] = 'testString'
+ search_results_result_metadata_model['score'] = 38
+
+ # Construct a json representation of a SearchResults model
+ search_results_model_json = {}
+ search_results_model_json['result_metadata'] = search_results_result_metadata_model
+ search_results_model_json['id'] = 'testString'
+ search_results_model_json['title'] = 'testString'
+ search_results_model_json['body'] = 'testString'
+
+ # Construct a model instance of SearchResults by calling from_dict on the json representation
+ search_results_model = SearchResults.from_dict(search_results_model_json)
+ assert search_results_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ search_results_model_dict = SearchResults.from_dict(search_results_model_json).__dict__
+ search_results_model2 = SearchResults(**search_results_model_dict)
+
+ # Verify the model instances are equivalent
+ assert search_results_model == search_results_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ search_results_model_json2 = search_results_model.to_dict()
+ assert search_results_model_json2 == search_results_model_json
+
+
+class TestModel_SearchResultsResultMetadata:
+ """
+ Test Class for SearchResultsResultMetadata
+ """
+
+ def test_search_results_result_metadata_serialization(self):
+ """
+ Test serialization/deserialization for SearchResultsResultMetadata
+ """
+
+ # Construct a json representation of a SearchResultsResultMetadata model
+ search_results_result_metadata_model_json = {}
+ search_results_result_metadata_model_json['document_retrieval_source'] = 'testString'
+ search_results_result_metadata_model_json['score'] = 38
+
+ # Construct a model instance of SearchResultsResultMetadata by calling from_dict on the json representation
+ search_results_result_metadata_model = SearchResultsResultMetadata.from_dict(search_results_result_metadata_model_json)
+ assert search_results_result_metadata_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ search_results_result_metadata_model_dict = SearchResultsResultMetadata.from_dict(search_results_result_metadata_model_json).__dict__
+ search_results_result_metadata_model2 = SearchResultsResultMetadata(**search_results_result_metadata_model_dict)
+
+ # Verify the model instances are equivalent
+ assert search_results_result_metadata_model == search_results_result_metadata_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ search_results_result_metadata_model_json2 = search_results_result_metadata_model.to_dict()
+ assert search_results_result_metadata_model_json2 == search_results_result_metadata_model_json
+
+
+class TestModel_SearchSettings:
+ """
+ Test Class for SearchSettings
+ """
+
+ def test_search_settings_serialization(self):
+ """
+ Test serialization/deserialization for SearchSettings
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ search_settings_discovery_authentication_model = {} # SearchSettingsDiscoveryAuthentication
+ search_settings_discovery_authentication_model['basic'] = 'testString'
+ search_settings_discovery_authentication_model['bearer'] = 'testString'
+
+ search_settings_discovery_model = {} # SearchSettingsDiscovery
+ search_settings_discovery_model['instance_id'] = 'testString'
+ search_settings_discovery_model['project_id'] = 'testString'
+ search_settings_discovery_model['url'] = 'testString'
+ search_settings_discovery_model['max_primary_results'] = 10000
+ search_settings_discovery_model['max_total_results'] = 10000
+ search_settings_discovery_model['confidence_threshold'] = 0.0
+ search_settings_discovery_model['highlight'] = True
+ search_settings_discovery_model['find_answers'] = True
+ search_settings_discovery_model['authentication'] = search_settings_discovery_authentication_model
+
+ search_settings_messages_model = {} # SearchSettingsMessages
+ search_settings_messages_model['success'] = 'testString'
+ search_settings_messages_model['error'] = 'testString'
+ search_settings_messages_model['no_result'] = 'testString'
+
+ search_settings_schema_mapping_model = {} # SearchSettingsSchemaMapping
+ search_settings_schema_mapping_model['url'] = 'testString'
+ search_settings_schema_mapping_model['body'] = 'testString'
+ search_settings_schema_mapping_model['title'] = 'testString'
+
+ search_settings_elastic_search_model = {} # SearchSettingsElasticSearch
+ search_settings_elastic_search_model['url'] = 'testString'
+ search_settings_elastic_search_model['port'] = 'testString'
+ search_settings_elastic_search_model['username'] = 'testString'
+ search_settings_elastic_search_model['password'] = 'testString'
+ search_settings_elastic_search_model['index'] = 'testString'
+ search_settings_elastic_search_model['filter'] = ['testString']
+ search_settings_elastic_search_model['query_body'] = {'anyKey': 'anyValue'}
+ search_settings_elastic_search_model['managed_index'] = 'testString'
+ search_settings_elastic_search_model['apikey'] = 'testString'
+
+ search_settings_conversational_search_response_length_model = {} # SearchSettingsConversationalSearchResponseLength
+ search_settings_conversational_search_response_length_model['option'] = 'moderate'
+
+ search_settings_conversational_search_search_confidence_model = {} # SearchSettingsConversationalSearchSearchConfidence
+ search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often'
+
+ search_settings_conversational_search_model = {} # SearchSettingsConversationalSearch
+ search_settings_conversational_search_model['enabled'] = True
+ search_settings_conversational_search_model['response_length'] = search_settings_conversational_search_response_length_model
+ search_settings_conversational_search_model['search_confidence'] = search_settings_conversational_search_search_confidence_model
+
+ search_settings_server_side_search_model = {} # SearchSettingsServerSideSearch
+ search_settings_server_side_search_model['url'] = 'testString'
+ search_settings_server_side_search_model['port'] = 'testString'
+ search_settings_server_side_search_model['username'] = 'testString'
+ search_settings_server_side_search_model['password'] = 'testString'
+ search_settings_server_side_search_model['filter'] = 'testString'
+ search_settings_server_side_search_model['metadata'] = {'anyKey': 'anyValue'}
+ search_settings_server_side_search_model['apikey'] = 'testString'
+ search_settings_server_side_search_model['no_auth'] = True
+ search_settings_server_side_search_model['auth_type'] = 'basic'
+
+ search_settings_client_side_search_model = {} # SearchSettingsClientSideSearch
+ search_settings_client_side_search_model['filter'] = 'testString'
+ search_settings_client_side_search_model['metadata'] = {'anyKey': 'anyValue'}
+
+ # Construct a json representation of a SearchSettings model
+ search_settings_model_json = {}
+ search_settings_model_json['discovery'] = search_settings_discovery_model
+ search_settings_model_json['messages'] = search_settings_messages_model
+ search_settings_model_json['schema_mapping'] = search_settings_schema_mapping_model
+ search_settings_model_json['elastic_search'] = search_settings_elastic_search_model
+ search_settings_model_json['conversational_search'] = search_settings_conversational_search_model
+ search_settings_model_json['server_side_search'] = search_settings_server_side_search_model
+ search_settings_model_json['client_side_search'] = search_settings_client_side_search_model
+
+ # Construct a model instance of SearchSettings by calling from_dict on the json representation
+ search_settings_model = SearchSettings.from_dict(search_settings_model_json)
+ assert search_settings_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ search_settings_model_dict = SearchSettings.from_dict(search_settings_model_json).__dict__
+ search_settings_model2 = SearchSettings(**search_settings_model_dict)
+
+ # Verify the model instances are equivalent
+ assert search_settings_model == search_settings_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ search_settings_model_json2 = search_settings_model.to_dict()
+ assert search_settings_model_json2 == search_settings_model_json
+
+
+class TestModel_SearchSettingsClientSideSearch:
+ """
+ Test Class for SearchSettingsClientSideSearch
+ """
+
+ def test_search_settings_client_side_search_serialization(self):
+ """
+ Test serialization/deserialization for SearchSettingsClientSideSearch
+ """
+
+ # Construct a json representation of a SearchSettingsClientSideSearch model
+ search_settings_client_side_search_model_json = {}
+ search_settings_client_side_search_model_json['filter'] = 'testString'
+ search_settings_client_side_search_model_json['metadata'] = {'anyKey': 'anyValue'}
+
+ # Construct a model instance of SearchSettingsClientSideSearch by calling from_dict on the json representation
+ search_settings_client_side_search_model = SearchSettingsClientSideSearch.from_dict(search_settings_client_side_search_model_json)
+ assert search_settings_client_side_search_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ search_settings_client_side_search_model_dict = SearchSettingsClientSideSearch.from_dict(search_settings_client_side_search_model_json).__dict__
+ search_settings_client_side_search_model2 = SearchSettingsClientSideSearch(**search_settings_client_side_search_model_dict)
+
+ # Verify the model instances are equivalent
+ assert search_settings_client_side_search_model == search_settings_client_side_search_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ search_settings_client_side_search_model_json2 = search_settings_client_side_search_model.to_dict()
+ assert search_settings_client_side_search_model_json2 == search_settings_client_side_search_model_json
+
+
+class TestModel_SearchSettingsConversationalSearch:
+ """
+ Test Class for SearchSettingsConversationalSearch
+ """
+
+ def test_search_settings_conversational_search_serialization(self):
+ """
+ Test serialization/deserialization for SearchSettingsConversationalSearch
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ search_settings_conversational_search_response_length_model = {} # SearchSettingsConversationalSearchResponseLength
+ search_settings_conversational_search_response_length_model['option'] = 'moderate'
+
+ search_settings_conversational_search_search_confidence_model = {} # SearchSettingsConversationalSearchSearchConfidence
+ search_settings_conversational_search_search_confidence_model['threshold'] = 'less_often'
+
+ # Construct a json representation of a SearchSettingsConversationalSearch model
+ search_settings_conversational_search_model_json = {}
+ search_settings_conversational_search_model_json['enabled'] = True
+ search_settings_conversational_search_model_json['response_length'] = search_settings_conversational_search_response_length_model
+ search_settings_conversational_search_model_json['search_confidence'] = search_settings_conversational_search_search_confidence_model
+
+ # Construct a model instance of SearchSettingsConversationalSearch by calling from_dict on the json representation
+ search_settings_conversational_search_model = SearchSettingsConversationalSearch.from_dict(search_settings_conversational_search_model_json)
+ assert search_settings_conversational_search_model != False
+
+ # Construct a second instance from the first instance's attribute dict to exercise the keyword constructor
+ search_settings_conversational_search_model_dict = SearchSettingsConversationalSearch.from_dict(search_settings_conversational_search_model_json).__dict__
+ search_settings_conversational_search_model2 = SearchSettingsConversationalSearch(**search_settings_conversational_search_model_dict)
+
+ # Verify the model instances are equivalent
+ assert search_settings_conversational_search_model == search_settings_conversational_search_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ search_settings_conversational_search_model_json2 = search_settings_conversational_search_model.to_dict()
+ assert search_settings_conversational_search_model_json2 == search_settings_conversational_search_model_json
+
+
class TestModel_SearchSettingsConversationalSearchResponseLength:
    """
    Test Class for SearchSettingsConversationalSearchResponseLength
    """

    def test_search_settings_conversational_search_response_length_serialization(self):
        """
        Test serialization/deserialization for SearchSettingsConversationalSearchResponseLength
        """

        # JSON payload describing a SearchSettingsConversationalSearchResponseLength model.
        model_json = {
            'option': 'moderate',
        }

        # Round-trip: JSON -> model instance.
        model = SearchSettingsConversationalSearchResponseLength.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = SearchSettingsConversationalSearchResponseLength(
            **SearchSettingsConversationalSearchResponseLength.from_dict(model_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Model -> dict must reproduce the original JSON without data loss.
        assert model.to_dict() == model_json
+
+
class TestModel_SearchSettingsConversationalSearchSearchConfidence:
    """
    Test Class for SearchSettingsConversationalSearchSearchConfidence
    """

    def test_search_settings_conversational_search_search_confidence_serialization(self):
        """
        Test serialization/deserialization for SearchSettingsConversationalSearchSearchConfidence
        """

        # JSON payload describing a SearchSettingsConversationalSearchSearchConfidence model.
        model_json = {
            'threshold': 'less_often',
        }

        # Round-trip: JSON -> model instance.
        model = SearchSettingsConversationalSearchSearchConfidence.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = SearchSettingsConversationalSearchSearchConfidence(
            **SearchSettingsConversationalSearchSearchConfidence.from_dict(model_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Model -> dict must reproduce the original JSON without data loss.
        assert model.to_dict() == model_json
+
+
class TestModel_SearchSettingsDiscovery:
    """
    Test Class for SearchSettingsDiscovery
    """

    def test_search_settings_discovery_serialization(self):
        """
        Test serialization/deserialization for SearchSettingsDiscovery
        """

        # Dict form of the nested authentication model this payload embeds.
        authentication = {  # SearchSettingsDiscoveryAuthentication
            'basic': 'testString',
            'bearer': 'testString',
        }

        # JSON payload describing a SearchSettingsDiscovery model.
        model_json = {
            'instance_id': 'testString',
            'project_id': 'testString',
            'url': 'testString',
            'max_primary_results': 10000,
            'max_total_results': 10000,
            'confidence_threshold': 0.0,
            'highlight': True,
            'find_answers': True,
            'authentication': authentication,
        }

        # Round-trip: JSON -> model instance.
        model = SearchSettingsDiscovery.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = SearchSettingsDiscovery(
            **SearchSettingsDiscovery.from_dict(model_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Model -> dict must reproduce the original JSON without data loss.
        assert model.to_dict() == model_json
+
+
class TestModel_SearchSettingsDiscoveryAuthentication:
    """
    Test Class for SearchSettingsDiscoveryAuthentication
    """

    def test_search_settings_discovery_authentication_serialization(self):
        """
        Test serialization/deserialization for SearchSettingsDiscoveryAuthentication
        """

        # JSON payload describing a SearchSettingsDiscoveryAuthentication model.
        model_json = {
            'basic': 'testString',
            'bearer': 'testString',
        }

        # Round-trip: JSON -> model instance.
        model = SearchSettingsDiscoveryAuthentication.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = SearchSettingsDiscoveryAuthentication(
            **SearchSettingsDiscoveryAuthentication.from_dict(model_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Model -> dict must reproduce the original JSON without data loss.
        assert model.to_dict() == model_json
+
+
class TestModel_SearchSettingsElasticSearch:
    """
    Test Class for SearchSettingsElasticSearch
    """

    def test_search_settings_elastic_search_serialization(self):
        """
        Test serialization/deserialization for SearchSettingsElasticSearch
        """

        # JSON payload describing a SearchSettingsElasticSearch model.
        model_json = {
            'url': 'testString',
            'port': 'testString',
            'username': 'testString',
            'password': 'testString',
            'index': 'testString',
            'filter': ['testString'],
            'query_body': {'anyKey': 'anyValue'},
            'managed_index': 'testString',
            'apikey': 'testString',
        }

        # Round-trip: JSON -> model instance.
        model = SearchSettingsElasticSearch.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = SearchSettingsElasticSearch(
            **SearchSettingsElasticSearch.from_dict(model_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Model -> dict must reproduce the original JSON without data loss.
        assert model.to_dict() == model_json
+
+
class TestModel_SearchSettingsMessages:
    """
    Test Class for SearchSettingsMessages
    """

    def test_search_settings_messages_serialization(self):
        """
        Test serialization/deserialization for SearchSettingsMessages
        """

        # JSON payload describing a SearchSettingsMessages model.
        model_json = {
            'success': 'testString',
            'error': 'testString',
            'no_result': 'testString',
        }

        # Round-trip: JSON -> model instance.
        model = SearchSettingsMessages.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = SearchSettingsMessages(
            **SearchSettingsMessages.from_dict(model_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Model -> dict must reproduce the original JSON without data loss.
        assert model.to_dict() == model_json
+
+
class TestModel_SearchSettingsSchemaMapping:
    """
    Test Class for SearchSettingsSchemaMapping
    """

    def test_search_settings_schema_mapping_serialization(self):
        """
        Test serialization/deserialization for SearchSettingsSchemaMapping
        """

        # JSON payload describing a SearchSettingsSchemaMapping model.
        model_json = {
            'url': 'testString',
            'body': 'testString',
            'title': 'testString',
        }

        # Round-trip: JSON -> model instance.
        model = SearchSettingsSchemaMapping.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = SearchSettingsSchemaMapping(
            **SearchSettingsSchemaMapping.from_dict(model_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Model -> dict must reproduce the original JSON without data loss.
        assert model.to_dict() == model_json
+
+
class TestModel_SearchSettingsServerSideSearch:
    """
    Test Class for SearchSettingsServerSideSearch
    """

    def test_search_settings_server_side_search_serialization(self):
        """
        Test serialization/deserialization for SearchSettingsServerSideSearch
        """

        # JSON payload describing a SearchSettingsServerSideSearch model.
        model_json = {
            'url': 'testString',
            'port': 'testString',
            'username': 'testString',
            'password': 'testString',
            'filter': 'testString',
            'metadata': {'anyKey': 'anyValue'},
            'apikey': 'testString',
            'no_auth': True,
            'auth_type': 'basic',
        }

        # Round-trip: JSON -> model instance.
        model = SearchSettingsServerSideSearch.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first one's attribute dict.
        model2 = SearchSettingsServerSideSearch(
            **SearchSettingsServerSideSearch.from_dict(model_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Model -> dict must reproduce the original JSON without data loss.
        assert model.to_dict() == model_json
+
+
class TestModel_SearchSkillWarning:
    """
    Test Class for SearchSkillWarning
    """

    def test_search_skill_warning_serialization(self):
        """
        Test serialization/deserialization for SearchSkillWarning.

        Verifies that from_dict / to_dict round-trip the JSON payload
        without data loss and that reconstructing from the instance's
        attribute dict yields an equivalent model.
        """

        # Construct a json representation of a SearchSkillWarning model
        search_skill_warning_model_json = {}
        search_skill_warning_model_json['code'] = 'testString'
        search_skill_warning_model_json['path'] = 'testString'
        search_skill_warning_model_json['message'] = 'testString'

        # Construct a model instance of SearchSkillWarning by calling from_dict on the json representation.
        # NOTE: `!= False` was vacuous here (a model instance never equals False);
        # assert on identity against None instead.
        search_skill_warning_model = SearchSkillWarning.from_dict(search_skill_warning_model_json)
        assert search_skill_warning_model is not None

        # Construct a second instance from the first one's attribute dict
        search_skill_warning_model_dict = SearchSkillWarning.from_dict(search_skill_warning_model_json).__dict__
        search_skill_warning_model2 = SearchSkillWarning(**search_skill_warning_model_dict)

        # Verify the model instances are equivalent
        assert search_skill_warning_model == search_skill_warning_model2

        # Convert model instance back to dict and verify no loss of data
        search_skill_warning_model_json2 = search_skill_warning_model.to_dict()
        assert search_skill_warning_model_json2 == search_skill_warning_model_json
+
+
class TestModel_SessionResponse:
    """
    Test Class for SessionResponse
    """

    def test_session_response_serialization(self):
        """
        Test serialization/deserialization for SessionResponse.

        Verifies that from_dict / to_dict round-trip the JSON payload
        without data loss and that reconstructing from the instance's
        attribute dict yields an equivalent model.
        """

        # Construct a json representation of a SessionResponse model
        session_response_model_json = {}
        session_response_model_json['session_id'] = 'testString'

        # Construct a model instance of SessionResponse by calling from_dict on the json representation.
        # NOTE: `!= False` was vacuous here (a model instance never equals False);
        # assert on identity against None instead.
        session_response_model = SessionResponse.from_dict(session_response_model_json)
        assert session_response_model is not None

        # Construct a second instance from the first one's attribute dict
        session_response_model_dict = SessionResponse.from_dict(session_response_model_json).__dict__
        session_response_model2 = SessionResponse(**session_response_model_dict)

        # Verify the model instances are equivalent
        assert session_response_model == session_response_model2

        # Convert model instance back to dict and verify no loss of data
        session_response_model_json2 = session_response_model.to_dict()
        assert session_response_model_json2 == session_response_model_json
+
+
class TestModel_Skill:
    """
    Test Class for Skill
    """

    def test_skill_serialization(self):
        """
        Test serialization/deserialization for Skill
        """

        # Dict forms of the nested model objects needed to build a Skill.
        discovery = {  # SearchSettingsDiscovery
            'instance_id': 'testString',
            'project_id': 'testString',
            'url': 'testString',
            'max_primary_results': 10000,
            'max_total_results': 10000,
            'confidence_threshold': 0.0,
            'highlight': True,
            'find_answers': True,
            'authentication': {  # SearchSettingsDiscoveryAuthentication
                'basic': 'testString',
                'bearer': 'testString',
            },
        }

        messages = {  # SearchSettingsMessages
            'success': 'testString',
            'error': 'testString',
            'no_result': 'testString',
        }

        schema_mapping = {  # SearchSettingsSchemaMapping
            'url': 'testString',
            'body': 'testString',
            'title': 'testString',
        }

        elastic_search = {  # SearchSettingsElasticSearch
            'url': 'testString',
            'port': 'testString',
            'username': 'testString',
            'password': 'testString',
            'index': 'testString',
            'filter': ['testString'],
            'query_body': {'anyKey': 'anyValue'},
            'managed_index': 'testString',
            'apikey': 'testString',
        }

        conversational_search = {  # SearchSettingsConversationalSearch
            'enabled': True,
            'response_length': {'option': 'moderate'},  # SearchSettingsConversationalSearchResponseLength
            'search_confidence': {'threshold': 'less_often'},  # SearchSettingsConversationalSearchSearchConfidence
        }

        server_side_search = {  # SearchSettingsServerSideSearch
            'url': 'testString',
            'port': 'testString',
            'username': 'testString',
            'password': 'testString',
            'filter': 'testString',
            'metadata': {'anyKey': 'anyValue'},
            'apikey': 'testString',
            'no_auth': True,
            'auth_type': 'basic',
        }

        client_side_search = {  # SearchSettingsClientSideSearch
            'filter': 'testString',
            'metadata': {'anyKey': 'anyValue'},
        }

        search_settings = {  # SearchSettings
            'discovery': discovery,
            'messages': messages,
            'schema_mapping': schema_mapping,
            'elastic_search': elastic_search,
            'conversational_search': conversational_search,
            'server_side_search': server_side_search,
            'client_side_search': client_side_search,
        }

        # JSON payload describing a Skill model.
        skill_json = {
            'name': 'testString',
            'description': 'testString',
            'workspace': {'anyKey': 'anyValue'},
            'dialog_settings': {'anyKey': 'anyValue'},
            'search_settings': search_settings,
            'language': 'testString',
            'type': 'action',
        }

        # Round-trip: JSON -> model instance.
        skill = Skill.from_dict(skill_json)
        assert skill != False

        # Rebuild a second instance from the first one's attribute dict.
        skill2 = Skill(**Skill.from_dict(skill_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert skill == skill2

        # Model -> dict must reproduce the original JSON without data loss.
        assert skill.to_dict() == skill_json
+
+
class TestModel_SkillImport:
    """
    Test Class for SkillImport
    """

    def test_skill_import_serialization(self):
        """
        Test serialization/deserialization for SkillImport
        """

        # Dict forms of the nested model objects needed to build a SkillImport.
        discovery = {  # SearchSettingsDiscovery
            'instance_id': 'testString',
            'project_id': 'testString',
            'url': 'testString',
            'max_primary_results': 10000,
            'max_total_results': 10000,
            'confidence_threshold': 0.0,
            'highlight': True,
            'find_answers': True,
            'authentication': {  # SearchSettingsDiscoveryAuthentication
                'basic': 'testString',
                'bearer': 'testString',
            },
        }

        messages = {  # SearchSettingsMessages
            'success': 'testString',
            'error': 'testString',
            'no_result': 'testString',
        }

        schema_mapping = {  # SearchSettingsSchemaMapping
            'url': 'testString',
            'body': 'testString',
            'title': 'testString',
        }

        elastic_search = {  # SearchSettingsElasticSearch
            'url': 'testString',
            'port': 'testString',
            'username': 'testString',
            'password': 'testString',
            'index': 'testString',
            'filter': ['testString'],
            'query_body': {'anyKey': 'anyValue'},
            'managed_index': 'testString',
            'apikey': 'testString',
        }

        conversational_search = {  # SearchSettingsConversationalSearch
            'enabled': True,
            'response_length': {'option': 'moderate'},  # SearchSettingsConversationalSearchResponseLength
            'search_confidence': {'threshold': 'less_often'},  # SearchSettingsConversationalSearchSearchConfidence
        }

        server_side_search = {  # SearchSettingsServerSideSearch
            'url': 'testString',
            'port': 'testString',
            'username': 'testString',
            'password': 'testString',
            'filter': 'testString',
            'metadata': {'anyKey': 'anyValue'},
            'apikey': 'testString',
            'no_auth': True,
            'auth_type': 'basic',
        }

        client_side_search = {  # SearchSettingsClientSideSearch
            'filter': 'testString',
            'metadata': {'anyKey': 'anyValue'},
        }

        search_settings = {  # SearchSettings
            'discovery': discovery,
            'messages': messages,
            'schema_mapping': schema_mapping,
            'elastic_search': elastic_search,
            'conversational_search': conversational_search,
            'server_side_search': server_side_search,
            'client_side_search': client_side_search,
        }

        # JSON payload describing a SkillImport model.
        skill_import_json = {
            'name': 'testString',
            'description': 'testString',
            'workspace': {'anyKey': 'anyValue'},
            'dialog_settings': {'anyKey': 'anyValue'},
            'search_settings': search_settings,
            'language': 'testString',
            'type': 'action',
        }

        # Round-trip: JSON -> model instance.
        skill_import = SkillImport.from_dict(skill_import_json)
        assert skill_import != False

        # Rebuild a second instance from the first one's attribute dict.
        skill_import2 = SkillImport(**SkillImport.from_dict(skill_import_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert skill_import == skill_import2

        # Model -> dict must reproduce the original JSON without data loss.
        assert skill_import.to_dict() == skill_import_json
+
+
class TestModel_SkillsAsyncRequestStatus:
    """
    Test Class for SkillsAsyncRequestStatus
    """

    def test_skills_async_request_status_serialization(self):
        """
        Test serialization/deserialization for SkillsAsyncRequestStatus.

        Uses an empty JSON payload — this exercises the case where every
        property of the model is absent, so from_dict must not require any
        key, and to_dict must emit an empty dict back.
        """

        # Construct a json representation of a SkillsAsyncRequestStatus model
        # (intentionally empty; see docstring).
        skills_async_request_status_model_json = {}

        # Construct a model instance of SkillsAsyncRequestStatus by calling from_dict on the json representation.
        # NOTE: `!= False` was vacuous here (a model instance never equals False);
        # assert on identity against None instead.
        skills_async_request_status_model = SkillsAsyncRequestStatus.from_dict(skills_async_request_status_model_json)
        assert skills_async_request_status_model is not None

        # Construct a second instance from the first one's attribute dict
        skills_async_request_status_model_dict = SkillsAsyncRequestStatus.from_dict(skills_async_request_status_model_json).__dict__
        skills_async_request_status_model2 = SkillsAsyncRequestStatus(**skills_async_request_status_model_dict)

        # Verify the model instances are equivalent
        assert skills_async_request_status_model == skills_async_request_status_model2

        # Convert model instance back to dict and verify no loss of data
        skills_async_request_status_model_json2 = skills_async_request_status_model.to_dict()
        assert skills_async_request_status_model_json2 == skills_async_request_status_model_json
+
+
class TestModel_SkillsExport:
    """
    Test Class for SkillsExport
    """

    def test_skills_export_serialization(self):
        """
        Test serialization/deserialization for SkillsExport
        """

        # Dict forms of every nested model required to assemble a SkillsExport.

        search_settings_discovery_authentication_model = {  # SearchSettingsDiscoveryAuthentication
            'basic': 'testString',
            'bearer': 'testString',
        }

        search_settings_discovery_model = {  # SearchSettingsDiscovery
            'instance_id': 'testString',
            'project_id': 'testString',
            'url': 'testString',
            'max_primary_results': 10000,
            'max_total_results': 10000,
            'confidence_threshold': 0.0,
            'highlight': True,
            'find_answers': True,
            'authentication': search_settings_discovery_authentication_model,
        }

        search_settings_messages_model = {  # SearchSettingsMessages
            'success': 'testString',
            'error': 'testString',
            'no_result': 'testString',
        }

        search_settings_schema_mapping_model = {  # SearchSettingsSchemaMapping
            'url': 'testString',
            'body': 'testString',
            'title': 'testString',
        }

        search_settings_elastic_search_model = {  # SearchSettingsElasticSearch
            'url': 'testString',
            'port': 'testString',
            'username': 'testString',
            'password': 'testString',
            'index': 'testString',
            'filter': ['testString'],
            'query_body': {'anyKey': 'anyValue'},
            'managed_index': 'testString',
            'apikey': 'testString',
        }

        search_settings_conversational_search_response_length_model = {  # SearchSettingsConversationalSearchResponseLength
            'option': 'moderate',
        }

        search_settings_conversational_search_search_confidence_model = {  # SearchSettingsConversationalSearchSearchConfidence
            'threshold': 'less_often',
        }

        search_settings_conversational_search_model = {  # SearchSettingsConversationalSearch
            'enabled': True,
            'response_length': search_settings_conversational_search_response_length_model,
            'search_confidence': search_settings_conversational_search_search_confidence_model,
        }

        search_settings_server_side_search_model = {  # SearchSettingsServerSideSearch
            'url': 'testString',
            'port': 'testString',
            'username': 'testString',
            'password': 'testString',
            'filter': 'testString',
            'metadata': {'anyKey': 'anyValue'},
            'apikey': 'testString',
            'no_auth': True,
            'auth_type': 'basic',
        }

        search_settings_client_side_search_model = {  # SearchSettingsClientSideSearch
            'filter': 'testString',
            'metadata': {'anyKey': 'anyValue'},
        }

        search_settings_model = {  # SearchSettings
            'discovery': search_settings_discovery_model,
            'messages': search_settings_messages_model,
            'schema_mapping': search_settings_schema_mapping_model,
            'elastic_search': search_settings_elastic_search_model,
            'conversational_search': search_settings_conversational_search_model,
            'server_side_search': search_settings_server_side_search_model,
            'client_side_search': search_settings_client_side_search_model,
        }

        skill_model = {  # Skill
            'name': 'testString',
            'description': 'testString',
            'workspace': {'anyKey': 'anyValue'},
            'dialog_settings': {'anyKey': 'anyValue'},
            'search_settings': search_settings_model,
            'language': 'testString',
            'type': 'action',
        }

        assistant_state_model = {  # AssistantState
            'action_disabled': True,
            'dialog_disabled': True,
        }

        # JSON representation of the SkillsExport model under test.
        skills_export_model_json = {
            'assistant_skills': [skill_model],
            'assistant_state': assistant_state_model,
        }

        # Deserialize the JSON into a model instance.
        skills_export_model = SkillsExport.from_dict(skills_export_model_json)
        assert skills_export_model != False

        # Rebuild a second instance from the first instance's attributes;
        # the two must compare equal.
        skills_export_model_dict = SkillsExport.from_dict(skills_export_model_json).__dict__
        skills_export_model2 = SkillsExport(**skills_export_model_dict)
        assert skills_export_model == skills_export_model2

        # Round-trip back to a dict and verify no loss of data.
        skills_export_model_json2 = skills_export_model.to_dict()
        assert skills_export_model_json2 == skills_export_model_json
+
+
class TestModel_StatefulMessageResponse:
    """
    Test Class for StatefulMessageResponse
    """

    def test_stateful_message_response_serialization(self):
        """
        Test serialization/deserialization for StatefulMessageResponse

        Builds the full tree of nested model dicts, deserializes it with
        from_dict, and verifies instance equality plus a lossless to_dict
        round trip.
        """

        # Construct dict forms of any model objects needed in order to build this model.

        # --- conversational-search response fixtures (citations, scores, results) ---

        response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem
        response_generic_citation_ranges_item_model['start'] = 38
        response_generic_citation_ranges_item_model['end'] = 38

        response_generic_citation_model = {} # ResponseGenericCitation
        response_generic_citation_model['title'] = 'testString'
        response_generic_citation_model['text'] = 'testString'
        response_generic_citation_model['body'] = 'testString'
        response_generic_citation_model['search_result_index'] = 38
        response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model]

        response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores
        response_generic_confidence_scores_model['threshold'] = 72.5
        response_generic_confidence_scores_model['pre_gen'] = 72.5
        response_generic_confidence_scores_model['post_gen'] = 72.5
        response_generic_confidence_scores_model['extractiveness'] = 72.5

        search_results_result_metadata_model = {} # SearchResultsResultMetadata
        search_results_result_metadata_model['document_retrieval_source'] = 'testString'
        search_results_result_metadata_model['score'] = 38

        search_results_model = {} # SearchResults
        search_results_model['result_metadata'] = search_results_result_metadata_model
        search_results_model['id'] = 'testString'
        search_results_model['title'] = 'testString'
        search_results_model['body'] = 'testString'

        runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
        runtime_response_generic_model['response_type'] = 'conversation_search'
        runtime_response_generic_model['text'] = 'testString'
        runtime_response_generic_model['citations_title'] = 'testString'
        runtime_response_generic_model['citations'] = [response_generic_citation_model]
        runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model
        runtime_response_generic_model['response_length_option'] = 'testString'
        runtime_response_generic_model['search_results'] = [search_results_model]
        runtime_response_generic_model['disclaimer'] = 'testString'

        # --- intent / entity fixtures shared by message output and input ---

        runtime_intent_model = {} # RuntimeIntent
        runtime_intent_model['intent'] = 'testString'
        runtime_intent_model['confidence'] = 72.5
        runtime_intent_model['skill'] = 'testString'

        capture_group_model = {} # CaptureGroup
        capture_group_model['group'] = 'testString'
        capture_group_model['location'] = [38]

        runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
        runtime_entity_interpretation_model['calendar_type'] = 'testString'
        runtime_entity_interpretation_model['datetime_link'] = 'testString'
        runtime_entity_interpretation_model['festival'] = 'testString'
        runtime_entity_interpretation_model['granularity'] = 'day'
        runtime_entity_interpretation_model['range_link'] = 'testString'
        runtime_entity_interpretation_model['range_modifier'] = 'testString'
        runtime_entity_interpretation_model['relative_day'] = 72.5
        runtime_entity_interpretation_model['relative_month'] = 72.5
        runtime_entity_interpretation_model['relative_week'] = 72.5
        runtime_entity_interpretation_model['relative_weekend'] = 72.5
        runtime_entity_interpretation_model['relative_year'] = 72.5
        runtime_entity_interpretation_model['specific_day'] = 72.5
        runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
        runtime_entity_interpretation_model['specific_month'] = 72.5
        runtime_entity_interpretation_model['specific_quarter'] = 72.5
        runtime_entity_interpretation_model['specific_year'] = 72.5
        runtime_entity_interpretation_model['numeric_value'] = 72.5
        runtime_entity_interpretation_model['subtype'] = 'testString'
        runtime_entity_interpretation_model['part_of_day'] = 'testString'
        runtime_entity_interpretation_model['relative_hour'] = 72.5
        runtime_entity_interpretation_model['relative_minute'] = 72.5
        runtime_entity_interpretation_model['relative_second'] = 72.5
        runtime_entity_interpretation_model['specific_hour'] = 72.5
        runtime_entity_interpretation_model['specific_minute'] = 72.5
        runtime_entity_interpretation_model['specific_second'] = 72.5
        runtime_entity_interpretation_model['timezone'] = 'testString'

        runtime_entity_alternative_model = {} # RuntimeEntityAlternative
        runtime_entity_alternative_model['value'] = 'testString'
        runtime_entity_alternative_model['confidence'] = 72.5

        runtime_entity_role_model = {} # RuntimeEntityRole
        runtime_entity_role_model['type'] = 'date_from'

        runtime_entity_model = {} # RuntimeEntity
        runtime_entity_model['entity'] = 'testString'
        runtime_entity_model['location'] = [38]
        runtime_entity_model['value'] = 'testString'
        runtime_entity_model['confidence'] = 72.5
        runtime_entity_model['groups'] = [capture_group_model]
        runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
        runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
        runtime_entity_model['role'] = runtime_entity_role_model
        runtime_entity_model['skill'] = 'testString'

        # --- dialog debug fixtures (visited nodes, log messages, turn events) ---

        dialog_node_action_model = {} # DialogNodeAction
        dialog_node_action_model['name'] = 'testString'
        dialog_node_action_model['type'] = 'client'
        dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
        dialog_node_action_model['result_variable'] = 'testString'
        dialog_node_action_model['credentials'] = 'testString'

        dialog_node_visited_model = {} # DialogNodeVisited
        dialog_node_visited_model['dialog_node'] = 'testString'
        dialog_node_visited_model['title'] = 'testString'
        dialog_node_visited_model['conditions'] = 'testString'

        log_message_source_model = {} # LogMessageSourceDialogNode
        log_message_source_model['type'] = 'dialog_node'
        log_message_source_model['dialog_node'] = 'testString'

        dialog_log_message_model = {} # DialogLogMessage
        dialog_log_message_model['level'] = 'info'
        dialog_log_message_model['message'] = 'testString'
        dialog_log_message_model['code'] = 'testString'
        dialog_log_message_model['source'] = log_message_source_model

        turn_event_action_source_model = {} # TurnEventActionSource
        turn_event_action_source_model['type'] = 'action'
        turn_event_action_source_model['action'] = 'testString'
        turn_event_action_source_model['action_title'] = 'testString'
        turn_event_action_source_model['condition'] = 'testString'

        message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited
        message_output_debug_turn_event_model['event'] = 'action_visited'
        message_output_debug_turn_event_model['source'] = turn_event_action_source_model
        message_output_debug_turn_event_model['action_start_time'] = 'testString'
        message_output_debug_turn_event_model['condition_type'] = 'user_defined'
        message_output_debug_turn_event_model['reason'] = 'intent'
        message_output_debug_turn_event_model['result_variable'] = 'testString'

        message_output_debug_model = {} # MessageOutputDebug
        message_output_debug_model['nodes_visited'] = [dialog_node_visited_model]
        message_output_debug_model['log_messages'] = [dialog_log_message_model]
        message_output_debug_model['branch_exited'] = True
        message_output_debug_model['branch_exited_reason'] = 'completed'
        message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model]

        message_output_spelling_model = {} # MessageOutputSpelling
        message_output_spelling_model['text'] = 'testString'
        message_output_spelling_model['original_text'] = 'testString'
        message_output_spelling_model['suggested_text'] = 'testString'

        message_output_llm_metadata_model = {} # MessageOutputLLMMetadata
        message_output_llm_metadata_model['task'] = 'testString'
        message_output_llm_metadata_model['model_id'] = 'testString'

        # --- assembled message output ---

        message_output_model = {} # MessageOutput
        message_output_model['generic'] = [runtime_response_generic_model]
        message_output_model['intents'] = [runtime_intent_model]
        message_output_model['entities'] = [runtime_entity_model]
        message_output_model['actions'] = [dialog_node_action_model]
        message_output_model['debug'] = message_output_debug_model
        message_output_model['user_defined'] = {'anyKey': 'anyValue'}
        message_output_model['spelling'] = message_output_spelling_model
        message_output_model['llm_metadata'] = [message_output_llm_metadata_model]

        # --- message context (global + per-skill state) ---

        message_context_global_system_model = {} # MessageContextGlobalSystem
        message_context_global_system_model['timezone'] = 'testString'
        message_context_global_system_model['user_id'] = 'testString'
        message_context_global_system_model['turn_count'] = 38
        message_context_global_system_model['locale'] = 'en-us'
        message_context_global_system_model['reference_time'] = 'testString'
        message_context_global_system_model['session_start_time'] = 'testString'
        message_context_global_system_model['state'] = 'testString'
        message_context_global_system_model['skip_user_input'] = True

        message_context_global_model = {} # MessageContextGlobal
        message_context_global_model['system'] = message_context_global_system_model

        message_context_skill_system_model = {} # MessageContextSkillSystem
        message_context_skill_system_model['state'] = 'testString'
        message_context_skill_system_model['foo'] = 'testString'

        message_context_dialog_skill_model = {} # MessageContextDialogSkill
        message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
        message_context_dialog_skill_model['system'] = message_context_skill_system_model

        message_context_action_skill_model = {} # MessageContextActionSkill
        message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'}
        message_context_action_skill_model['system'] = message_context_skill_system_model
        message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'}
        message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'}

        message_context_skills_model = {} # MessageContextSkills
        message_context_skills_model['main skill'] = message_context_dialog_skill_model
        message_context_skills_model['actions skill'] = message_context_action_skill_model

        message_context_model = {} # MessageContext
        message_context_model['global'] = message_context_global_model
        message_context_model['skills'] = message_context_skills_model
        message_context_model['integrations'] = {'anyKey': 'anyValue'}

        # --- message input (used as the masked_input property) ---

        message_input_attachment_model = {} # MessageInputAttachment
        message_input_attachment_model['url'] = 'testString'
        message_input_attachment_model['media_type'] = 'testString'

        request_analytics_model = {} # RequestAnalytics
        request_analytics_model['browser'] = 'testString'
        request_analytics_model['device'] = 'testString'
        request_analytics_model['pageUrl'] = 'testString'

        message_input_options_spelling_model = {} # MessageInputOptionsSpelling
        message_input_options_spelling_model['suggestions'] = True
        message_input_options_spelling_model['auto_correct'] = True

        message_input_options_model = {} # MessageInputOptions
        message_input_options_model['restart'] = False
        message_input_options_model['alternate_intents'] = False
        message_input_options_model['async_callout'] = False
        message_input_options_model['spelling'] = message_input_options_spelling_model
        message_input_options_model['debug'] = False
        message_input_options_model['return_context'] = False
        message_input_options_model['export'] = False

        message_input_model = {} # MessageInput
        message_input_model['message_type'] = 'text'
        message_input_model['text'] = 'testString'
        message_input_model['intents'] = [runtime_intent_model]
        message_input_model['entities'] = [runtime_entity_model]
        message_input_model['suggestion_id'] = 'testString'
        message_input_model['attachments'] = [message_input_attachment_model]
        message_input_model['analytics'] = request_analytics_model
        message_input_model['options'] = message_input_options_model

        # Construct a json representation of a StatefulMessageResponse model
        stateful_message_response_model_json = {}
        stateful_message_response_model_json['output'] = message_output_model
        stateful_message_response_model_json['context'] = message_context_model
        stateful_message_response_model_json['user_id'] = 'testString'
        stateful_message_response_model_json['masked_output'] = message_output_model
        stateful_message_response_model_json['masked_input'] = message_input_model

        # Construct a model instance of StatefulMessageResponse by calling from_dict on the json representation
        stateful_message_response_model = StatefulMessageResponse.from_dict(stateful_message_response_model_json)
        assert stateful_message_response_model != False

        # Construct a second model instance from the first instance's attributes
        stateful_message_response_model_dict = StatefulMessageResponse.from_dict(stateful_message_response_model_json).__dict__
        stateful_message_response_model2 = StatefulMessageResponse(**stateful_message_response_model_dict)

        # Verify the model instances are equivalent
        assert stateful_message_response_model == stateful_message_response_model2

        # Convert model instance back to dict and verify no loss of data
        stateful_message_response_model_json2 = stateful_message_response_model.to_dict()
        assert stateful_message_response_model_json2 == stateful_message_response_model_json
+
+
class TestModel_StatelessFinalResponse:
    """
    Test Class for StatelessFinalResponse
    """

    def test_stateless_final_response_serialization(self):
        """
        Test serialization/deserialization for StatelessFinalResponse

        Builds the nested model dicts (including the stateless context
        variants), deserializes with from_dict, and checks instance equality
        plus a lossless to_dict round trip.
        """

        # Construct dict forms of any model objects needed in order to build this model.

        # --- conversational-search response fixtures ---

        response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem
        response_generic_citation_ranges_item_model['start'] = 38
        response_generic_citation_ranges_item_model['end'] = 38

        response_generic_citation_model = {} # ResponseGenericCitation
        response_generic_citation_model['title'] = 'testString'
        response_generic_citation_model['text'] = 'testString'
        response_generic_citation_model['body'] = 'testString'
        response_generic_citation_model['search_result_index'] = 38
        response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model]

        response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores
        response_generic_confidence_scores_model['threshold'] = 72.5
        response_generic_confidence_scores_model['pre_gen'] = 72.5
        response_generic_confidence_scores_model['post_gen'] = 72.5
        response_generic_confidence_scores_model['extractiveness'] = 72.5

        search_results_result_metadata_model = {} # SearchResultsResultMetadata
        search_results_result_metadata_model['document_retrieval_source'] = 'testString'
        search_results_result_metadata_model['score'] = 38

        search_results_model = {} # SearchResults
        search_results_model['result_metadata'] = search_results_result_metadata_model
        search_results_model['id'] = 'testString'
        search_results_model['title'] = 'testString'
        search_results_model['body'] = 'testString'

        runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
        runtime_response_generic_model['response_type'] = 'conversation_search'
        runtime_response_generic_model['text'] = 'testString'
        runtime_response_generic_model['citations_title'] = 'testString'
        runtime_response_generic_model['citations'] = [response_generic_citation_model]
        runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model
        runtime_response_generic_model['response_length_option'] = 'testString'
        runtime_response_generic_model['search_results'] = [search_results_model]
        runtime_response_generic_model['disclaimer'] = 'testString'

        # --- intent / entity fixtures ---

        runtime_intent_model = {} # RuntimeIntent
        runtime_intent_model['intent'] = 'testString'
        runtime_intent_model['confidence'] = 72.5
        runtime_intent_model['skill'] = 'testString'

        capture_group_model = {} # CaptureGroup
        capture_group_model['group'] = 'testString'
        capture_group_model['location'] = [38]

        runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
        runtime_entity_interpretation_model['calendar_type'] = 'testString'
        runtime_entity_interpretation_model['datetime_link'] = 'testString'
        runtime_entity_interpretation_model['festival'] = 'testString'
        runtime_entity_interpretation_model['granularity'] = 'day'
        runtime_entity_interpretation_model['range_link'] = 'testString'
        runtime_entity_interpretation_model['range_modifier'] = 'testString'
        runtime_entity_interpretation_model['relative_day'] = 72.5
        runtime_entity_interpretation_model['relative_month'] = 72.5
        runtime_entity_interpretation_model['relative_week'] = 72.5
        runtime_entity_interpretation_model['relative_weekend'] = 72.5
        runtime_entity_interpretation_model['relative_year'] = 72.5
        runtime_entity_interpretation_model['specific_day'] = 72.5
        runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
        runtime_entity_interpretation_model['specific_month'] = 72.5
        runtime_entity_interpretation_model['specific_quarter'] = 72.5
        runtime_entity_interpretation_model['specific_year'] = 72.5
        runtime_entity_interpretation_model['numeric_value'] = 72.5
        runtime_entity_interpretation_model['subtype'] = 'testString'
        runtime_entity_interpretation_model['part_of_day'] = 'testString'
        runtime_entity_interpretation_model['relative_hour'] = 72.5
        runtime_entity_interpretation_model['relative_minute'] = 72.5
        runtime_entity_interpretation_model['relative_second'] = 72.5
        runtime_entity_interpretation_model['specific_hour'] = 72.5
        runtime_entity_interpretation_model['specific_minute'] = 72.5
        runtime_entity_interpretation_model['specific_second'] = 72.5
        runtime_entity_interpretation_model['timezone'] = 'testString'

        runtime_entity_alternative_model = {} # RuntimeEntityAlternative
        runtime_entity_alternative_model['value'] = 'testString'
        runtime_entity_alternative_model['confidence'] = 72.5

        runtime_entity_role_model = {} # RuntimeEntityRole
        runtime_entity_role_model['type'] = 'date_from'

        runtime_entity_model = {} # RuntimeEntity
        runtime_entity_model['entity'] = 'testString'
        runtime_entity_model['location'] = [38]
        runtime_entity_model['value'] = 'testString'
        runtime_entity_model['confidence'] = 72.5
        runtime_entity_model['groups'] = [capture_group_model]
        runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
        runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
        runtime_entity_model['role'] = runtime_entity_role_model
        runtime_entity_model['skill'] = 'testString'

        # --- dialog debug fixtures ---

        dialog_node_action_model = {} # DialogNodeAction
        dialog_node_action_model['name'] = 'testString'
        dialog_node_action_model['type'] = 'client'
        dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
        dialog_node_action_model['result_variable'] = 'testString'
        dialog_node_action_model['credentials'] = 'testString'

        dialog_node_visited_model = {} # DialogNodeVisited
        dialog_node_visited_model['dialog_node'] = 'testString'
        dialog_node_visited_model['title'] = 'testString'
        dialog_node_visited_model['conditions'] = 'testString'

        log_message_source_model = {} # LogMessageSourceDialogNode
        log_message_source_model['type'] = 'dialog_node'
        log_message_source_model['dialog_node'] = 'testString'

        dialog_log_message_model = {} # DialogLogMessage
        dialog_log_message_model['level'] = 'info'
        dialog_log_message_model['message'] = 'testString'
        dialog_log_message_model['code'] = 'testString'
        dialog_log_message_model['source'] = log_message_source_model

        turn_event_action_source_model = {} # TurnEventActionSource
        turn_event_action_source_model['type'] = 'action'
        turn_event_action_source_model['action'] = 'testString'
        turn_event_action_source_model['action_title'] = 'testString'
        turn_event_action_source_model['condition'] = 'testString'

        message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited
        message_output_debug_turn_event_model['event'] = 'action_visited'
        message_output_debug_turn_event_model['source'] = turn_event_action_source_model
        message_output_debug_turn_event_model['action_start_time'] = 'testString'
        message_output_debug_turn_event_model['condition_type'] = 'user_defined'
        message_output_debug_turn_event_model['reason'] = 'intent'
        message_output_debug_turn_event_model['result_variable'] = 'testString'

        message_output_debug_model = {} # MessageOutputDebug
        message_output_debug_model['nodes_visited'] = [dialog_node_visited_model]
        message_output_debug_model['log_messages'] = [dialog_log_message_model]
        message_output_debug_model['branch_exited'] = True
        message_output_debug_model['branch_exited_reason'] = 'completed'
        message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model]

        message_output_spelling_model = {} # MessageOutputSpelling
        message_output_spelling_model['text'] = 'testString'
        message_output_spelling_model['original_text'] = 'testString'
        message_output_spelling_model['suggested_text'] = 'testString'

        message_output_llm_metadata_model = {} # MessageOutputLLMMetadata
        message_output_llm_metadata_model['task'] = 'testString'
        message_output_llm_metadata_model['model_id'] = 'testString'

        # --- stateless context variants (carry session_id and private skill variables) ---

        message_context_global_system_model = {} # MessageContextGlobalSystem
        message_context_global_system_model['timezone'] = 'testString'
        message_context_global_system_model['user_id'] = 'testString'
        message_context_global_system_model['turn_count'] = 38
        message_context_global_system_model['locale'] = 'en-us'
        message_context_global_system_model['reference_time'] = 'testString'
        message_context_global_system_model['session_start_time'] = 'testString'
        message_context_global_system_model['state'] = 'testString'
        message_context_global_system_model['skip_user_input'] = True

        stateless_message_context_global_model = {} # StatelessMessageContextGlobal
        stateless_message_context_global_model['system'] = message_context_global_system_model
        stateless_message_context_global_model['session_id'] = 'testString'

        message_context_skill_system_model = {} # MessageContextSkillSystem
        message_context_skill_system_model['state'] = 'testString'
        message_context_skill_system_model['foo'] = 'testString'

        message_context_dialog_skill_model = {} # MessageContextDialogSkill
        message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
        message_context_dialog_skill_model['system'] = message_context_skill_system_model

        stateless_message_context_skills_actions_skill_model = {} # StatelessMessageContextSkillsActionsSkill
        stateless_message_context_skills_actions_skill_model['user_defined'] = {'anyKey': 'anyValue'}
        stateless_message_context_skills_actions_skill_model['system'] = message_context_skill_system_model
        stateless_message_context_skills_actions_skill_model['action_variables'] = {'anyKey': 'anyValue'}
        stateless_message_context_skills_actions_skill_model['skill_variables'] = {'anyKey': 'anyValue'}
        stateless_message_context_skills_actions_skill_model['private_action_variables'] = {'anyKey': 'anyValue'}
        stateless_message_context_skills_actions_skill_model['private_skill_variables'] = {'anyKey': 'anyValue'}

        stateless_message_context_skills_model = {} # StatelessMessageContextSkills
        stateless_message_context_skills_model['main skill'] = message_context_dialog_skill_model
        stateless_message_context_skills_model['actions skill'] = stateless_message_context_skills_actions_skill_model

        stateless_message_context_model = {} # StatelessMessageContext
        stateless_message_context_model['global'] = stateless_message_context_global_model
        stateless_message_context_model['skills'] = stateless_message_context_skills_model
        stateless_message_context_model['integrations'] = {'anyKey': 'anyValue'}

        stateless_final_response_output_model = {} # StatelessFinalResponseOutput
        stateless_final_response_output_model['generic'] = [runtime_response_generic_model]
        stateless_final_response_output_model['intents'] = [runtime_intent_model]
        stateless_final_response_output_model['entities'] = [runtime_entity_model]
        stateless_final_response_output_model['actions'] = [dialog_node_action_model]
        stateless_final_response_output_model['debug'] = message_output_debug_model
        stateless_final_response_output_model['user_defined'] = {'anyKey': 'anyValue'}
        stateless_final_response_output_model['spelling'] = message_output_spelling_model
        stateless_final_response_output_model['llm_metadata'] = [message_output_llm_metadata_model]
        # NOTE(review): streaming_metadata is populated with a context model dict here
        # (generated fixture); looks intentional for schema round-tripping — confirm.
        stateless_final_response_output_model['streaming_metadata'] = stateless_message_context_model

        # Construct a json representation of a StatelessFinalResponse model
        stateless_final_response_model_json = {}
        stateless_final_response_model_json['output'] = stateless_final_response_output_model
        stateless_final_response_model_json['context'] = stateless_message_context_model
        stateless_final_response_model_json['user_id'] = 'testString'

        # Construct a model instance of StatelessFinalResponse by calling from_dict on the json representation
        stateless_final_response_model = StatelessFinalResponse.from_dict(stateless_final_response_model_json)
        assert stateless_final_response_model != False

        # Construct a second model instance from the first instance's attributes
        stateless_final_response_model_dict = StatelessFinalResponse.from_dict(stateless_final_response_model_json).__dict__
        stateless_final_response_model2 = StatelessFinalResponse(**stateless_final_response_model_dict)

        # Verify the model instances are equivalent
        assert stateless_final_response_model == stateless_final_response_model2

        # Convert model instance back to dict and verify no loss of data
        stateless_final_response_model_json2 = stateless_final_response_model.to_dict()
        assert stateless_final_response_model_json2 == stateless_final_response_model_json
+
+
+class TestModel_StatelessFinalResponseOutput:
+ """
+ Test Class for StatelessFinalResponseOutput
+ """
+
+ def test_stateless_final_response_output_serialization(self):
+ """
+ Test serialization/deserialization for StatelessFinalResponseOutput
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem
+ response_generic_citation_ranges_item_model['start'] = 38
+ response_generic_citation_ranges_item_model['end'] = 38
+
+ response_generic_citation_model = {} # ResponseGenericCitation
+ response_generic_citation_model['title'] = 'testString'
+ response_generic_citation_model['text'] = 'testString'
+ response_generic_citation_model['body'] = 'testString'
+ response_generic_citation_model['search_result_index'] = 38
+ response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model]
+
+ response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores
+ response_generic_confidence_scores_model['threshold'] = 72.5
+ response_generic_confidence_scores_model['pre_gen'] = 72.5
+ response_generic_confidence_scores_model['post_gen'] = 72.5
+ response_generic_confidence_scores_model['extractiveness'] = 72.5
+
+ search_results_result_metadata_model = {} # SearchResultsResultMetadata
+ search_results_result_metadata_model['document_retrieval_source'] = 'testString'
+ search_results_result_metadata_model['score'] = 38
+
+ search_results_model = {} # SearchResults
+ search_results_model['result_metadata'] = search_results_result_metadata_model
+ search_results_model['id'] = 'testString'
+ search_results_model['title'] = 'testString'
+ search_results_model['body'] = 'testString'
+
+ runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
+ runtime_response_generic_model['response_type'] = 'conversational_search'
+ runtime_response_generic_model['text'] = 'testString'
+ runtime_response_generic_model['citations_title'] = 'testString'
+ runtime_response_generic_model['citations'] = [response_generic_citation_model]
+ runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model
+ runtime_response_generic_model['response_length_option'] = 'testString'
+ runtime_response_generic_model['search_results'] = [search_results_model]
+ runtime_response_generic_model['disclaimer'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ dialog_node_action_model = {} # DialogNodeAction
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ dialog_node_visited_model = {} # DialogNodeVisited
+ dialog_node_visited_model['dialog_node'] = 'testString'
+ dialog_node_visited_model['title'] = 'testString'
+ dialog_node_visited_model['conditions'] = 'testString'
+
+ log_message_source_model = {} # LogMessageSourceDialogNode
+ log_message_source_model['type'] = 'dialog_node'
+ log_message_source_model['dialog_node'] = 'testString'
+
+ dialog_log_message_model = {} # DialogLogMessage
+ dialog_log_message_model['level'] = 'info'
+ dialog_log_message_model['message'] = 'testString'
+ dialog_log_message_model['code'] = 'testString'
+ dialog_log_message_model['source'] = log_message_source_model
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited
+ message_output_debug_turn_event_model['event'] = 'action_visited'
+ message_output_debug_turn_event_model['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_model['action_start_time'] = 'testString'
+ message_output_debug_turn_event_model['condition_type'] = 'user_defined'
+ message_output_debug_turn_event_model['reason'] = 'intent'
+ message_output_debug_turn_event_model['result_variable'] = 'testString'
+
+ message_output_debug_model = {} # MessageOutputDebug
+ message_output_debug_model['nodes_visited'] = [dialog_node_visited_model]
+ message_output_debug_model['log_messages'] = [dialog_log_message_model]
+ message_output_debug_model['branch_exited'] = True
+ message_output_debug_model['branch_exited_reason'] = 'completed'
+ message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model]
+
+ message_output_spelling_model = {} # MessageOutputSpelling
+ message_output_spelling_model['text'] = 'testString'
+ message_output_spelling_model['original_text'] = 'testString'
+ message_output_spelling_model['suggested_text'] = 'testString'
+
+ message_output_llm_metadata_model = {} # MessageOutputLLMMetadata
+ message_output_llm_metadata_model['task'] = 'testString'
+ message_output_llm_metadata_model['model_id'] = 'testString'
+
+ message_context_global_system_model = {} # MessageContextGlobalSystem
+ message_context_global_system_model['timezone'] = 'testString'
+ message_context_global_system_model['user_id'] = 'testString'
+ message_context_global_system_model['turn_count'] = 38
+ message_context_global_system_model['locale'] = 'en-us'
+ message_context_global_system_model['reference_time'] = 'testString'
+ message_context_global_system_model['session_start_time'] = 'testString'
+ message_context_global_system_model['state'] = 'testString'
+ message_context_global_system_model['skip_user_input'] = True
+
+ stateless_message_context_global_model = {} # StatelessMessageContextGlobal
+ stateless_message_context_global_model['system'] = message_context_global_system_model
+ stateless_message_context_global_model['session_id'] = 'testString'
+
+ message_context_skill_system_model = {} # MessageContextSkillSystem
+ message_context_skill_system_model['state'] = 'testString'
+ message_context_skill_system_model['foo'] = 'testString'
+
+ message_context_dialog_skill_model = {} # MessageContextDialogSkill
+ message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_dialog_skill_model['system'] = message_context_skill_system_model
+
+ stateless_message_context_skills_actions_skill_model = {} # StatelessMessageContextSkillsActionsSkill
+ stateless_message_context_skills_actions_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['system'] = message_context_skill_system_model
+ stateless_message_context_skills_actions_skill_model['action_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['skill_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['private_action_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['private_skill_variables'] = {'anyKey': 'anyValue'}
+
+ stateless_message_context_skills_model = {} # StatelessMessageContextSkills
+ stateless_message_context_skills_model['main skill'] = message_context_dialog_skill_model
+ stateless_message_context_skills_model['actions skill'] = stateless_message_context_skills_actions_skill_model
+
+ stateless_message_context_model = {} # StatelessMessageContext
+ stateless_message_context_model['global'] = stateless_message_context_global_model
+ stateless_message_context_model['skills'] = stateless_message_context_skills_model
+ stateless_message_context_model['integrations'] = {'anyKey': 'anyValue'}
+
+ # Construct a json representation of a StatelessFinalResponseOutput model
+ stateless_final_response_output_model_json = {}
+ stateless_final_response_output_model_json['generic'] = [runtime_response_generic_model]
+ stateless_final_response_output_model_json['intents'] = [runtime_intent_model]
+ stateless_final_response_output_model_json['entities'] = [runtime_entity_model]
+ stateless_final_response_output_model_json['actions'] = [dialog_node_action_model]
+ stateless_final_response_output_model_json['debug'] = message_output_debug_model
+ stateless_final_response_output_model_json['user_defined'] = {'anyKey': 'anyValue'}
+ stateless_final_response_output_model_json['spelling'] = message_output_spelling_model
+ stateless_final_response_output_model_json['llm_metadata'] = [message_output_llm_metadata_model]
+ stateless_final_response_output_model_json['streaming_metadata'] = stateless_message_context_model
+
+ # Construct a model instance of StatelessFinalResponseOutput by calling from_dict on the json representation
+ stateless_final_response_output_model = StatelessFinalResponseOutput.from_dict(stateless_final_response_output_model_json)
+ assert stateless_final_response_output_model != False
+
+ # Construct a model instance of StatelessFinalResponseOutput by calling from_dict on the json representation
+ stateless_final_response_output_model_dict = StatelessFinalResponseOutput.from_dict(stateless_final_response_output_model_json).__dict__
+ stateless_final_response_output_model2 = StatelessFinalResponseOutput(**stateless_final_response_output_model_dict)
+
+ # Verify the model instances are equivalent
+ assert stateless_final_response_output_model == stateless_final_response_output_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ stateless_final_response_output_model_json2 = stateless_final_response_output_model.to_dict()
+ assert stateless_final_response_output_model_json2 == stateless_final_response_output_model_json
+
+
+class TestModel_StatelessMessageContext:
+ """
+ Test Class for StatelessMessageContext
+ """
+
+ def test_stateless_message_context_serialization(self):
+ """
+ Test serialization/deserialization for StatelessMessageContext
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ message_context_global_system_model = {} # MessageContextGlobalSystem
+ message_context_global_system_model['timezone'] = 'testString'
+ message_context_global_system_model['user_id'] = 'testString'
+ message_context_global_system_model['turn_count'] = 38
+ message_context_global_system_model['locale'] = 'en-us'
+ message_context_global_system_model['reference_time'] = 'testString'
+ message_context_global_system_model['session_start_time'] = 'testString'
+ message_context_global_system_model['state'] = 'testString'
+ message_context_global_system_model['skip_user_input'] = True
+
+ stateless_message_context_global_model = {} # StatelessMessageContextGlobal
+ stateless_message_context_global_model['system'] = message_context_global_system_model
+ stateless_message_context_global_model['session_id'] = 'testString'
+
+ message_context_skill_system_model = {} # MessageContextSkillSystem
+ message_context_skill_system_model['state'] = 'testString'
+ message_context_skill_system_model['foo'] = 'testString'
+
+ message_context_dialog_skill_model = {} # MessageContextDialogSkill
+ message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_dialog_skill_model['system'] = message_context_skill_system_model
+
+ stateless_message_context_skills_actions_skill_model = {} # StatelessMessageContextSkillsActionsSkill
+ stateless_message_context_skills_actions_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['system'] = message_context_skill_system_model
+ stateless_message_context_skills_actions_skill_model['action_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['skill_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['private_action_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['private_skill_variables'] = {'anyKey': 'anyValue'}
+
+ stateless_message_context_skills_model = {} # StatelessMessageContextSkills
+ stateless_message_context_skills_model['main skill'] = message_context_dialog_skill_model
+ stateless_message_context_skills_model['actions skill'] = stateless_message_context_skills_actions_skill_model
+
+ # Construct a json representation of a StatelessMessageContext model
+ stateless_message_context_model_json = {}
+ stateless_message_context_model_json['global'] = stateless_message_context_global_model
+ stateless_message_context_model_json['skills'] = stateless_message_context_skills_model
+ stateless_message_context_model_json['integrations'] = {'anyKey': 'anyValue'}
+
+ # Construct a model instance of StatelessMessageContext by calling from_dict on the json representation
+ stateless_message_context_model = StatelessMessageContext.from_dict(stateless_message_context_model_json)
+ assert stateless_message_context_model != False
+
+ # Construct a model instance of StatelessMessageContext by calling from_dict on the json representation
+ stateless_message_context_model_dict = StatelessMessageContext.from_dict(stateless_message_context_model_json).__dict__
+ stateless_message_context_model2 = StatelessMessageContext(**stateless_message_context_model_dict)
+
+ # Verify the model instances are equivalent
+ assert stateless_message_context_model == stateless_message_context_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ stateless_message_context_model_json2 = stateless_message_context_model.to_dict()
+ assert stateless_message_context_model_json2 == stateless_message_context_model_json
+
+
+class TestModel_StatelessMessageContextGlobal:
+ """
+ Test Class for StatelessMessageContextGlobal
+ """
+
+ def test_stateless_message_context_global_serialization(self):
+ """
+ Test serialization/deserialization for StatelessMessageContextGlobal
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ message_context_global_system_model = {} # MessageContextGlobalSystem
+ message_context_global_system_model['timezone'] = 'testString'
+ message_context_global_system_model['user_id'] = 'testString'
+ message_context_global_system_model['turn_count'] = 38
+ message_context_global_system_model['locale'] = 'en-us'
+ message_context_global_system_model['reference_time'] = 'testString'
+ message_context_global_system_model['session_start_time'] = 'testString'
+ message_context_global_system_model['state'] = 'testString'
+ message_context_global_system_model['skip_user_input'] = True
+
+ # Construct a json representation of a StatelessMessageContextGlobal model
+ stateless_message_context_global_model_json = {}
+ stateless_message_context_global_model_json['system'] = message_context_global_system_model
+ stateless_message_context_global_model_json['session_id'] = 'testString'
+
+ # Construct a model instance of StatelessMessageContextGlobal by calling from_dict on the json representation
+ stateless_message_context_global_model = StatelessMessageContextGlobal.from_dict(stateless_message_context_global_model_json)
+ assert stateless_message_context_global_model != False
+
+ # Construct a model instance of StatelessMessageContextGlobal by calling from_dict on the json representation
+ stateless_message_context_global_model_dict = StatelessMessageContextGlobal.from_dict(stateless_message_context_global_model_json).__dict__
+ stateless_message_context_global_model2 = StatelessMessageContextGlobal(**stateless_message_context_global_model_dict)
+
+ # Verify the model instances are equivalent
+ assert stateless_message_context_global_model == stateless_message_context_global_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ stateless_message_context_global_model_json2 = stateless_message_context_global_model.to_dict()
+ assert stateless_message_context_global_model_json2 == stateless_message_context_global_model_json
+
+
+class TestModel_StatelessMessageContextSkills:
+ """
+ Test Class for StatelessMessageContextSkills
+ """
+
+ def test_stateless_message_context_skills_serialization(self):
+ """
+ Test serialization/deserialization for StatelessMessageContextSkills
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ message_context_skill_system_model = {} # MessageContextSkillSystem
+ message_context_skill_system_model['state'] = 'testString'
+ message_context_skill_system_model['foo'] = 'testString'
+
+ message_context_dialog_skill_model = {} # MessageContextDialogSkill
+ message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_dialog_skill_model['system'] = message_context_skill_system_model
+
+ stateless_message_context_skills_actions_skill_model = {} # StatelessMessageContextSkillsActionsSkill
+ stateless_message_context_skills_actions_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['system'] = message_context_skill_system_model
+ stateless_message_context_skills_actions_skill_model['action_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['skill_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['private_action_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['private_skill_variables'] = {'anyKey': 'anyValue'}
+
+ # Construct a json representation of a StatelessMessageContextSkills model
+ stateless_message_context_skills_model_json = {}
+ stateless_message_context_skills_model_json['main skill'] = message_context_dialog_skill_model
+ stateless_message_context_skills_model_json['actions skill'] = stateless_message_context_skills_actions_skill_model
+
+ # Construct a model instance of StatelessMessageContextSkills by calling from_dict on the json representation
+ stateless_message_context_skills_model = StatelessMessageContextSkills.from_dict(stateless_message_context_skills_model_json)
+ assert stateless_message_context_skills_model != False
+
+ # Construct a model instance of StatelessMessageContextSkills by calling from_dict on the json representation
+ stateless_message_context_skills_model_dict = StatelessMessageContextSkills.from_dict(stateless_message_context_skills_model_json).__dict__
+ stateless_message_context_skills_model2 = StatelessMessageContextSkills(**stateless_message_context_skills_model_dict)
+
+ # Verify the model instances are equivalent
+ assert stateless_message_context_skills_model == stateless_message_context_skills_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ stateless_message_context_skills_model_json2 = stateless_message_context_skills_model.to_dict()
+ assert stateless_message_context_skills_model_json2 == stateless_message_context_skills_model_json
+
+
+class TestModel_StatelessMessageContextSkillsActionsSkill:
+ """
+ Test Class for StatelessMessageContextSkillsActionsSkill
+ """
+
+ def test_stateless_message_context_skills_actions_skill_serialization(self):
+ """
+ Test serialization/deserialization for StatelessMessageContextSkillsActionsSkill
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ message_context_skill_system_model = {} # MessageContextSkillSystem
+ message_context_skill_system_model['state'] = 'testString'
+ message_context_skill_system_model['foo'] = 'testString'
+
+ # Construct a json representation of a StatelessMessageContextSkillsActionsSkill model
+ stateless_message_context_skills_actions_skill_model_json = {}
+ stateless_message_context_skills_actions_skill_model_json['user_defined'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model_json['system'] = message_context_skill_system_model
+ stateless_message_context_skills_actions_skill_model_json['action_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model_json['skill_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model_json['private_action_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model_json['private_skill_variables'] = {'anyKey': 'anyValue'}
+
+ # Construct a model instance of StatelessMessageContextSkillsActionsSkill by calling from_dict on the json representation
+ stateless_message_context_skills_actions_skill_model = StatelessMessageContextSkillsActionsSkill.from_dict(stateless_message_context_skills_actions_skill_model_json)
+ assert stateless_message_context_skills_actions_skill_model != False
+
+ # Construct a model instance of StatelessMessageContextSkillsActionsSkill by calling from_dict on the json representation
+ stateless_message_context_skills_actions_skill_model_dict = StatelessMessageContextSkillsActionsSkill.from_dict(stateless_message_context_skills_actions_skill_model_json).__dict__
+ stateless_message_context_skills_actions_skill_model2 = StatelessMessageContextSkillsActionsSkill(**stateless_message_context_skills_actions_skill_model_dict)
+
+ # Verify the model instances are equivalent
+ assert stateless_message_context_skills_actions_skill_model == stateless_message_context_skills_actions_skill_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ stateless_message_context_skills_actions_skill_model_json2 = stateless_message_context_skills_actions_skill_model.to_dict()
+ assert stateless_message_context_skills_actions_skill_model_json2 == stateless_message_context_skills_actions_skill_model_json
+
+
+class TestModel_StatelessMessageInput:
+ """
+ Test Class for StatelessMessageInput
+ """
+
+ def test_stateless_message_input_serialization(self):
+ """
+ Test serialization/deserialization for StatelessMessageInput
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ message_input_attachment_model = {} # MessageInputAttachment
+ message_input_attachment_model['url'] = 'testString'
+ message_input_attachment_model['media_type'] = 'testString'
+
+ request_analytics_model = {} # RequestAnalytics
+ request_analytics_model['browser'] = 'testString'
+ request_analytics_model['device'] = 'testString'
+ request_analytics_model['pageUrl'] = 'testString'
+
+ message_input_options_spelling_model = {} # MessageInputOptionsSpelling
+ message_input_options_spelling_model['suggestions'] = True
+ message_input_options_spelling_model['auto_correct'] = True
+
+ stateless_message_input_options_model = {} # StatelessMessageInputOptions
+ stateless_message_input_options_model['restart'] = False
+ stateless_message_input_options_model['alternate_intents'] = False
+ stateless_message_input_options_model['async_callout'] = False
+ stateless_message_input_options_model['spelling'] = message_input_options_spelling_model
+ stateless_message_input_options_model['debug'] = False
+
+ # Construct a json representation of a StatelessMessageInput model
+ stateless_message_input_model_json = {}
+ stateless_message_input_model_json['message_type'] = 'text'
+ stateless_message_input_model_json['text'] = 'testString'
+ stateless_message_input_model_json['intents'] = [runtime_intent_model]
+ stateless_message_input_model_json['entities'] = [runtime_entity_model]
+ stateless_message_input_model_json['suggestion_id'] = 'testString'
+ stateless_message_input_model_json['attachments'] = [message_input_attachment_model]
+ stateless_message_input_model_json['analytics'] = request_analytics_model
+ stateless_message_input_model_json['options'] = stateless_message_input_options_model
+
+ # Construct a model instance of StatelessMessageInput by calling from_dict on the json representation
+ stateless_message_input_model = StatelessMessageInput.from_dict(stateless_message_input_model_json)
+ assert stateless_message_input_model != False
+
+ # Construct a model instance of StatelessMessageInput by calling from_dict on the json representation
+ stateless_message_input_model_dict = StatelessMessageInput.from_dict(stateless_message_input_model_json).__dict__
+ stateless_message_input_model2 = StatelessMessageInput(**stateless_message_input_model_dict)
+
+ # Verify the model instances are equivalent
+ assert stateless_message_input_model == stateless_message_input_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ stateless_message_input_model_json2 = stateless_message_input_model.to_dict()
+ assert stateless_message_input_model_json2 == stateless_message_input_model_json
+
+
class TestModel_StatelessMessageInputOptions:
    """
    Test Class for StatelessMessageInputOptions
    """

    def test_stateless_message_input_options_serialization(self):
        """
        Test serialization/deserialization for StatelessMessageInputOptions
        """

        # Dict form of the nested MessageInputOptionsSpelling model.
        spelling_json = {
            'suggestions': True,
            'auto_correct': True,
        }

        # JSON representation of a StatelessMessageInputOptions model.
        source_json = {
            'restart': False,
            'alternate_intents': False,
            'async_callout': False,
            'spelling': spelling_json,
            'debug': False,
        }

        # Deserialize the JSON into a model instance.
        model = StatelessMessageInputOptions.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict.
        model_copy = StatelessMessageInputOptions(
            **StatelessMessageInputOptions.from_dict(source_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model_copy

        # Serializing back must reproduce the original JSON without data loss.
        assert model.to_dict() == source_json
+
class TestModel_StatelessMessageResponse:
    """
    Test Class for StatelessMessageResponse
    """

    def test_stateless_message_response_serialization(self):
        """
        Test serialization/deserialization for StatelessMessageResponse.

        Builds the full nested JSON payload (output, context, masked output
        and input), round-trips it through from_dict/to_dict, and verifies
        no data is lost.
        """

        # Construct dict forms of any model objects needed in order to build this model.

        response_generic_citation_ranges_item_model = {}  # ResponseGenericCitationRangesItem
        response_generic_citation_ranges_item_model['start'] = 38
        response_generic_citation_ranges_item_model['end'] = 38

        response_generic_citation_model = {}  # ResponseGenericCitation
        response_generic_citation_model['title'] = 'testString'
        response_generic_citation_model['text'] = 'testString'
        response_generic_citation_model['body'] = 'testString'
        response_generic_citation_model['search_result_index'] = 38
        response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model]

        response_generic_confidence_scores_model = {}  # ResponseGenericConfidenceScores
        response_generic_confidence_scores_model['threshold'] = 72.5
        response_generic_confidence_scores_model['pre_gen'] = 72.5
        response_generic_confidence_scores_model['post_gen'] = 72.5
        response_generic_confidence_scores_model['extractiveness'] = 72.5

        search_results_result_metadata_model = {}  # SearchResultsResultMetadata
        search_results_result_metadata_model['document_retrieval_source'] = 'testString'
        search_results_result_metadata_model['score'] = 38

        search_results_model = {}  # SearchResults
        search_results_model['result_metadata'] = search_results_result_metadata_model
        search_results_model['id'] = 'testString'
        search_results_model['title'] = 'testString'
        search_results_model['body'] = 'testString'

        runtime_response_generic_model = {}  # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
        # Fixed: the discriminator value for this response subtype is
        # 'conversational_search' (was 'conversation_search', which does not
        # match the RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
        # response_type enum).
        runtime_response_generic_model['response_type'] = 'conversational_search'
        runtime_response_generic_model['text'] = 'testString'
        runtime_response_generic_model['citations_title'] = 'testString'
        runtime_response_generic_model['citations'] = [response_generic_citation_model]
        runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model
        runtime_response_generic_model['response_length_option'] = 'testString'
        runtime_response_generic_model['search_results'] = [search_results_model]
        runtime_response_generic_model['disclaimer'] = 'testString'

        runtime_intent_model = {}  # RuntimeIntent
        runtime_intent_model['intent'] = 'testString'
        runtime_intent_model['confidence'] = 72.5
        runtime_intent_model['skill'] = 'testString'

        capture_group_model = {}  # CaptureGroup
        capture_group_model['group'] = 'testString'
        capture_group_model['location'] = [38]

        runtime_entity_interpretation_model = {}  # RuntimeEntityInterpretation
        runtime_entity_interpretation_model['calendar_type'] = 'testString'
        runtime_entity_interpretation_model['datetime_link'] = 'testString'
        runtime_entity_interpretation_model['festival'] = 'testString'
        runtime_entity_interpretation_model['granularity'] = 'day'
        runtime_entity_interpretation_model['range_link'] = 'testString'
        runtime_entity_interpretation_model['range_modifier'] = 'testString'
        runtime_entity_interpretation_model['relative_day'] = 72.5
        runtime_entity_interpretation_model['relative_month'] = 72.5
        runtime_entity_interpretation_model['relative_week'] = 72.5
        runtime_entity_interpretation_model['relative_weekend'] = 72.5
        runtime_entity_interpretation_model['relative_year'] = 72.5
        runtime_entity_interpretation_model['specific_day'] = 72.5
        runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
        runtime_entity_interpretation_model['specific_month'] = 72.5
        runtime_entity_interpretation_model['specific_quarter'] = 72.5
        runtime_entity_interpretation_model['specific_year'] = 72.5
        runtime_entity_interpretation_model['numeric_value'] = 72.5
        runtime_entity_interpretation_model['subtype'] = 'testString'
        runtime_entity_interpretation_model['part_of_day'] = 'testString'
        runtime_entity_interpretation_model['relative_hour'] = 72.5
        runtime_entity_interpretation_model['relative_minute'] = 72.5
        runtime_entity_interpretation_model['relative_second'] = 72.5
        runtime_entity_interpretation_model['specific_hour'] = 72.5
        runtime_entity_interpretation_model['specific_minute'] = 72.5
        runtime_entity_interpretation_model['specific_second'] = 72.5
        runtime_entity_interpretation_model['timezone'] = 'testString'

        runtime_entity_alternative_model = {}  # RuntimeEntityAlternative
        runtime_entity_alternative_model['value'] = 'testString'
        runtime_entity_alternative_model['confidence'] = 72.5

        runtime_entity_role_model = {}  # RuntimeEntityRole
        runtime_entity_role_model['type'] = 'date_from'

        runtime_entity_model = {}  # RuntimeEntity
        runtime_entity_model['entity'] = 'testString'
        runtime_entity_model['location'] = [38]
        runtime_entity_model['value'] = 'testString'
        runtime_entity_model['confidence'] = 72.5
        runtime_entity_model['groups'] = [capture_group_model]
        runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
        runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
        runtime_entity_model['role'] = runtime_entity_role_model
        runtime_entity_model['skill'] = 'testString'

        dialog_node_action_model = {}  # DialogNodeAction
        dialog_node_action_model['name'] = 'testString'
        dialog_node_action_model['type'] = 'client'
        dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
        dialog_node_action_model['result_variable'] = 'testString'
        dialog_node_action_model['credentials'] = 'testString'

        dialog_node_visited_model = {}  # DialogNodeVisited
        dialog_node_visited_model['dialog_node'] = 'testString'
        dialog_node_visited_model['title'] = 'testString'
        dialog_node_visited_model['conditions'] = 'testString'

        log_message_source_model = {}  # LogMessageSourceDialogNode
        log_message_source_model['type'] = 'dialog_node'
        log_message_source_model['dialog_node'] = 'testString'

        dialog_log_message_model = {}  # DialogLogMessage
        dialog_log_message_model['level'] = 'info'
        dialog_log_message_model['message'] = 'testString'
        dialog_log_message_model['code'] = 'testString'
        dialog_log_message_model['source'] = log_message_source_model

        turn_event_action_source_model = {}  # TurnEventActionSource
        turn_event_action_source_model['type'] = 'action'
        turn_event_action_source_model['action'] = 'testString'
        turn_event_action_source_model['action_title'] = 'testString'
        turn_event_action_source_model['condition'] = 'testString'

        message_output_debug_turn_event_model = {}  # MessageOutputDebugTurnEventTurnEventActionVisited
        message_output_debug_turn_event_model['event'] = 'action_visited'
        message_output_debug_turn_event_model['source'] = turn_event_action_source_model
        message_output_debug_turn_event_model['action_start_time'] = 'testString'
        message_output_debug_turn_event_model['condition_type'] = 'user_defined'
        message_output_debug_turn_event_model['reason'] = 'intent'
        message_output_debug_turn_event_model['result_variable'] = 'testString'

        message_output_debug_model = {}  # MessageOutputDebug
        message_output_debug_model['nodes_visited'] = [dialog_node_visited_model]
        message_output_debug_model['log_messages'] = [dialog_log_message_model]
        message_output_debug_model['branch_exited'] = True
        message_output_debug_model['branch_exited_reason'] = 'completed'
        message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model]

        message_output_spelling_model = {}  # MessageOutputSpelling
        message_output_spelling_model['text'] = 'testString'
        message_output_spelling_model['original_text'] = 'testString'
        message_output_spelling_model['suggested_text'] = 'testString'

        message_output_llm_metadata_model = {}  # MessageOutputLLMMetadata
        message_output_llm_metadata_model['task'] = 'testString'
        message_output_llm_metadata_model['model_id'] = 'testString'

        message_output_model = {}  # MessageOutput
        message_output_model['generic'] = [runtime_response_generic_model]
        message_output_model['intents'] = [runtime_intent_model]
        message_output_model['entities'] = [runtime_entity_model]
        message_output_model['actions'] = [dialog_node_action_model]
        message_output_model['debug'] = message_output_debug_model
        message_output_model['user_defined'] = {'anyKey': 'anyValue'}
        message_output_model['spelling'] = message_output_spelling_model
        message_output_model['llm_metadata'] = [message_output_llm_metadata_model]

        message_context_global_system_model = {}  # MessageContextGlobalSystem
        message_context_global_system_model['timezone'] = 'testString'
        message_context_global_system_model['user_id'] = 'testString'
        message_context_global_system_model['turn_count'] = 38
        message_context_global_system_model['locale'] = 'en-us'
        message_context_global_system_model['reference_time'] = 'testString'
        message_context_global_system_model['session_start_time'] = 'testString'
        message_context_global_system_model['state'] = 'testString'
        message_context_global_system_model['skip_user_input'] = True

        stateless_message_context_global_model = {}  # StatelessMessageContextGlobal
        stateless_message_context_global_model['system'] = message_context_global_system_model
        stateless_message_context_global_model['session_id'] = 'testString'

        message_context_skill_system_model = {}  # MessageContextSkillSystem
        message_context_skill_system_model['state'] = 'testString'
        message_context_skill_system_model['foo'] = 'testString'

        message_context_dialog_skill_model = {}  # MessageContextDialogSkill
        message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
        message_context_dialog_skill_model['system'] = message_context_skill_system_model

        stateless_message_context_skills_actions_skill_model = {}  # StatelessMessageContextSkillsActionsSkill
        stateless_message_context_skills_actions_skill_model['user_defined'] = {'anyKey': 'anyValue'}
        stateless_message_context_skills_actions_skill_model['system'] = message_context_skill_system_model
        stateless_message_context_skills_actions_skill_model['action_variables'] = {'anyKey': 'anyValue'}
        stateless_message_context_skills_actions_skill_model['skill_variables'] = {'anyKey': 'anyValue'}
        stateless_message_context_skills_actions_skill_model['private_action_variables'] = {'anyKey': 'anyValue'}
        stateless_message_context_skills_actions_skill_model['private_skill_variables'] = {'anyKey': 'anyValue'}

        stateless_message_context_skills_model = {}  # StatelessMessageContextSkills
        # NOTE: the skill-name keys intentionally contain spaces; they are the
        # API's fixed skill identifiers, not typos.
        stateless_message_context_skills_model['main skill'] = message_context_dialog_skill_model
        stateless_message_context_skills_model['actions skill'] = stateless_message_context_skills_actions_skill_model

        stateless_message_context_model = {}  # StatelessMessageContext
        stateless_message_context_model['global'] = stateless_message_context_global_model
        stateless_message_context_model['skills'] = stateless_message_context_skills_model
        stateless_message_context_model['integrations'] = {'anyKey': 'anyValue'}

        message_input_attachment_model = {}  # MessageInputAttachment
        message_input_attachment_model['url'] = 'testString'
        message_input_attachment_model['media_type'] = 'testString'

        request_analytics_model = {}  # RequestAnalytics
        request_analytics_model['browser'] = 'testString'
        request_analytics_model['device'] = 'testString'
        request_analytics_model['pageUrl'] = 'testString'

        message_input_options_spelling_model = {}  # MessageInputOptionsSpelling
        message_input_options_spelling_model['suggestions'] = True
        message_input_options_spelling_model['auto_correct'] = True

        message_input_options_model = {}  # MessageInputOptions
        message_input_options_model['restart'] = False
        message_input_options_model['alternate_intents'] = False
        message_input_options_model['async_callout'] = False
        message_input_options_model['spelling'] = message_input_options_spelling_model
        message_input_options_model['debug'] = False
        message_input_options_model['return_context'] = False
        message_input_options_model['export'] = False

        message_input_model = {}  # MessageInput
        message_input_model['message_type'] = 'text'
        message_input_model['text'] = 'testString'
        message_input_model['intents'] = [runtime_intent_model]
        message_input_model['entities'] = [runtime_entity_model]
        message_input_model['suggestion_id'] = 'testString'
        message_input_model['attachments'] = [message_input_attachment_model]
        message_input_model['analytics'] = request_analytics_model
        message_input_model['options'] = message_input_options_model

        # Construct a json representation of a StatelessMessageResponse model
        stateless_message_response_model_json = {}
        stateless_message_response_model_json['output'] = message_output_model
        stateless_message_response_model_json['context'] = stateless_message_context_model
        stateless_message_response_model_json['masked_output'] = message_output_model
        stateless_message_response_model_json['masked_input'] = message_input_model
        stateless_message_response_model_json['user_id'] = 'testString'

        # Construct a model instance of StatelessMessageResponse by calling from_dict on the json representation
        stateless_message_response_model = StatelessMessageResponse.from_dict(stateless_message_response_model_json)
        assert stateless_message_response_model != False

        # Construct a model instance of StatelessMessageResponse by calling from_dict on the json representation
        stateless_message_response_model_dict = StatelessMessageResponse.from_dict(stateless_message_response_model_json).__dict__
        stateless_message_response_model2 = StatelessMessageResponse(**stateless_message_response_model_dict)

        # Verify the model instances are equivalent
        assert stateless_message_response_model == stateless_message_response_model2

        # Convert model instance back to dict and verify no loss of data
        stateless_message_response_model_json2 = stateless_message_response_model.to_dict()
        assert stateless_message_response_model_json2 == stateless_message_response_model_json
+
+
class TestModel_StatusError:
    """
    Test Class for StatusError
    """

    def test_status_error_serialization(self):
        """
        Test serialization/deserialization for StatusError
        """

        # JSON representation of a StatusError model.
        source_json = {
            'message': 'testString',
        }

        # Deserialize the JSON into a model instance.
        model = StatusError.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict.
        model_copy = StatusError(**StatusError.from_dict(source_json).__dict__)

        # Both construction paths must yield equivalent instances.
        assert model == model_copy

        # Serializing back must reproduce the original JSON without data loss.
        assert model.to_dict() == source_json
+
+
class TestModel_TurnEventActionSource:
    """
    Test Class for TurnEventActionSource
    """

    def test_turn_event_action_source_serialization(self):
        """
        Test serialization/deserialization for TurnEventActionSource
        """

        # JSON representation of a TurnEventActionSource model.
        source_json = {
            'type': 'action',
            'action': 'testString',
            'action_title': 'testString',
            'condition': 'testString',
        }

        # Deserialize the JSON into a model instance.
        model = TurnEventActionSource.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict.
        model_copy = TurnEventActionSource(
            **TurnEventActionSource.from_dict(source_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model_copy

        # Serializing back must reproduce the original JSON without data loss.
        assert model.to_dict() == source_json
+
+
class TestModel_TurnEventCalloutCallout:
    """
    Test Class for TurnEventCalloutCallout
    """

    def test_turn_event_callout_callout_serialization(self):
        """
        Test serialization/deserialization for TurnEventCalloutCallout
        """

        # Dict form of the nested TurnEventCalloutCalloutRequest model.
        request_json = {
            'method': 'get',
            'url': 'testString',
            'path': 'testString',
            'query_parameters': 'testString',
            'headers': {'anyKey': 'anyValue'},
            'body': {'anyKey': 'anyValue'},
        }

        # Dict form of the nested TurnEventCalloutCalloutResponse model.
        response_json = {
            'body': 'testString',
            'status_code': 38,
            'last_event': {'anyKey': 'anyValue'},
        }

        # JSON representation of a TurnEventCalloutCallout model.
        source_json = {
            'type': 'integration_interaction',
            'internal': {'anyKey': 'anyValue'},
            'result_variable': 'testString',
            'request': request_json,
            'response': response_json,
        }

        # Deserialize the JSON into a model instance.
        model = TurnEventCalloutCallout.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict.
        model_copy = TurnEventCalloutCallout(
            **TurnEventCalloutCallout.from_dict(source_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model_copy

        # Serializing back must reproduce the original JSON without data loss.
        assert model.to_dict() == source_json
+
+
class TestModel_TurnEventCalloutCalloutRequest:
    """
    Test Class for TurnEventCalloutCalloutRequest
    """

    def test_turn_event_callout_callout_request_serialization(self):
        """
        Test serialization/deserialization for TurnEventCalloutCalloutRequest
        """

        # JSON representation of a TurnEventCalloutCalloutRequest model.
        source_json = {
            'method': 'get',
            'url': 'testString',
            'path': 'testString',
            'query_parameters': 'testString',
            'headers': {'anyKey': 'anyValue'},
            'body': {'anyKey': 'anyValue'},
        }

        # Deserialize the JSON into a model instance.
        model = TurnEventCalloutCalloutRequest.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict.
        model_copy = TurnEventCalloutCalloutRequest(
            **TurnEventCalloutCalloutRequest.from_dict(source_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model_copy

        # Serializing back must reproduce the original JSON without data loss.
        assert model.to_dict() == source_json
+
+
class TestModel_TurnEventCalloutCalloutResponse:
    """
    Test Class for TurnEventCalloutCalloutResponse
    """

    def test_turn_event_callout_callout_response_serialization(self):
        """
        Test serialization/deserialization for TurnEventCalloutCalloutResponse
        """

        # JSON representation of a TurnEventCalloutCalloutResponse model.
        source_json = {
            'body': 'testString',
            'status_code': 38,
            'last_event': {'anyKey': 'anyValue'},
        }

        # Deserialize the JSON into a model instance.
        model = TurnEventCalloutCalloutResponse.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict.
        model_copy = TurnEventCalloutCalloutResponse(
            **TurnEventCalloutCalloutResponse.from_dict(source_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model_copy

        # Serializing back must reproduce the original JSON without data loss.
        assert model.to_dict() == source_json
+
+
class TestModel_TurnEventCalloutError:
    """
    Test Class for TurnEventCalloutError
    """

    def test_turn_event_callout_error_serialization(self):
        """
        Test serialization/deserialization for TurnEventCalloutError
        """

        # JSON representation of a TurnEventCalloutError model.
        source_json = {
            'message': 'testString',
        }

        # Deserialize the JSON into a model instance.
        model = TurnEventCalloutError.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict.
        model_copy = TurnEventCalloutError(
            **TurnEventCalloutError.from_dict(source_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model_copy

        # Serializing back must reproduce the original JSON without data loss.
        assert model.to_dict() == source_json
+
+
class TestModel_TurnEventGenerativeAICalledCallout:
    """
    Test Class for TurnEventGenerativeAICalledCallout
    """

    def test_turn_event_generative_ai_called_callout_serialization(self):
        """
        Test serialization/deserialization for TurnEventGenerativeAICalledCallout
        """

        # Dict forms of the nested models needed to build this model.

        request_json = {  # TurnEventGenerativeAICalledCalloutRequest
            'method': 'GET',
            'url': 'testString',
            'port': 'testString',
            'path': 'testString',
            'query_parameters': 'testString',
            'headers': {'anyKey': 'anyValue'},
            'body': {'anyKey': 'anyValue'},
        }

        response_json = {  # TurnEventGenerativeAICalledCalloutResponse
            'body': 'testString',
            'status_code': 38,
        }

        search_json = {  # TurnEventGenerativeAICalledCalloutSearch
            'engine': 'testString',
            'index': 'testString',
            'query': 'testString',
            'request': request_json,
            'response': response_json,
        }

        llm_response_json = {  # TurnEventGenerativeAICalledCalloutLlmResponse
            'text': 'testString',
            'response_type': 'testString',
            'is_idk_response': True,
        }

        result_metadata_json = {  # SearchResultsResultMetadata
            'document_retrieval_source': 'testString',
            'score': 38,
        }

        search_result_json = {  # SearchResults
            'result_metadata': result_metadata_json,
            'id': 'testString',
            'title': 'testString',
            'body': 'testString',
        }

        llm_json = {  # TurnEventGenerativeAICalledCalloutLlm
            'type': 'testString',
            'model_id': 'testString',
            'model_class_id': 'testString',
            'generated_token_count': 38,
            'input_token_count': 38,
            'success': True,
            'response': llm_response_json,
            'request': [search_result_json],
        }

        # JSON representation of a TurnEventGenerativeAICalledCallout model.
        source_json = {
            'search_called': True,
            'llm_called': True,
            'search': search_json,
            'llm': llm_json,
            'idk_reason_code': 'testString',
        }

        # Deserialize the JSON into a model instance.
        model = TurnEventGenerativeAICalledCallout.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict.
        model_copy = TurnEventGenerativeAICalledCallout(
            **TurnEventGenerativeAICalledCallout.from_dict(source_json).__dict__
        )

        # Both construction paths must yield equivalent instances.
        assert model == model_copy

        # Serializing back must reproduce the original JSON without data loss.
        assert model.to_dict() == source_json
+
+
+class TestModel_TurnEventGenerativeAICalledCalloutLlm:
+ """
+ Test Class for TurnEventGenerativeAICalledCalloutLlm
+ """
+
+ def test_turn_event_generative_ai_called_callout_llm_serialization(self):
+ """
+ Test serialization/deserialization for TurnEventGenerativeAICalledCalloutLlm
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ turn_event_generative_ai_called_callout_llm_response_model = {} # TurnEventGenerativeAICalledCalloutLlmResponse
+ turn_event_generative_ai_called_callout_llm_response_model['text'] = 'testString'
+ turn_event_generative_ai_called_callout_llm_response_model['response_type'] = 'testString'
+ turn_event_generative_ai_called_callout_llm_response_model['is_idk_response'] = True
+
+ search_results_result_metadata_model = {} # SearchResultsResultMetadata
+ search_results_result_metadata_model['document_retrieval_source'] = 'testString'
+ search_results_result_metadata_model['score'] = 38
+
+ search_results_model = {} # SearchResults
+ search_results_model['result_metadata'] = search_results_result_metadata_model
+ search_results_model['id'] = 'testString'
+ search_results_model['title'] = 'testString'
+ search_results_model['body'] = 'testString'
+
+ # Construct a json representation of a TurnEventGenerativeAICalledCalloutLlm model
+ turn_event_generative_ai_called_callout_llm_model_json = {}
+ turn_event_generative_ai_called_callout_llm_model_json['type'] = 'testString'
+ turn_event_generative_ai_called_callout_llm_model_json['model_id'] = 'testString'
+ turn_event_generative_ai_called_callout_llm_model_json['model_class_id'] = 'testString'
+ turn_event_generative_ai_called_callout_llm_model_json['generated_token_count'] = 38
+ turn_event_generative_ai_called_callout_llm_model_json['input_token_count'] = 38
+ turn_event_generative_ai_called_callout_llm_model_json['success'] = True
+ turn_event_generative_ai_called_callout_llm_model_json['response'] = turn_event_generative_ai_called_callout_llm_response_model
+ turn_event_generative_ai_called_callout_llm_model_json['request'] = [search_results_model]
+
+ # Construct a model instance of TurnEventGenerativeAICalledCalloutLlm by calling from_dict on the json representation
+ turn_event_generative_ai_called_callout_llm_model = TurnEventGenerativeAICalledCalloutLlm.from_dict(turn_event_generative_ai_called_callout_llm_model_json)
+ assert turn_event_generative_ai_called_callout_llm_model != False
+
+ # Construct a second model instance of TurnEventGenerativeAICalledCalloutLlm from the dict form of the first instance
+ turn_event_generative_ai_called_callout_llm_model_dict = TurnEventGenerativeAICalledCalloutLlm.from_dict(turn_event_generative_ai_called_callout_llm_model_json).__dict__
+ turn_event_generative_ai_called_callout_llm_model2 = TurnEventGenerativeAICalledCalloutLlm(**turn_event_generative_ai_called_callout_llm_model_dict)
+
+ # Verify the model instances are equivalent
+ assert turn_event_generative_ai_called_callout_llm_model == turn_event_generative_ai_called_callout_llm_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ turn_event_generative_ai_called_callout_llm_model_json2 = turn_event_generative_ai_called_callout_llm_model.to_dict()
+ assert turn_event_generative_ai_called_callout_llm_model_json2 == turn_event_generative_ai_called_callout_llm_model_json
+
+
+class TestModel_TurnEventGenerativeAICalledCalloutLlmResponse:
+ """
+ Test Class for TurnEventGenerativeAICalledCalloutLlmResponse
+ """
+
+ def test_turn_event_generative_ai_called_callout_llm_response_serialization(self):
+ """
+ Test serialization/deserialization for TurnEventGenerativeAICalledCalloutLlmResponse
+ """
+
+ # Construct a json representation of a TurnEventGenerativeAICalledCalloutLlmResponse model
+ turn_event_generative_ai_called_callout_llm_response_model_json = {}
+ turn_event_generative_ai_called_callout_llm_response_model_json['text'] = 'testString'
+ turn_event_generative_ai_called_callout_llm_response_model_json['response_type'] = 'testString'
+ turn_event_generative_ai_called_callout_llm_response_model_json['is_idk_response'] = True
+
+ # Construct a model instance of TurnEventGenerativeAICalledCalloutLlmResponse by calling from_dict on the json representation
+ turn_event_generative_ai_called_callout_llm_response_model = TurnEventGenerativeAICalledCalloutLlmResponse.from_dict(turn_event_generative_ai_called_callout_llm_response_model_json)
+ assert turn_event_generative_ai_called_callout_llm_response_model != False
+
+ # Construct a second model instance of TurnEventGenerativeAICalledCalloutLlmResponse from the dict form of the first instance
+ turn_event_generative_ai_called_callout_llm_response_model_dict = TurnEventGenerativeAICalledCalloutLlmResponse.from_dict(turn_event_generative_ai_called_callout_llm_response_model_json).__dict__
+ turn_event_generative_ai_called_callout_llm_response_model2 = TurnEventGenerativeAICalledCalloutLlmResponse(**turn_event_generative_ai_called_callout_llm_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert turn_event_generative_ai_called_callout_llm_response_model == turn_event_generative_ai_called_callout_llm_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ turn_event_generative_ai_called_callout_llm_response_model_json2 = turn_event_generative_ai_called_callout_llm_response_model.to_dict()
+ assert turn_event_generative_ai_called_callout_llm_response_model_json2 == turn_event_generative_ai_called_callout_llm_response_model_json
+
+
+class TestModel_TurnEventGenerativeAICalledCalloutRequest:
+ """
+ Test Class for TurnEventGenerativeAICalledCalloutRequest
+ """
+
+ def test_turn_event_generative_ai_called_callout_request_serialization(self):
+ """
+ Test serialization/deserialization for TurnEventGenerativeAICalledCalloutRequest
+ """
+
+ # Construct a json representation of a TurnEventGenerativeAICalledCalloutRequest model
+ turn_event_generative_ai_called_callout_request_model_json = {}
+ turn_event_generative_ai_called_callout_request_model_json['method'] = 'GET'
+ turn_event_generative_ai_called_callout_request_model_json['url'] = 'testString'
+ turn_event_generative_ai_called_callout_request_model_json['port'] = 'testString'
+ turn_event_generative_ai_called_callout_request_model_json['path'] = 'testString'
+ turn_event_generative_ai_called_callout_request_model_json['query_parameters'] = 'testString'
+ turn_event_generative_ai_called_callout_request_model_json['headers'] = {'anyKey': 'anyValue'}
+ turn_event_generative_ai_called_callout_request_model_json['body'] = {'anyKey': 'anyValue'}
+
+ # Construct a model instance of TurnEventGenerativeAICalledCalloutRequest by calling from_dict on the json representation
+ turn_event_generative_ai_called_callout_request_model = TurnEventGenerativeAICalledCalloutRequest.from_dict(turn_event_generative_ai_called_callout_request_model_json)
+ assert turn_event_generative_ai_called_callout_request_model != False
+
+ # Construct a second model instance of TurnEventGenerativeAICalledCalloutRequest from the dict form of the first instance
+ turn_event_generative_ai_called_callout_request_model_dict = TurnEventGenerativeAICalledCalloutRequest.from_dict(turn_event_generative_ai_called_callout_request_model_json).__dict__
+ turn_event_generative_ai_called_callout_request_model2 = TurnEventGenerativeAICalledCalloutRequest(**turn_event_generative_ai_called_callout_request_model_dict)
+
+ # Verify the model instances are equivalent
+ assert turn_event_generative_ai_called_callout_request_model == turn_event_generative_ai_called_callout_request_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ turn_event_generative_ai_called_callout_request_model_json2 = turn_event_generative_ai_called_callout_request_model.to_dict()
+ assert turn_event_generative_ai_called_callout_request_model_json2 == turn_event_generative_ai_called_callout_request_model_json
+
+
+class TestModel_TurnEventGenerativeAICalledCalloutResponse:
+ """
+ Test Class for TurnEventGenerativeAICalledCalloutResponse
+ """
+
+ def test_turn_event_generative_ai_called_callout_response_serialization(self):
+ """
+ Test serialization/deserialization for TurnEventGenerativeAICalledCalloutResponse
+ """
+
+ # Construct a json representation of a TurnEventGenerativeAICalledCalloutResponse model
+ turn_event_generative_ai_called_callout_response_model_json = {}
+ turn_event_generative_ai_called_callout_response_model_json['body'] = 'testString'
+ turn_event_generative_ai_called_callout_response_model_json['status_code'] = 38
+
+ # Construct a model instance of TurnEventGenerativeAICalledCalloutResponse by calling from_dict on the json representation
+ turn_event_generative_ai_called_callout_response_model = TurnEventGenerativeAICalledCalloutResponse.from_dict(turn_event_generative_ai_called_callout_response_model_json)
+ assert turn_event_generative_ai_called_callout_response_model != False
+
+ # Construct a second model instance of TurnEventGenerativeAICalledCalloutResponse from the dict form of the first instance
+ turn_event_generative_ai_called_callout_response_model_dict = TurnEventGenerativeAICalledCalloutResponse.from_dict(turn_event_generative_ai_called_callout_response_model_json).__dict__
+ turn_event_generative_ai_called_callout_response_model2 = TurnEventGenerativeAICalledCalloutResponse(**turn_event_generative_ai_called_callout_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert turn_event_generative_ai_called_callout_response_model == turn_event_generative_ai_called_callout_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ turn_event_generative_ai_called_callout_response_model_json2 = turn_event_generative_ai_called_callout_response_model.to_dict()
+ assert turn_event_generative_ai_called_callout_response_model_json2 == turn_event_generative_ai_called_callout_response_model_json
+
+
+class TestModel_TurnEventGenerativeAICalledCalloutSearch:
+ """
+ Test Class for TurnEventGenerativeAICalledCalloutSearch
+ """
+
+ def test_turn_event_generative_ai_called_callout_search_serialization(self):
+ """
+ Test serialization/deserialization for TurnEventGenerativeAICalledCalloutSearch
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ turn_event_generative_ai_called_callout_request_model = {} # TurnEventGenerativeAICalledCalloutRequest
+ turn_event_generative_ai_called_callout_request_model['method'] = 'GET'
+ turn_event_generative_ai_called_callout_request_model['url'] = 'testString'
+ turn_event_generative_ai_called_callout_request_model['port'] = 'testString'
+ turn_event_generative_ai_called_callout_request_model['path'] = 'testString'
+ turn_event_generative_ai_called_callout_request_model['query_parameters'] = 'testString'
+ turn_event_generative_ai_called_callout_request_model['headers'] = {'anyKey': 'anyValue'}
+ turn_event_generative_ai_called_callout_request_model['body'] = {'anyKey': 'anyValue'}
+
+ turn_event_generative_ai_called_callout_response_model = {} # TurnEventGenerativeAICalledCalloutResponse
+ turn_event_generative_ai_called_callout_response_model['body'] = 'testString'
+ turn_event_generative_ai_called_callout_response_model['status_code'] = 38
+
+ # Construct a json representation of a TurnEventGenerativeAICalledCalloutSearch model
+ turn_event_generative_ai_called_callout_search_model_json = {}
+ turn_event_generative_ai_called_callout_search_model_json['engine'] = 'testString'
+ turn_event_generative_ai_called_callout_search_model_json['index'] = 'testString'
+ turn_event_generative_ai_called_callout_search_model_json['query'] = 'testString'
+ turn_event_generative_ai_called_callout_search_model_json['request'] = turn_event_generative_ai_called_callout_request_model
+ turn_event_generative_ai_called_callout_search_model_json['response'] = turn_event_generative_ai_called_callout_response_model
+
+ # Construct a model instance of TurnEventGenerativeAICalledCalloutSearch by calling from_dict on the json representation
+ turn_event_generative_ai_called_callout_search_model = TurnEventGenerativeAICalledCalloutSearch.from_dict(turn_event_generative_ai_called_callout_search_model_json)
+ assert turn_event_generative_ai_called_callout_search_model != False
+
+ # Construct a second model instance of TurnEventGenerativeAICalledCalloutSearch from the dict form of the first instance
+ turn_event_generative_ai_called_callout_search_model_dict = TurnEventGenerativeAICalledCalloutSearch.from_dict(turn_event_generative_ai_called_callout_search_model_json).__dict__
+ turn_event_generative_ai_called_callout_search_model2 = TurnEventGenerativeAICalledCalloutSearch(**turn_event_generative_ai_called_callout_search_model_dict)
+
+ # Verify the model instances are equivalent
+ assert turn_event_generative_ai_called_callout_search_model == turn_event_generative_ai_called_callout_search_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ turn_event_generative_ai_called_callout_search_model_json2 = turn_event_generative_ai_called_callout_search_model.to_dict()
+ assert turn_event_generative_ai_called_callout_search_model_json2 == turn_event_generative_ai_called_callout_search_model_json
+
+
+class TestModel_TurnEventGenerativeAICalledMetrics:
+ """
+ Test Class for TurnEventGenerativeAICalledMetrics
+ """
+
+ def test_turn_event_generative_ai_called_metrics_serialization(self):
+ """
+ Test serialization/deserialization for TurnEventGenerativeAICalledMetrics
+ """
+
+ # Construct a json representation of a TurnEventGenerativeAICalledMetrics model
+ turn_event_generative_ai_called_metrics_model_json = {}
+ turn_event_generative_ai_called_metrics_model_json['search_time_ms'] = 72.5
+ turn_event_generative_ai_called_metrics_model_json['answer_generation_time_ms'] = 72.5
+ turn_event_generative_ai_called_metrics_model_json['total_time_ms'] = 72.5
+
+ # Construct a model instance of TurnEventGenerativeAICalledMetrics by calling from_dict on the json representation
+ turn_event_generative_ai_called_metrics_model = TurnEventGenerativeAICalledMetrics.from_dict(turn_event_generative_ai_called_metrics_model_json)
+ assert turn_event_generative_ai_called_metrics_model != False
+
+ # Construct a second model instance of TurnEventGenerativeAICalledMetrics from the dict form of the first instance
+ turn_event_generative_ai_called_metrics_model_dict = TurnEventGenerativeAICalledMetrics.from_dict(turn_event_generative_ai_called_metrics_model_json).__dict__
+ turn_event_generative_ai_called_metrics_model2 = TurnEventGenerativeAICalledMetrics(**turn_event_generative_ai_called_metrics_model_dict)
+
+ # Verify the model instances are equivalent
+ assert turn_event_generative_ai_called_metrics_model == turn_event_generative_ai_called_metrics_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ turn_event_generative_ai_called_metrics_model_json2 = turn_event_generative_ai_called_metrics_model.to_dict()
+ assert turn_event_generative_ai_called_metrics_model_json2 == turn_event_generative_ai_called_metrics_model_json
+
+
+class TestModel_TurnEventNodeSource:
+ """
+ Test Class for TurnEventNodeSource
+ """
+
+ def test_turn_event_node_source_serialization(self):
+ """
+ Test serialization/deserialization for TurnEventNodeSource
+ """
+
+ # Construct a json representation of a TurnEventNodeSource model
+ turn_event_node_source_model_json = {}
+ turn_event_node_source_model_json['type'] = 'dialog_node'
+ turn_event_node_source_model_json['dialog_node'] = 'testString'
+ turn_event_node_source_model_json['title'] = 'testString'
+ turn_event_node_source_model_json['condition'] = 'testString'
+
+ # Construct a model instance of TurnEventNodeSource by calling from_dict on the json representation
+ turn_event_node_source_model = TurnEventNodeSource.from_dict(turn_event_node_source_model_json)
+ assert turn_event_node_source_model != False
+
+ # Construct a second model instance of TurnEventNodeSource from the dict form of the first instance
+ turn_event_node_source_model_dict = TurnEventNodeSource.from_dict(turn_event_node_source_model_json).__dict__
+ turn_event_node_source_model2 = TurnEventNodeSource(**turn_event_node_source_model_dict)
+
+ # Verify the model instances are equivalent
+ assert turn_event_node_source_model == turn_event_node_source_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ turn_event_node_source_model_json2 = turn_event_node_source_model.to_dict()
+ assert turn_event_node_source_model_json2 == turn_event_node_source_model_json
+
+
+class TestModel_TurnEventSearchError:
+ """
+ Test Class for TurnEventSearchError
+ """
+
+ def test_turn_event_search_error_serialization(self):
+ """
+ Test serialization/deserialization for TurnEventSearchError
+ """
+
+ # Construct a json representation of a TurnEventSearchError model
+ turn_event_search_error_model_json = {}
+ turn_event_search_error_model_json['message'] = 'testString'
+
+ # Construct a model instance of TurnEventSearchError by calling from_dict on the json representation
+ turn_event_search_error_model = TurnEventSearchError.from_dict(turn_event_search_error_model_json)
+ assert turn_event_search_error_model != False
+
+ # Construct a second model instance of TurnEventSearchError from the dict form of the first instance
+ turn_event_search_error_model_dict = TurnEventSearchError.from_dict(turn_event_search_error_model_json).__dict__
+ turn_event_search_error_model2 = TurnEventSearchError(**turn_event_search_error_model_dict)
+
+ # Verify the model instances are equivalent
+ assert turn_event_search_error_model == turn_event_search_error_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ turn_event_search_error_model_json2 = turn_event_search_error_model.to_dict()
+ assert turn_event_search_error_model_json2 == turn_event_search_error_model_json
+
+
+class TestModel_TurnEventStepSource:
+ """
+ Test Class for TurnEventStepSource
+ """
+
+ def test_turn_event_step_source_serialization(self):
+ """
+ Test serialization/deserialization for TurnEventStepSource
+ """
+
+ # Construct a json representation of a TurnEventStepSource model
+ turn_event_step_source_model_json = {}
+ turn_event_step_source_model_json['type'] = 'step'
+ turn_event_step_source_model_json['action'] = 'testString'
+ turn_event_step_source_model_json['action_title'] = 'testString'
+ turn_event_step_source_model_json['step'] = 'testString'
+ turn_event_step_source_model_json['is_ai_guided'] = True
+ turn_event_step_source_model_json['is_skill_based'] = True
+
+ # Construct a model instance of TurnEventStepSource by calling from_dict on the json representation
+ turn_event_step_source_model = TurnEventStepSource.from_dict(turn_event_step_source_model_json)
+ assert turn_event_step_source_model != False
+
+ # Construct a second model instance of TurnEventStepSource from the dict form of the first instance
+ turn_event_step_source_model_dict = TurnEventStepSource.from_dict(turn_event_step_source_model_json).__dict__
+ turn_event_step_source_model2 = TurnEventStepSource(**turn_event_step_source_model_dict)
+
+ # Verify the model instances are equivalent
+ assert turn_event_step_source_model == turn_event_step_source_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ turn_event_step_source_model_json2 = turn_event_step_source_model.to_dict()
+ assert turn_event_step_source_model_json2 == turn_event_step_source_model_json
+
+
+class TestModel_UpdateEnvironmentOrchestration:
+ """
+ Test Class for UpdateEnvironmentOrchestration
+ """
+
+ def test_update_environment_orchestration_serialization(self):
+ """
+ Test serialization/deserialization for UpdateEnvironmentOrchestration
+ """
+
+ # Construct a json representation of an UpdateEnvironmentOrchestration model
+ update_environment_orchestration_model_json = {}
+ update_environment_orchestration_model_json['search_skill_fallback'] = True
+
+ # Construct a model instance of UpdateEnvironmentOrchestration by calling from_dict on the json representation
+ update_environment_orchestration_model = UpdateEnvironmentOrchestration.from_dict(update_environment_orchestration_model_json)
+ assert update_environment_orchestration_model != False
+
+ # Construct a second model instance of UpdateEnvironmentOrchestration from the dict form of the first instance
+ update_environment_orchestration_model_dict = UpdateEnvironmentOrchestration.from_dict(update_environment_orchestration_model_json).__dict__
+ update_environment_orchestration_model2 = UpdateEnvironmentOrchestration(**update_environment_orchestration_model_dict)
+
+ # Verify the model instances are equivalent
+ assert update_environment_orchestration_model == update_environment_orchestration_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ update_environment_orchestration_model_json2 = update_environment_orchestration_model.to_dict()
+ assert update_environment_orchestration_model_json2 == update_environment_orchestration_model_json
+
+
+class TestModel_UpdateEnvironmentReleaseReference:
+ """
+ Test Class for UpdateEnvironmentReleaseReference
+ """
+
+ def test_update_environment_release_reference_serialization(self):
+ """
+ Test serialization/deserialization for UpdateEnvironmentReleaseReference
+ """
+
+ # Construct a json representation of an UpdateEnvironmentReleaseReference model
+ update_environment_release_reference_model_json = {}
+ update_environment_release_reference_model_json['release'] = 'testString'
+
+ # Construct a model instance of UpdateEnvironmentReleaseReference by calling from_dict on the json representation
+ update_environment_release_reference_model = UpdateEnvironmentReleaseReference.from_dict(update_environment_release_reference_model_json)
+ assert update_environment_release_reference_model != False
+
+ # Construct a second model instance of UpdateEnvironmentReleaseReference from the dict form of the first instance
+ update_environment_release_reference_model_dict = UpdateEnvironmentReleaseReference.from_dict(update_environment_release_reference_model_json).__dict__
+ update_environment_release_reference_model2 = UpdateEnvironmentReleaseReference(**update_environment_release_reference_model_dict)
+
+ # Verify the model instances are equivalent
+ assert update_environment_release_reference_model == update_environment_release_reference_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ update_environment_release_reference_model_json2 = update_environment_release_reference_model.to_dict()
+ assert update_environment_release_reference_model_json2 == update_environment_release_reference_model_json
+
+
+class TestModel_CompleteItem:
+ """
+ Test Class for CompleteItem
+ """
+
+ def test_complete_item_serialization(self):
+ """
+ Test serialization/deserialization for CompleteItem
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ metadata_model = {} # Metadata
+ metadata_model['id'] = 38
+
+ # Construct a json representation of a CompleteItem model
+ complete_item_model_json = {}
+ complete_item_model_json['streaming_metadata'] = metadata_model
+
+ # Construct a model instance of CompleteItem by calling from_dict on the json representation
+ complete_item_model = CompleteItem.from_dict(complete_item_model_json)
+ assert complete_item_model != False
+
+ # Construct a second model instance of CompleteItem from the dict form of the first instance
+ complete_item_model_dict = CompleteItem.from_dict(complete_item_model_json).__dict__
+ complete_item_model2 = CompleteItem(**complete_item_model_dict)
+
+ # Verify the model instances are equivalent
+ assert complete_item_model == complete_item_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ complete_item_model_json2 = complete_item_model.to_dict()
+ assert complete_item_model_json2 == complete_item_model_json
+
+
+class TestModel_GenerativeAITaskContentGroundedAnswering:
+ """
+ Test Class for GenerativeAITaskContentGroundedAnswering
+ """
+
+ def test_generative_ai_task_content_grounded_answering_serialization(self):
+ """
+ Test serialization/deserialization for GenerativeAITaskContentGroundedAnswering
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ generative_ai_task_confidence_scores_model = {} # GenerativeAITaskConfidenceScores
+ generative_ai_task_confidence_scores_model['pre_gen'] = 72.5
+ generative_ai_task_confidence_scores_model['pre_gen_threshold'] = 72.5
+ generative_ai_task_confidence_scores_model['post_gen'] = 72.5
+ generative_ai_task_confidence_scores_model['post_gen_threshold'] = 72.5
+
+ # Construct a json representation of a GenerativeAITaskContentGroundedAnswering model
+ generative_ai_task_content_grounded_answering_model_json = {}
+ generative_ai_task_content_grounded_answering_model_json['task'] = 'content_grounded_answering'
+ generative_ai_task_content_grounded_answering_model_json['is_idk_response'] = True
+ generative_ai_task_content_grounded_answering_model_json['is_hap_detected'] = True
+ generative_ai_task_content_grounded_answering_model_json['confidence_scores'] = generative_ai_task_confidence_scores_model
+ generative_ai_task_content_grounded_answering_model_json['original_response'] = 'testString'
+ generative_ai_task_content_grounded_answering_model_json['inferred_query'] = 'testString'
+
+ # Construct a model instance of GenerativeAITaskContentGroundedAnswering by calling from_dict on the json representation
+ generative_ai_task_content_grounded_answering_model = GenerativeAITaskContentGroundedAnswering.from_dict(generative_ai_task_content_grounded_answering_model_json)
+ assert generative_ai_task_content_grounded_answering_model != False
+
+ # Construct a second model instance of GenerativeAITaskContentGroundedAnswering from the dict form of the first instance
+ generative_ai_task_content_grounded_answering_model_dict = GenerativeAITaskContentGroundedAnswering.from_dict(generative_ai_task_content_grounded_answering_model_json).__dict__
+ generative_ai_task_content_grounded_answering_model2 = GenerativeAITaskContentGroundedAnswering(**generative_ai_task_content_grounded_answering_model_dict)
+
+ # Verify the model instances are equivalent
+ assert generative_ai_task_content_grounded_answering_model == generative_ai_task_content_grounded_answering_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ generative_ai_task_content_grounded_answering_model_json2 = generative_ai_task_content_grounded_answering_model.to_dict()
+ assert generative_ai_task_content_grounded_answering_model_json2 == generative_ai_task_content_grounded_answering_model_json
+
+
+class TestModel_GenerativeAITaskGeneralPurposeAnswering:
+ """
+ Test Class for GenerativeAITaskGeneralPurposeAnswering
+ """
+
+ def test_generative_ai_task_general_purpose_answering_serialization(self):
+ """
+ Test serialization/deserialization for GenerativeAITaskGeneralPurposeAnswering
+ """
+
+ # Construct a json representation of a GenerativeAITaskGeneralPurposeAnswering model
+ generative_ai_task_general_purpose_answering_model_json = {}
+ generative_ai_task_general_purpose_answering_model_json['task'] = 'general_purpose_answering'
+ generative_ai_task_general_purpose_answering_model_json['is_idk_response'] = True
+ generative_ai_task_general_purpose_answering_model_json['is_hap_detected'] = True
+
+ # Construct a model instance of GenerativeAITaskGeneralPurposeAnswering by calling from_dict on the json representation
+ generative_ai_task_general_purpose_answering_model = GenerativeAITaskGeneralPurposeAnswering.from_dict(generative_ai_task_general_purpose_answering_model_json)
+ assert generative_ai_task_general_purpose_answering_model != False
+
+ # Construct a second model instance of GenerativeAITaskGeneralPurposeAnswering from the dict form of the first instance
+ generative_ai_task_general_purpose_answering_model_dict = GenerativeAITaskGeneralPurposeAnswering.from_dict(generative_ai_task_general_purpose_answering_model_json).__dict__
+ generative_ai_task_general_purpose_answering_model2 = GenerativeAITaskGeneralPurposeAnswering(**generative_ai_task_general_purpose_answering_model_dict)
+
+ # Verify the model instances are equivalent
+ assert generative_ai_task_general_purpose_answering_model == generative_ai_task_general_purpose_answering_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ generative_ai_task_general_purpose_answering_model_json2 = generative_ai_task_general_purpose_answering_model.to_dict()
+ assert generative_ai_task_general_purpose_answering_model_json2 == generative_ai_task_general_purpose_answering_model_json
+
+
+class TestModel_LogMessageSourceAction:
+ """
+ Test Class for LogMessageSourceAction
+ """
+
+ def test_log_message_source_action_serialization(self):
+ """
+ Test serialization/deserialization for LogMessageSourceAction
+ """
+
+ # Construct a json representation of a LogMessageSourceAction model
+ log_message_source_action_model_json = {}
+ log_message_source_action_model_json['type'] = 'action'
+ log_message_source_action_model_json['action'] = 'testString'
+
+ # Construct a model instance of LogMessageSourceAction by calling from_dict on the json representation
+ log_message_source_action_model = LogMessageSourceAction.from_dict(log_message_source_action_model_json)
+ assert log_message_source_action_model != False
+
+ # Construct a second model instance of LogMessageSourceAction from the dict form of the first instance
+ log_message_source_action_model_dict = LogMessageSourceAction.from_dict(log_message_source_action_model_json).__dict__
+ log_message_source_action_model2 = LogMessageSourceAction(**log_message_source_action_model_dict)
+
+ # Verify the model instances are equivalent
+ assert log_message_source_action_model == log_message_source_action_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ log_message_source_action_model_json2 = log_message_source_action_model.to_dict()
+ assert log_message_source_action_model_json2 == log_message_source_action_model_json
+
+
+class TestModel_LogMessageSourceDialogNode:
+ """
+ Test Class for LogMessageSourceDialogNode
+ """
+
+ def test_log_message_source_dialog_node_serialization(self):
+ """
+ Test serialization/deserialization for LogMessageSourceDialogNode
+ """
+
+ # Construct a json representation of a LogMessageSourceDialogNode model
+ log_message_source_dialog_node_model_json = {}
+ log_message_source_dialog_node_model_json['type'] = 'dialog_node'
+ log_message_source_dialog_node_model_json['dialog_node'] = 'testString'
+
+ # Construct a model instance of LogMessageSourceDialogNode by calling from_dict on the json representation
+ log_message_source_dialog_node_model = LogMessageSourceDialogNode.from_dict(log_message_source_dialog_node_model_json)
+ assert log_message_source_dialog_node_model != False
+
+ # Construct a second model instance of LogMessageSourceDialogNode from the dict form of the first instance
+ log_message_source_dialog_node_model_dict = LogMessageSourceDialogNode.from_dict(log_message_source_dialog_node_model_json).__dict__
+ log_message_source_dialog_node_model2 = LogMessageSourceDialogNode(**log_message_source_dialog_node_model_dict)
+
+ # Verify the model instances are equivalent
+ assert log_message_source_dialog_node_model == log_message_source_dialog_node_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ log_message_source_dialog_node_model_json2 = log_message_source_dialog_node_model.to_dict()
+ assert log_message_source_dialog_node_model_json2 == log_message_source_dialog_node_model_json
+
+
+class TestModel_LogMessageSourceHandler:
+ """
+ Test Class for LogMessageSourceHandler
+ """
+
+ def test_log_message_source_handler_serialization(self):
+ """
+ Test serialization/deserialization for LogMessageSourceHandler
+ """
+
+ # Construct a json representation of a LogMessageSourceHandler model
+ log_message_source_handler_model_json = {}
+ log_message_source_handler_model_json['type'] = 'handler'
+ log_message_source_handler_model_json['action'] = 'testString'
+ log_message_source_handler_model_json['step'] = 'testString'
+ log_message_source_handler_model_json['handler'] = 'testString'
+
+ # Construct a model instance of LogMessageSourceHandler by calling from_dict on the json representation
+ log_message_source_handler_model = LogMessageSourceHandler.from_dict(log_message_source_handler_model_json)
+ assert log_message_source_handler_model != False
+
+ # Construct a second model instance of LogMessageSourceHandler from the dict form of the first instance
+ log_message_source_handler_model_dict = LogMessageSourceHandler.from_dict(log_message_source_handler_model_json).__dict__
+ log_message_source_handler_model2 = LogMessageSourceHandler(**log_message_source_handler_model_dict)
+
+ # Verify the model instances are equivalent
+ assert log_message_source_handler_model == log_message_source_handler_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ log_message_source_handler_model_json2 = log_message_source_handler_model.to_dict()
+ assert log_message_source_handler_model_json2 == log_message_source_handler_model_json
+
+
+class TestModel_LogMessageSourceStep:
+ """
+ Test Class for LogMessageSourceStep
+ """
+
+ def test_log_message_source_step_serialization(self):
+ """
+ Test serialization/deserialization for LogMessageSourceStep
+ """
+
+ # Construct a json representation of a LogMessageSourceStep model
+ log_message_source_step_model_json = {}
+ log_message_source_step_model_json['type'] = 'step'
+ log_message_source_step_model_json['action'] = 'testString'
+ log_message_source_step_model_json['step'] = 'testString'
+
+ # Construct a model instance of LogMessageSourceStep by calling from_dict on the json representation
+ log_message_source_step_model = LogMessageSourceStep.from_dict(log_message_source_step_model_json)
+ assert log_message_source_step_model != False
+
+ # Construct a second model instance from the first instance's attribute dict, to verify the constructor path
+ log_message_source_step_model_dict = LogMessageSourceStep.from_dict(log_message_source_step_model_json).__dict__
+ log_message_source_step_model2 = LogMessageSourceStep(**log_message_source_step_model_dict)
+
+ # Verify the model instances are equivalent
+ assert log_message_source_step_model == log_message_source_step_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ log_message_source_step_model_json2 = log_message_source_step_model.to_dict()
+ assert log_message_source_step_model_json2 == log_message_source_step_model_json
+
+
+class TestModel_MessageOutputDebugTurnEventTurnEventActionFinished:
+ """
+ Test Class for MessageOutputDebugTurnEventTurnEventActionFinished
+ """
+
+ def test_message_output_debug_turn_event_turn_event_action_finished_serialization(self):
+ """
+ Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventActionFinished
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ # Construct a json representation of a MessageOutputDebugTurnEventTurnEventActionFinished model
+ message_output_debug_turn_event_turn_event_action_finished_model_json = {}
+ message_output_debug_turn_event_turn_event_action_finished_model_json['event'] = 'action_finished'
+ message_output_debug_turn_event_turn_event_action_finished_model_json['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_turn_event_action_finished_model_json['action_start_time'] = 'testString'
+ message_output_debug_turn_event_turn_event_action_finished_model_json['condition_type'] = 'user_defined'
+ message_output_debug_turn_event_turn_event_action_finished_model_json['reason'] = 'all_steps_done'
+ message_output_debug_turn_event_turn_event_action_finished_model_json['action_variables'] = {'anyKey': 'anyValue'}
+
+ # Construct a model instance of MessageOutputDebugTurnEventTurnEventActionFinished by calling from_dict on the json representation
+ message_output_debug_turn_event_turn_event_action_finished_model = MessageOutputDebugTurnEventTurnEventActionFinished.from_dict(message_output_debug_turn_event_turn_event_action_finished_model_json)
+ assert message_output_debug_turn_event_turn_event_action_finished_model != False
+
+ # Construct a second model instance from the first instance's attribute dict, to verify the constructor path
+ message_output_debug_turn_event_turn_event_action_finished_model_dict = MessageOutputDebugTurnEventTurnEventActionFinished.from_dict(message_output_debug_turn_event_turn_event_action_finished_model_json).__dict__
+ message_output_debug_turn_event_turn_event_action_finished_model2 = MessageOutputDebugTurnEventTurnEventActionFinished(**message_output_debug_turn_event_turn_event_action_finished_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_output_debug_turn_event_turn_event_action_finished_model == message_output_debug_turn_event_turn_event_action_finished_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_output_debug_turn_event_turn_event_action_finished_model_json2 = message_output_debug_turn_event_turn_event_action_finished_model.to_dict()
+ assert message_output_debug_turn_event_turn_event_action_finished_model_json2 == message_output_debug_turn_event_turn_event_action_finished_model_json
+
+
+class TestModel_MessageOutputDebugTurnEventTurnEventActionRoutingDenied:
+ """
+ Test Class for MessageOutputDebugTurnEventTurnEventActionRoutingDenied
+ """
+
+ def test_message_output_debug_turn_event_turn_event_action_routing_denied_serialization(self):
+ """
+ Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventActionRoutingDenied
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ # Construct a json representation of a MessageOutputDebugTurnEventTurnEventActionRoutingDenied model
+ message_output_debug_turn_event_turn_event_action_routing_denied_model_json = {}
+ message_output_debug_turn_event_turn_event_action_routing_denied_model_json['event'] = 'action_routing_denied'
+ message_output_debug_turn_event_turn_event_action_routing_denied_model_json['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_turn_event_action_routing_denied_model_json['condition_type'] = 'user_defined'
+ message_output_debug_turn_event_turn_event_action_routing_denied_model_json['reason'] = 'action_conditions_failed'
+
+ # Construct a model instance of MessageOutputDebugTurnEventTurnEventActionRoutingDenied by calling from_dict on the json representation
+ message_output_debug_turn_event_turn_event_action_routing_denied_model = MessageOutputDebugTurnEventTurnEventActionRoutingDenied.from_dict(message_output_debug_turn_event_turn_event_action_routing_denied_model_json)
+ assert message_output_debug_turn_event_turn_event_action_routing_denied_model != False
+
+ # Construct a second model instance from the first instance's attribute dict, to verify the constructor path
+ message_output_debug_turn_event_turn_event_action_routing_denied_model_dict = MessageOutputDebugTurnEventTurnEventActionRoutingDenied.from_dict(message_output_debug_turn_event_turn_event_action_routing_denied_model_json).__dict__
+ message_output_debug_turn_event_turn_event_action_routing_denied_model2 = MessageOutputDebugTurnEventTurnEventActionRoutingDenied(**message_output_debug_turn_event_turn_event_action_routing_denied_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_output_debug_turn_event_turn_event_action_routing_denied_model == message_output_debug_turn_event_turn_event_action_routing_denied_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_output_debug_turn_event_turn_event_action_routing_denied_model_json2 = message_output_debug_turn_event_turn_event_action_routing_denied_model.to_dict()
+ assert message_output_debug_turn_event_turn_event_action_routing_denied_model_json2 == message_output_debug_turn_event_turn_event_action_routing_denied_model_json
+
+
+class TestModel_MessageOutputDebugTurnEventTurnEventActionVisited:
+ """
+ Test Class for MessageOutputDebugTurnEventTurnEventActionVisited
+ """
+
+ def test_message_output_debug_turn_event_turn_event_action_visited_serialization(self):
+ """
+ Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventActionVisited
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ # Construct a json representation of a MessageOutputDebugTurnEventTurnEventActionVisited model
+ message_output_debug_turn_event_turn_event_action_visited_model_json = {}
+ message_output_debug_turn_event_turn_event_action_visited_model_json['event'] = 'action_visited'
+ message_output_debug_turn_event_turn_event_action_visited_model_json['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_turn_event_action_visited_model_json['action_start_time'] = 'testString'
+ message_output_debug_turn_event_turn_event_action_visited_model_json['condition_type'] = 'user_defined'
+ message_output_debug_turn_event_turn_event_action_visited_model_json['reason'] = 'intent'
+ message_output_debug_turn_event_turn_event_action_visited_model_json['result_variable'] = 'testString'
+
+ # Construct a model instance of MessageOutputDebugTurnEventTurnEventActionVisited by calling from_dict on the json representation
+ message_output_debug_turn_event_turn_event_action_visited_model = MessageOutputDebugTurnEventTurnEventActionVisited.from_dict(message_output_debug_turn_event_turn_event_action_visited_model_json)
+ assert message_output_debug_turn_event_turn_event_action_visited_model != False
+
+ # Construct a second model instance from the first instance's attribute dict, to verify the constructor path
+ message_output_debug_turn_event_turn_event_action_visited_model_dict = MessageOutputDebugTurnEventTurnEventActionVisited.from_dict(message_output_debug_turn_event_turn_event_action_visited_model_json).__dict__
+ message_output_debug_turn_event_turn_event_action_visited_model2 = MessageOutputDebugTurnEventTurnEventActionVisited(**message_output_debug_turn_event_turn_event_action_visited_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_output_debug_turn_event_turn_event_action_visited_model == message_output_debug_turn_event_turn_event_action_visited_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_output_debug_turn_event_turn_event_action_visited_model_json2 = message_output_debug_turn_event_turn_event_action_visited_model.to_dict()
+ assert message_output_debug_turn_event_turn_event_action_visited_model_json2 == message_output_debug_turn_event_turn_event_action_visited_model_json
+
+
+class TestModel_MessageOutputDebugTurnEventTurnEventCallout:
+ """
+ Test Class for MessageOutputDebugTurnEventTurnEventCallout
+ """
+
+ def test_message_output_debug_turn_event_turn_event_callout_serialization(self):
+ """
+ Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventCallout
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ turn_event_callout_callout_request_model = {} # TurnEventCalloutCalloutRequest
+ turn_event_callout_callout_request_model['method'] = 'get'
+ turn_event_callout_callout_request_model['url'] = 'testString'
+ turn_event_callout_callout_request_model['path'] = 'testString'
+ turn_event_callout_callout_request_model['query_parameters'] = 'testString'
+ turn_event_callout_callout_request_model['headers'] = {'anyKey': 'anyValue'}
+ turn_event_callout_callout_request_model['body'] = {'anyKey': 'anyValue'}
+
+ turn_event_callout_callout_response_model = {} # TurnEventCalloutCalloutResponse
+ turn_event_callout_callout_response_model['body'] = 'testString'
+ turn_event_callout_callout_response_model['status_code'] = 38
+ turn_event_callout_callout_response_model['last_event'] = {'anyKey': 'anyValue'}
+
+ turn_event_callout_callout_model = {} # TurnEventCalloutCallout
+ turn_event_callout_callout_model['type'] = 'integration_interaction'
+ turn_event_callout_callout_model['internal'] = {'anyKey': 'anyValue'}
+ turn_event_callout_callout_model['result_variable'] = 'testString'
+ turn_event_callout_callout_model['request'] = turn_event_callout_callout_request_model
+ turn_event_callout_callout_model['response'] = turn_event_callout_callout_response_model
+
+ turn_event_callout_error_model = {} # TurnEventCalloutError
+ turn_event_callout_error_model['message'] = 'testString'
+
+ # Construct a json representation of a MessageOutputDebugTurnEventTurnEventCallout model
+ message_output_debug_turn_event_turn_event_callout_model_json = {}
+ message_output_debug_turn_event_turn_event_callout_model_json['event'] = 'callout'
+ message_output_debug_turn_event_turn_event_callout_model_json['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_turn_event_callout_model_json['callout'] = turn_event_callout_callout_model
+ message_output_debug_turn_event_turn_event_callout_model_json['error'] = turn_event_callout_error_model
+
+ # Construct a model instance of MessageOutputDebugTurnEventTurnEventCallout by calling from_dict on the json representation
+ message_output_debug_turn_event_turn_event_callout_model = MessageOutputDebugTurnEventTurnEventCallout.from_dict(message_output_debug_turn_event_turn_event_callout_model_json)
+ assert message_output_debug_turn_event_turn_event_callout_model != False
+
+ # Construct a second model instance from the first instance's attribute dict, to verify the constructor path
+ message_output_debug_turn_event_turn_event_callout_model_dict = MessageOutputDebugTurnEventTurnEventCallout.from_dict(message_output_debug_turn_event_turn_event_callout_model_json).__dict__
+ message_output_debug_turn_event_turn_event_callout_model2 = MessageOutputDebugTurnEventTurnEventCallout(**message_output_debug_turn_event_turn_event_callout_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_output_debug_turn_event_turn_event_callout_model == message_output_debug_turn_event_turn_event_callout_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_output_debug_turn_event_turn_event_callout_model_json2 = message_output_debug_turn_event_turn_event_callout_model.to_dict()
+ assert message_output_debug_turn_event_turn_event_callout_model_json2 == message_output_debug_turn_event_turn_event_callout_model_json
+
+
+class TestModel_MessageOutputDebugTurnEventTurnEventClientActions:
+ """
+ Test Class for MessageOutputDebugTurnEventTurnEventClientActions
+ """
+
+ def test_message_output_debug_turn_event_turn_event_client_actions_serialization(self):
+ """
+ Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventClientActions
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ turn_event_step_source_model = {} # TurnEventStepSource
+ turn_event_step_source_model['type'] = 'step'
+ turn_event_step_source_model['action'] = 'testString'
+ turn_event_step_source_model['action_title'] = 'testString'
+ turn_event_step_source_model['step'] = 'testString'
+ turn_event_step_source_model['is_ai_guided'] = True
+ turn_event_step_source_model['is_skill_based'] = True
+
+ client_action_model = {} # ClientAction
+ client_action_model['name'] = 'testString'
+ client_action_model['result_variable'] = 'testString'
+ client_action_model['type'] = 'testString'
+ client_action_model['skill'] = 'main skill'
+ client_action_model['parameters'] = {'anyKey': 'anyValue'}
+
+ # Construct a json representation of a MessageOutputDebugTurnEventTurnEventClientActions model
+ message_output_debug_turn_event_turn_event_client_actions_model_json = {}
+ message_output_debug_turn_event_turn_event_client_actions_model_json['event'] = 'client_actions'
+ message_output_debug_turn_event_turn_event_client_actions_model_json['source'] = turn_event_step_source_model
+ message_output_debug_turn_event_turn_event_client_actions_model_json['client_actions'] = [client_action_model]
+
+ # Construct a model instance of MessageOutputDebugTurnEventTurnEventClientActions by calling from_dict on the json representation
+ message_output_debug_turn_event_turn_event_client_actions_model = MessageOutputDebugTurnEventTurnEventClientActions.from_dict(message_output_debug_turn_event_turn_event_client_actions_model_json)
+ assert message_output_debug_turn_event_turn_event_client_actions_model != False
+
+ # Construct a second model instance from the first instance's attribute dict, to verify the constructor path
+ message_output_debug_turn_event_turn_event_client_actions_model_dict = MessageOutputDebugTurnEventTurnEventClientActions.from_dict(message_output_debug_turn_event_turn_event_client_actions_model_json).__dict__
+ message_output_debug_turn_event_turn_event_client_actions_model2 = MessageOutputDebugTurnEventTurnEventClientActions(**message_output_debug_turn_event_turn_event_client_actions_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_output_debug_turn_event_turn_event_client_actions_model == message_output_debug_turn_event_turn_event_client_actions_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_output_debug_turn_event_turn_event_client_actions_model_json2 = message_output_debug_turn_event_turn_event_client_actions_model.to_dict()
+ assert message_output_debug_turn_event_turn_event_client_actions_model_json2 == message_output_debug_turn_event_turn_event_client_actions_model_json
+
+
+class TestModel_MessageOutputDebugTurnEventTurnEventConversationalSearchEnd:
+ """
+ Test Class for MessageOutputDebugTurnEventTurnEventConversationalSearchEnd
+ """
+
+ def test_message_output_debug_turn_event_turn_event_conversational_search_end_serialization(self):
+ """
+ Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventConversationalSearchEnd
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ # Construct a json representation of a MessageOutputDebugTurnEventTurnEventConversationalSearchEnd model
+ message_output_debug_turn_event_turn_event_conversational_search_end_model_json = {}
+ message_output_debug_turn_event_turn_event_conversational_search_end_model_json['event'] = 'conversational_search_end'
+ message_output_debug_turn_event_turn_event_conversational_search_end_model_json['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_turn_event_conversational_search_end_model_json['condition_type'] = 'user_defined'
+
+ # Construct a model instance of MessageOutputDebugTurnEventTurnEventConversationalSearchEnd by calling from_dict on the json representation
+ message_output_debug_turn_event_turn_event_conversational_search_end_model = MessageOutputDebugTurnEventTurnEventConversationalSearchEnd.from_dict(message_output_debug_turn_event_turn_event_conversational_search_end_model_json)
+ assert message_output_debug_turn_event_turn_event_conversational_search_end_model != False
+
+ # Construct a second model instance from the first instance's attribute dict, to verify the constructor path
+ message_output_debug_turn_event_turn_event_conversational_search_end_model_dict = MessageOutputDebugTurnEventTurnEventConversationalSearchEnd.from_dict(message_output_debug_turn_event_turn_event_conversational_search_end_model_json).__dict__
+ message_output_debug_turn_event_turn_event_conversational_search_end_model2 = MessageOutputDebugTurnEventTurnEventConversationalSearchEnd(**message_output_debug_turn_event_turn_event_conversational_search_end_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_output_debug_turn_event_turn_event_conversational_search_end_model == message_output_debug_turn_event_turn_event_conversational_search_end_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_output_debug_turn_event_turn_event_conversational_search_end_model_json2 = message_output_debug_turn_event_turn_event_conversational_search_end_model.to_dict()
+ assert message_output_debug_turn_event_turn_event_conversational_search_end_model_json2 == message_output_debug_turn_event_turn_event_conversational_search_end_model_json
+
+
+class TestModel_MessageOutputDebugTurnEventTurnEventGenerativeAICalled:
+ """
+ Test Class for MessageOutputDebugTurnEventTurnEventGenerativeAICalled
+ """
+
+ def test_message_output_debug_turn_event_turn_event_generative_ai_called_serialization(self):
+ """
+ Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventGenerativeAICalled
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ generative_ai_task_confidence_scores_model = {} # GenerativeAITaskConfidenceScores
+ generative_ai_task_confidence_scores_model['pre_gen'] = 72.5
+ generative_ai_task_confidence_scores_model['pre_gen_threshold'] = 72.5
+ generative_ai_task_confidence_scores_model['post_gen'] = 72.5
+ generative_ai_task_confidence_scores_model['post_gen_threshold'] = 72.5
+
+ generative_ai_task_model = {} # GenerativeAITaskContentGroundedAnswering
+ generative_ai_task_model['task'] = 'content_grounded_answering'
+ generative_ai_task_model['is_idk_response'] = True
+ generative_ai_task_model['is_hap_detected'] = True
+ generative_ai_task_model['confidence_scores'] = generative_ai_task_confidence_scores_model
+ generative_ai_task_model['original_response'] = 'testString'
+ generative_ai_task_model['inferred_query'] = 'testString'
+
+ turn_event_generative_ai_called_callout_request_model = {} # TurnEventGenerativeAICalledCalloutRequest
+ turn_event_generative_ai_called_callout_request_model['method'] = 'GET'
+ turn_event_generative_ai_called_callout_request_model['url'] = 'testString'
+ turn_event_generative_ai_called_callout_request_model['port'] = 'testString'
+ turn_event_generative_ai_called_callout_request_model['path'] = 'testString'
+ turn_event_generative_ai_called_callout_request_model['query_parameters'] = 'testString'
+ turn_event_generative_ai_called_callout_request_model['headers'] = {'anyKey': 'anyValue'}
+ turn_event_generative_ai_called_callout_request_model['body'] = {'anyKey': 'anyValue'}
+
+ turn_event_generative_ai_called_callout_response_model = {} # TurnEventGenerativeAICalledCalloutResponse
+ turn_event_generative_ai_called_callout_response_model['body'] = 'testString'
+ turn_event_generative_ai_called_callout_response_model['status_code'] = 38
+
+ turn_event_generative_ai_called_callout_search_model = {} # TurnEventGenerativeAICalledCalloutSearch
+ turn_event_generative_ai_called_callout_search_model['engine'] = 'testString'
+ turn_event_generative_ai_called_callout_search_model['index'] = 'testString'
+ turn_event_generative_ai_called_callout_search_model['query'] = 'testString'
+ turn_event_generative_ai_called_callout_search_model['request'] = turn_event_generative_ai_called_callout_request_model
+ turn_event_generative_ai_called_callout_search_model['response'] = turn_event_generative_ai_called_callout_response_model
+
+ turn_event_generative_ai_called_callout_llm_response_model = {} # TurnEventGenerativeAICalledCalloutLlmResponse
+ turn_event_generative_ai_called_callout_llm_response_model['text'] = 'testString'
+ turn_event_generative_ai_called_callout_llm_response_model['response_type'] = 'testString'
+ turn_event_generative_ai_called_callout_llm_response_model['is_idk_response'] = True
+
+ search_results_result_metadata_model = {} # SearchResultsResultMetadata
+ search_results_result_metadata_model['document_retrieval_source'] = 'testString'
+ search_results_result_metadata_model['score'] = 38
+
+ search_results_model = {} # SearchResults
+ search_results_model['result_metadata'] = search_results_result_metadata_model
+ search_results_model['id'] = 'testString'
+ search_results_model['title'] = 'testString'
+ search_results_model['body'] = 'testString'
+
+ turn_event_generative_ai_called_callout_llm_model = {} # TurnEventGenerativeAICalledCalloutLlm
+ turn_event_generative_ai_called_callout_llm_model['type'] = 'testString'
+ turn_event_generative_ai_called_callout_llm_model['model_id'] = 'testString'
+ turn_event_generative_ai_called_callout_llm_model['model_class_id'] = 'testString'
+ turn_event_generative_ai_called_callout_llm_model['generated_token_count'] = 38
+ turn_event_generative_ai_called_callout_llm_model['input_token_count'] = 38
+ turn_event_generative_ai_called_callout_llm_model['success'] = True
+ turn_event_generative_ai_called_callout_llm_model['response'] = turn_event_generative_ai_called_callout_llm_response_model
+ turn_event_generative_ai_called_callout_llm_model['request'] = [search_results_model]
+
+ turn_event_generative_ai_called_callout_model = {} # TurnEventGenerativeAICalledCallout
+ turn_event_generative_ai_called_callout_model['search_called'] = True
+ turn_event_generative_ai_called_callout_model['llm_called'] = True
+ turn_event_generative_ai_called_callout_model['search'] = turn_event_generative_ai_called_callout_search_model
+ turn_event_generative_ai_called_callout_model['llm'] = turn_event_generative_ai_called_callout_llm_model
+ turn_event_generative_ai_called_callout_model['idk_reason_code'] = 'testString'
+
+ turn_event_generative_ai_called_metrics_model = {} # TurnEventGenerativeAICalledMetrics
+ turn_event_generative_ai_called_metrics_model['search_time_ms'] = 72.5
+ turn_event_generative_ai_called_metrics_model['answer_generation_time_ms'] = 72.5
+ turn_event_generative_ai_called_metrics_model['total_time_ms'] = 72.5
+
+ # Construct a json representation of a MessageOutputDebugTurnEventTurnEventGenerativeAICalled model
+ message_output_debug_turn_event_turn_event_generative_ai_called_model_json = {}
+ message_output_debug_turn_event_turn_event_generative_ai_called_model_json['event'] = 'generative_ai_called'
+ message_output_debug_turn_event_turn_event_generative_ai_called_model_json['source'] = {'anyKey': 'anyValue'}
+ message_output_debug_turn_event_turn_event_generative_ai_called_model_json['generative_ai_start_time'] = 'testString'
+ message_output_debug_turn_event_turn_event_generative_ai_called_model_json['generative_ai'] = generative_ai_task_model
+ message_output_debug_turn_event_turn_event_generative_ai_called_model_json['callout'] = turn_event_generative_ai_called_callout_model
+ message_output_debug_turn_event_turn_event_generative_ai_called_model_json['metrics'] = turn_event_generative_ai_called_metrics_model
+
+ # Construct a model instance of MessageOutputDebugTurnEventTurnEventGenerativeAICalled by calling from_dict on the json representation
+ message_output_debug_turn_event_turn_event_generative_ai_called_model = MessageOutputDebugTurnEventTurnEventGenerativeAICalled.from_dict(message_output_debug_turn_event_turn_event_generative_ai_called_model_json)
+ assert message_output_debug_turn_event_turn_event_generative_ai_called_model != False
+
+ # Construct a second model instance from the first instance's attribute dict, to verify the constructor path
+ message_output_debug_turn_event_turn_event_generative_ai_called_model_dict = MessageOutputDebugTurnEventTurnEventGenerativeAICalled.from_dict(message_output_debug_turn_event_turn_event_generative_ai_called_model_json).__dict__
+ message_output_debug_turn_event_turn_event_generative_ai_called_model2 = MessageOutputDebugTurnEventTurnEventGenerativeAICalled(**message_output_debug_turn_event_turn_event_generative_ai_called_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_output_debug_turn_event_turn_event_generative_ai_called_model == message_output_debug_turn_event_turn_event_generative_ai_called_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_output_debug_turn_event_turn_event_generative_ai_called_model_json2 = message_output_debug_turn_event_turn_event_generative_ai_called_model.to_dict()
+ assert message_output_debug_turn_event_turn_event_generative_ai_called_model_json2 == message_output_debug_turn_event_turn_event_generative_ai_called_model_json
+
+
+class TestModel_MessageOutputDebugTurnEventTurnEventHandlerVisited:
+ """
+ Test Class for MessageOutputDebugTurnEventTurnEventHandlerVisited
+ """
+
+ def test_message_output_debug_turn_event_turn_event_handler_visited_serialization(self):
+ """
+ Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventHandlerVisited
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ # Construct a json representation of a MessageOutputDebugTurnEventTurnEventHandlerVisited model
+ message_output_debug_turn_event_turn_event_handler_visited_model_json = {}
+ message_output_debug_turn_event_turn_event_handler_visited_model_json['event'] = 'handler_visited'
+ message_output_debug_turn_event_turn_event_handler_visited_model_json['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_turn_event_handler_visited_model_json['action_start_time'] = 'testString'
+
+ # Construct a model instance of MessageOutputDebugTurnEventTurnEventHandlerVisited by calling from_dict on the json representation
+ message_output_debug_turn_event_turn_event_handler_visited_model = MessageOutputDebugTurnEventTurnEventHandlerVisited.from_dict(message_output_debug_turn_event_turn_event_handler_visited_model_json)
+ assert message_output_debug_turn_event_turn_event_handler_visited_model != False
+
+ # Construct a second model instance from the first instance's attribute dict, to verify the constructor path
+ message_output_debug_turn_event_turn_event_handler_visited_model_dict = MessageOutputDebugTurnEventTurnEventHandlerVisited.from_dict(message_output_debug_turn_event_turn_event_handler_visited_model_json).__dict__
+ message_output_debug_turn_event_turn_event_handler_visited_model2 = MessageOutputDebugTurnEventTurnEventHandlerVisited(**message_output_debug_turn_event_turn_event_handler_visited_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_output_debug_turn_event_turn_event_handler_visited_model == message_output_debug_turn_event_turn_event_handler_visited_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_output_debug_turn_event_turn_event_handler_visited_model_json2 = message_output_debug_turn_event_turn_event_handler_visited_model.to_dict()
+ assert message_output_debug_turn_event_turn_event_handler_visited_model_json2 == message_output_debug_turn_event_turn_event_handler_visited_model_json
+
+
+class TestModel_MessageOutputDebugTurnEventTurnEventManualRoute:
+ """
+ Test Class for MessageOutputDebugTurnEventTurnEventManualRoute
+ """
+
+ def test_message_output_debug_turn_event_turn_event_manual_route_serialization(self):
+ """
+ Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventManualRoute
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ turn_event_step_source_model = {} # TurnEventStepSource
+ turn_event_step_source_model['type'] = 'step'
+ turn_event_step_source_model['action'] = 'testString'
+ turn_event_step_source_model['action_title'] = 'testString'
+ turn_event_step_source_model['step'] = 'testString'
+ turn_event_step_source_model['is_ai_guided'] = True
+ turn_event_step_source_model['is_skill_based'] = True
+
+ # Construct a json representation of a MessageOutputDebugTurnEventTurnEventManualRoute model
+ message_output_debug_turn_event_turn_event_manual_route_model_json = {}
+ message_output_debug_turn_event_turn_event_manual_route_model_json['event'] = 'manual_route'
+ message_output_debug_turn_event_turn_event_manual_route_model_json['source'] = turn_event_step_source_model
+ message_output_debug_turn_event_turn_event_manual_route_model_json['condition_type'] = 'user_defined'
+ message_output_debug_turn_event_turn_event_manual_route_model_json['action_start_time'] = 'testString'
+ message_output_debug_turn_event_turn_event_manual_route_model_json['route_name'] = 'testString'
+
+ # Construct a model instance of MessageOutputDebugTurnEventTurnEventManualRoute by calling from_dict on the json representation
+ message_output_debug_turn_event_turn_event_manual_route_model = MessageOutputDebugTurnEventTurnEventManualRoute.from_dict(message_output_debug_turn_event_turn_event_manual_route_model_json)
+ assert message_output_debug_turn_event_turn_event_manual_route_model != False
+
+ # Construct a second model instance from the first instance's attribute dict, to verify the constructor path
+ message_output_debug_turn_event_turn_event_manual_route_model_dict = MessageOutputDebugTurnEventTurnEventManualRoute.from_dict(message_output_debug_turn_event_turn_event_manual_route_model_json).__dict__
+ message_output_debug_turn_event_turn_event_manual_route_model2 = MessageOutputDebugTurnEventTurnEventManualRoute(**message_output_debug_turn_event_turn_event_manual_route_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_output_debug_turn_event_turn_event_manual_route_model == message_output_debug_turn_event_turn_event_manual_route_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_output_debug_turn_event_turn_event_manual_route_model_json2 = message_output_debug_turn_event_turn_event_manual_route_model.to_dict()
+ assert message_output_debug_turn_event_turn_event_manual_route_model_json2 == message_output_debug_turn_event_turn_event_manual_route_model_json
+
+
class TestModel_MessageOutputDebugTurnEventTurnEventNodeVisited:
    """
    Test Class for MessageOutputDebugTurnEventTurnEventNodeVisited
    """

    def test_message_output_debug_turn_event_turn_event_node_visited_serialization(self):
        """
        Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventNodeVisited
        """

        # Nested TurnEventNodeSource dict required by this model.
        node_source = {
            'type': 'dialog_node',
            'dialog_node': 'testString',
            'title': 'testString',
            'condition': 'testString',
        }

        # JSON representation of a MessageOutputDebugTurnEventTurnEventNodeVisited model.
        model_json = {
            'event': 'node_visited',
            'source': node_source,
            'reason': 'welcome',
        }

        # Deserialize the JSON into a model instance.
        model = MessageOutputDebugTurnEventTurnEventNodeVisited.from_dict(model_json)
        assert model != False

        # Build a second instance from the first instance's attribute dict.
        attrs = MessageOutputDebugTurnEventTurnEventNodeVisited.from_dict(model_json).__dict__
        model2 = MessageOutputDebugTurnEventTurnEventNodeVisited(**attrs)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == model_json
+
+
class TestModel_MessageOutputDebugTurnEventTurnEventSearch:
    """
    Test Class for MessageOutputDebugTurnEventTurnEventSearch
    """

    def test_message_output_debug_turn_event_turn_event_search_serialization(self):
        """
        Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventSearch
        """

        # Nested TurnEventActionSource dict required by this model.
        action_source = {
            'type': 'action',
            'action': 'testString',
            'action_title': 'testString',
            'condition': 'testString',
        }

        # Nested TurnEventSearchError dict required by this model.
        search_error = {
            'message': 'testString',
        }

        # JSON representation of a MessageOutputDebugTurnEventTurnEventSearch model.
        model_json = {
            'event': 'search',
            'source': action_source,
            'error': search_error,
        }

        # Deserialize the JSON into a model instance.
        model = MessageOutputDebugTurnEventTurnEventSearch.from_dict(model_json)
        assert model != False

        # Build a second instance from the first instance's attribute dict.
        attrs = MessageOutputDebugTurnEventTurnEventSearch.from_dict(model_json).__dict__
        model2 = MessageOutputDebugTurnEventTurnEventSearch(**attrs)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == model_json
+
+
class TestModel_MessageOutputDebugTurnEventTurnEventStepAnswered:
    """
    Test Class for MessageOutputDebugTurnEventTurnEventStepAnswered
    """

    def test_message_output_debug_turn_event_turn_event_step_answered_serialization(self):
        """
        Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventStepAnswered
        """

        # Nested TurnEventActionSource dict required by this model.
        action_source = {
            'type': 'action',
            'action': 'testString',
            'action_title': 'testString',
            'condition': 'testString',
        }

        # JSON representation of a MessageOutputDebugTurnEventTurnEventStepAnswered model.
        model_json = {
            'event': 'step_answered',
            'source': action_source,
            'condition_type': 'user_defined',
            'action_start_time': 'testString',
            'prompted': True,
        }

        # Deserialize the JSON into a model instance.
        model = MessageOutputDebugTurnEventTurnEventStepAnswered.from_dict(model_json)
        assert model != False

        # Build a second instance from the first instance's attribute dict.
        attrs = MessageOutputDebugTurnEventTurnEventStepAnswered.from_dict(model_json).__dict__
        model2 = MessageOutputDebugTurnEventTurnEventStepAnswered(**attrs)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == model_json
+
+
class TestModel_MessageOutputDebugTurnEventTurnEventStepVisited:
    """
    Test Class for MessageOutputDebugTurnEventTurnEventStepVisited
    """

    def test_message_output_debug_turn_event_turn_event_step_visited_serialization(self):
        """
        Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventStepVisited
        """

        # Nested TurnEventActionSource dict required by this model.
        action_source = {
            'type': 'action',
            'action': 'testString',
            'action_title': 'testString',
            'condition': 'testString',
        }

        # JSON representation of a MessageOutputDebugTurnEventTurnEventStepVisited model.
        model_json = {
            'event': 'step_visited',
            'source': action_source,
            'condition_type': 'user_defined',
            'action_start_time': 'testString',
            'has_question': True,
        }

        # Deserialize the JSON into a model instance.
        model = MessageOutputDebugTurnEventTurnEventStepVisited.from_dict(model_json)
        assert model != False

        # Build a second instance from the first instance's attribute dict.
        attrs = MessageOutputDebugTurnEventTurnEventStepVisited.from_dict(model_json).__dict__
        model2 = MessageOutputDebugTurnEventTurnEventStepVisited(**attrs)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == model_json
+
+
class TestModel_MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied:
    """
    Test Class for MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied
    """

    def test_message_output_debug_turn_event_turn_event_suggestion_intents_denied_serialization(self):
        """
        Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied
        """

        # Nested RuntimeIntent dict required by this model.
        runtime_intent = {
            'intent': 'testString',
            'confidence': 72.5,
            'skill': 'testString',
        }

        # JSON representation of a MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied model.
        model_json = {
            'event': 'suggestion_intents_denied',
            'intents_denied': [runtime_intent],
        }

        # Deserialize the JSON into a model instance.
        model = MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied.from_dict(model_json)
        assert model != False

        # Build a second instance from the first instance's attribute dict.
        attrs = MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied.from_dict(model_json).__dict__
        model2 = MessageOutputDebugTurnEventTurnEventSuggestionIntentsDenied(**attrs)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == model_json
+
+
class TestModel_MessageOutputDebugTurnEventTurnEventTopicSwitchDenied:
    """
    Test Class for MessageOutputDebugTurnEventTurnEventTopicSwitchDenied
    """

    def test_message_output_debug_turn_event_turn_event_topic_switch_denied_serialization(self):
        """
        Test serialization/deserialization for MessageOutputDebugTurnEventTurnEventTopicSwitchDenied
        """

        # Nested TurnEventActionSource dict required by this model.
        action_source = {
            'type': 'action',
            'action': 'testString',
            'action_title': 'testString',
            'condition': 'testString',
        }

        # JSON representation of a MessageOutputDebugTurnEventTurnEventTopicSwitchDenied model.
        model_json = {
            'event': 'topic_switch_denied',
            'source': action_source,
            'condition_type': 'user_defined',
            'reason': 'action_conditions_failed',
        }

        # Deserialize the JSON into a model instance.
        model = MessageOutputDebugTurnEventTurnEventTopicSwitchDenied.from_dict(model_json)
        assert model != False

        # Build a second instance from the first instance's attribute dict.
        attrs = MessageOutputDebugTurnEventTurnEventTopicSwitchDenied.from_dict(model_json).__dict__
        model2 = MessageOutputDebugTurnEventTurnEventTopicSwitchDenied(**attrs)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == model_json
+
+
class TestModel_MessageStreamResponseMessageStreamCompleteItem:
    """
    Test Class for MessageStreamResponseMessageStreamCompleteItem
    """

    def test_message_stream_response_message_stream_complete_item_serialization(self):
        """
        Test serialization/deserialization for MessageStreamResponseMessageStreamCompleteItem
        """

        # Nested Metadata and CompleteItem dicts required by this model.
        metadata = {'id': 38}
        complete_item = {'streaming_metadata': metadata}

        # JSON representation of a MessageStreamResponseMessageStreamCompleteItem model.
        model_json = {
            'complete_item': complete_item,
        }

        # Deserialize the JSON into a model instance.
        model = MessageStreamResponseMessageStreamCompleteItem.from_dict(model_json)
        assert model != False

        # Build a second instance from the first instance's attribute dict.
        attrs = MessageStreamResponseMessageStreamCompleteItem.from_dict(model_json).__dict__
        model2 = MessageStreamResponseMessageStreamCompleteItem(**attrs)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == model_json
+
+
class TestModel_MessageStreamResponseMessageStreamPartialItem:
    """
    Test Class for MessageStreamResponseMessageStreamPartialItem
    """

    def test_message_stream_response_message_stream_partial_item_serialization(self):
        """
        Test serialization/deserialization for MessageStreamResponseMessageStreamPartialItem
        """

        # Nested Metadata and PartialItem dicts required by this model.
        metadata = {'id': 38}
        partial_item = {
            'response_type': 'testString',
            'text': 'testString',
            'streaming_metadata': metadata,
        }

        # JSON representation of a MessageStreamResponseMessageStreamPartialItem model.
        model_json = {
            'partial_item': partial_item,
        }

        # Deserialize the JSON into a model instance.
        model = MessageStreamResponseMessageStreamPartialItem.from_dict(model_json)
        assert model != False

        # Build a second instance from the first instance's attribute dict.
        attrs = MessageStreamResponseMessageStreamPartialItem.from_dict(model_json).__dict__
        model2 = MessageStreamResponseMessageStreamPartialItem(**attrs)

        # Both construction paths must yield equivalent instances.
        assert model == model2

        # Serializing back to a dict must not lose any data.
        assert model.to_dict() == model_json
+
+
+class TestModel_MessageStreamResponseStatefulMessageStreamFinalResponse:
+ """
+ Test Class for MessageStreamResponseStatefulMessageStreamFinalResponse
+ """
+
+ def test_message_stream_response_stateful_message_stream_final_response_serialization(self):
+ """
+ Test serialization/deserialization for MessageStreamResponseStatefulMessageStreamFinalResponse
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem
+ response_generic_citation_ranges_item_model['start'] = 38
+ response_generic_citation_ranges_item_model['end'] = 38
+
+ response_generic_citation_model = {} # ResponseGenericCitation
+ response_generic_citation_model['title'] = 'testString'
+ response_generic_citation_model['text'] = 'testString'
+ response_generic_citation_model['body'] = 'testString'
+ response_generic_citation_model['search_result_index'] = 38
+ response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model]
+
+ response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores
+ response_generic_confidence_scores_model['threshold'] = 72.5
+ response_generic_confidence_scores_model['pre_gen'] = 72.5
+ response_generic_confidence_scores_model['post_gen'] = 72.5
+ response_generic_confidence_scores_model['extractiveness'] = 72.5
+
+ search_results_result_metadata_model = {} # SearchResultsResultMetadata
+ search_results_result_metadata_model['document_retrieval_source'] = 'testString'
+ search_results_result_metadata_model['score'] = 38
+
+ search_results_model = {} # SearchResults
+ search_results_model['result_metadata'] = search_results_result_metadata_model
+ search_results_model['id'] = 'testString'
+ search_results_model['title'] = 'testString'
+ search_results_model['body'] = 'testString'
+
+ runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
+ runtime_response_generic_model['response_type'] = 'conversation_search'
+ runtime_response_generic_model['text'] = 'testString'
+ runtime_response_generic_model['citations_title'] = 'testString'
+ runtime_response_generic_model['citations'] = [response_generic_citation_model]
+ runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model
+ runtime_response_generic_model['response_length_option'] = 'testString'
+ runtime_response_generic_model['search_results'] = [search_results_model]
+ runtime_response_generic_model['disclaimer'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ dialog_node_action_model = {} # DialogNodeAction
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ dialog_node_visited_model = {} # DialogNodeVisited
+ dialog_node_visited_model['dialog_node'] = 'testString'
+ dialog_node_visited_model['title'] = 'testString'
+ dialog_node_visited_model['conditions'] = 'testString'
+
+ log_message_source_model = {} # LogMessageSourceDialogNode
+ log_message_source_model['type'] = 'dialog_node'
+ log_message_source_model['dialog_node'] = 'testString'
+
+ dialog_log_message_model = {} # DialogLogMessage
+ dialog_log_message_model['level'] = 'info'
+ dialog_log_message_model['message'] = 'testString'
+ dialog_log_message_model['code'] = 'testString'
+ dialog_log_message_model['source'] = log_message_source_model
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited
+ message_output_debug_turn_event_model['event'] = 'action_visited'
+ message_output_debug_turn_event_model['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_model['action_start_time'] = 'testString'
+ message_output_debug_turn_event_model['condition_type'] = 'user_defined'
+ message_output_debug_turn_event_model['reason'] = 'intent'
+ message_output_debug_turn_event_model['result_variable'] = 'testString'
+
+ message_output_debug_model = {} # MessageOutputDebug
+ message_output_debug_model['nodes_visited'] = [dialog_node_visited_model]
+ message_output_debug_model['log_messages'] = [dialog_log_message_model]
+ message_output_debug_model['branch_exited'] = True
+ message_output_debug_model['branch_exited_reason'] = 'completed'
+ message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model]
+
+ message_output_spelling_model = {} # MessageOutputSpelling
+ message_output_spelling_model['text'] = 'testString'
+ message_output_spelling_model['original_text'] = 'testString'
+ message_output_spelling_model['suggested_text'] = 'testString'
+
+ message_output_llm_metadata_model = {} # MessageOutputLLMMetadata
+ message_output_llm_metadata_model['task'] = 'testString'
+ message_output_llm_metadata_model['model_id'] = 'testString'
+
+ metadata_model = {} # Metadata
+ metadata_model['id'] = 38
+
+ message_stream_metadata_model = {} # MessageStreamMetadata
+ message_stream_metadata_model['streaming_metadata'] = metadata_model
+
+ final_response_output_model = {} # FinalResponseOutput
+ final_response_output_model['generic'] = [runtime_response_generic_model]
+ final_response_output_model['intents'] = [runtime_intent_model]
+ final_response_output_model['entities'] = [runtime_entity_model]
+ final_response_output_model['actions'] = [dialog_node_action_model]
+ final_response_output_model['debug'] = message_output_debug_model
+ final_response_output_model['user_defined'] = {'anyKey': 'anyValue'}
+ final_response_output_model['spelling'] = message_output_spelling_model
+ final_response_output_model['llm_metadata'] = [message_output_llm_metadata_model]
+ final_response_output_model['streaming_metadata'] = message_stream_metadata_model
+
+ message_context_global_system_model = {} # MessageContextGlobalSystem
+ message_context_global_system_model['timezone'] = 'testString'
+ message_context_global_system_model['user_id'] = 'testString'
+ message_context_global_system_model['turn_count'] = 38
+ message_context_global_system_model['locale'] = 'en-us'
+ message_context_global_system_model['reference_time'] = 'testString'
+ message_context_global_system_model['session_start_time'] = 'testString'
+ message_context_global_system_model['state'] = 'testString'
+ message_context_global_system_model['skip_user_input'] = True
+
+ message_context_global_model = {} # MessageContextGlobal
+ message_context_global_model['system'] = message_context_global_system_model
+
+ message_context_skill_system_model = {} # MessageContextSkillSystem
+ message_context_skill_system_model['state'] = 'testString'
+ message_context_skill_system_model['foo'] = 'testString'
+
+ message_context_dialog_skill_model = {} # MessageContextDialogSkill
+ message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_dialog_skill_model['system'] = message_context_skill_system_model
+
+ message_context_action_skill_model = {} # MessageContextActionSkill
+ message_context_action_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_action_skill_model['system'] = message_context_skill_system_model
+ message_context_action_skill_model['action_variables'] = {'anyKey': 'anyValue'}
+ message_context_action_skill_model['skill_variables'] = {'anyKey': 'anyValue'}
+
+ message_context_skills_model = {} # MessageContextSkills
+ message_context_skills_model['main skill'] = message_context_dialog_skill_model
+ message_context_skills_model['actions skill'] = message_context_action_skill_model
+
+ message_context_model = {} # MessageContext
+ message_context_model['global'] = message_context_global_model
+ message_context_model['skills'] = message_context_skills_model
+ message_context_model['integrations'] = {'anyKey': 'anyValue'}
+
+ message_output_model = {} # MessageOutput
+ message_output_model['generic'] = [runtime_response_generic_model]
+ message_output_model['intents'] = [runtime_intent_model]
+ message_output_model['entities'] = [runtime_entity_model]
+ message_output_model['actions'] = [dialog_node_action_model]
+ message_output_model['debug'] = message_output_debug_model
+ message_output_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_output_model['spelling'] = message_output_spelling_model
+ message_output_model['llm_metadata'] = [message_output_llm_metadata_model]
+
+ message_input_attachment_model = {} # MessageInputAttachment
+ message_input_attachment_model['url'] = 'testString'
+ message_input_attachment_model['media_type'] = 'testString'
+
+ request_analytics_model = {} # RequestAnalytics
+ request_analytics_model['browser'] = 'testString'
+ request_analytics_model['device'] = 'testString'
+ request_analytics_model['pageUrl'] = 'testString'
+
+ message_input_options_spelling_model = {} # MessageInputOptionsSpelling
+ message_input_options_spelling_model['suggestions'] = True
+ message_input_options_spelling_model['auto_correct'] = True
+
+ message_input_options_model = {} # MessageInputOptions
+ message_input_options_model['restart'] = False
+ message_input_options_model['alternate_intents'] = False
+ message_input_options_model['async_callout'] = False
+ message_input_options_model['spelling'] = message_input_options_spelling_model
+ message_input_options_model['debug'] = False
+ message_input_options_model['return_context'] = False
+ message_input_options_model['export'] = False
+
+ message_input_model = {} # MessageInput
+ message_input_model['message_type'] = 'text'
+ message_input_model['text'] = 'testString'
+ message_input_model['intents'] = [runtime_intent_model]
+ message_input_model['entities'] = [runtime_entity_model]
+ message_input_model['suggestion_id'] = 'testString'
+ message_input_model['attachments'] = [message_input_attachment_model]
+ message_input_model['analytics'] = request_analytics_model
+ message_input_model['options'] = message_input_options_model
+
+ final_response_model = {} # FinalResponse
+ final_response_model['output'] = final_response_output_model
+ final_response_model['context'] = message_context_model
+ final_response_model['user_id'] = 'testString'
+ final_response_model['masked_output'] = message_output_model
+ final_response_model['masked_input'] = message_input_model
+
+ # Construct a json representation of a MessageStreamResponseStatefulMessageStreamFinalResponse model
+ message_stream_response_stateful_message_stream_final_response_model_json = {}
+ message_stream_response_stateful_message_stream_final_response_model_json['final_response'] = final_response_model
+
+ # Construct a model instance of MessageStreamResponseStatefulMessageStreamFinalResponse by calling from_dict on the json representation
+ message_stream_response_stateful_message_stream_final_response_model = MessageStreamResponseStatefulMessageStreamFinalResponse.from_dict(message_stream_response_stateful_message_stream_final_response_model_json)
+ assert message_stream_response_stateful_message_stream_final_response_model != False
+
+ # Construct a model instance of MessageStreamResponseStatefulMessageStreamFinalResponse by calling from_dict on the json representation
+ message_stream_response_stateful_message_stream_final_response_model_dict = MessageStreamResponseStatefulMessageStreamFinalResponse.from_dict(message_stream_response_stateful_message_stream_final_response_model_json).__dict__
+ message_stream_response_stateful_message_stream_final_response_model2 = MessageStreamResponseStatefulMessageStreamFinalResponse(**message_stream_response_stateful_message_stream_final_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert message_stream_response_stateful_message_stream_final_response_model == message_stream_response_stateful_message_stream_final_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ message_stream_response_stateful_message_stream_final_response_model_json2 = message_stream_response_stateful_message_stream_final_response_model.to_dict()
+ assert message_stream_response_stateful_message_stream_final_response_model_json2 == message_stream_response_stateful_message_stream_final_response_model_json
+
+
class TestModel_ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode:
    """
    Test Class for ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode
    """

    def test_provider_authentication_o_auth2_flows_provider_authentication_o_auth2_authorization_code_serialization(self):
        """
        Test serialization/deserialization for ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode
        """

        # Construct a json representation of a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode model
        model_json = {}
        model_json['token_url'] = 'testString'
        model_json['refresh_url'] = 'testString'
        model_json['client_auth_type'] = 'Body'
        model_json['content_type'] = 'testString'
        model_json['header_prefix'] = 'testString'
        model_json['authorization_url'] = 'testString'
        model_json['redirect_uri'] = 'testString'

        # Deserialize: from_dict must produce a real instance (was `!= False`,
        # a weak E712-style singleton comparison that passes for any truthy-ish value).
        model = ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode.from_dict(model_json)
        assert model is not None

        # Construct a second instance from the first one's attribute dict and
        # verify the two instances compare equal (exercises __init__ and __eq__).
        model2 = ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2AuthorizationCode(**model.__dict__)
        assert model == model2

        # Convert model instance back to dict and verify no loss of data
        model_json2 = model.to_dict()
        assert model_json2 == model_json
+
+
class TestModel_ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials:
    """
    Test Class for ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials
    """

    def test_provider_authentication_o_auth2_flows_provider_authentication_o_auth2_client_credentials_serialization(self):
        """
        Test serialization/deserialization for ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials
        """

        # Construct a json representation of a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials model
        model_json = {}
        model_json['token_url'] = 'testString'
        model_json['refresh_url'] = 'testString'
        model_json['client_auth_type'] = 'Body'
        model_json['content_type'] = 'testString'
        model_json['header_prefix'] = 'testString'

        # Deserialize: from_dict must produce a real instance (was `!= False`,
        # a weak E712-style singleton comparison).
        model = ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials.from_dict(model_json)
        assert model is not None

        # Construct a second instance from the first one's attribute dict and
        # verify the two instances compare equal (exercises __init__ and __eq__).
        model2 = ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2ClientCredentials(**model.__dict__)
        assert model == model2

        # Convert model instance back to dict and verify no loss of data
        model_json2 = model.to_dict()
        assert model_json2 == model_json
+
+
class TestModel_ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password:
    """
    Test Class for ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password
    """

    def test_provider_authentication_o_auth2_flows_provider_authentication_o_auth2_password_serialization(self):
        """
        Test serialization/deserialization for ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password
        """

        # Construct dict forms of any model objects needed in order to build this model.

        username_model = {}  # ProviderAuthenticationOAuth2PasswordUsername
        username_model['type'] = 'value'
        username_model['value'] = 'testString'

        # Construct a json representation of a ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password model
        model_json = {}
        model_json['token_url'] = 'testString'
        model_json['refresh_url'] = 'testString'
        model_json['client_auth_type'] = 'Body'
        model_json['content_type'] = 'testString'
        model_json['header_prefix'] = 'testString'
        model_json['username'] = username_model

        # Deserialize: from_dict must produce a real instance (was `!= False`,
        # a weak E712-style singleton comparison).
        model = ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password.from_dict(model_json)
        assert model is not None

        # Construct a second instance from the first one's attribute dict and
        # verify the two instances compare equal (exercises __init__ and __eq__).
        model2 = ProviderAuthenticationOAuth2FlowsProviderAuthenticationOAuth2Password(**model.__dict__)
        assert model == model2

        # Convert model instance back to dict and verify no loss of data
        model_json2 = model.to_dict()
        assert model_json2 == model_json
+
+
class TestModel_ProviderPrivateAuthenticationBasicFlow:
    """
    Test Class for ProviderPrivateAuthenticationBasicFlow
    """

    def test_provider_private_authentication_basic_flow_serialization(self):
        """
        Test serialization/deserialization for ProviderPrivateAuthenticationBasicFlow
        """

        # Construct dict forms of any model objects needed in order to build this model.

        type_and_value_model = {}  # ProviderAuthenticationTypeAndValue
        type_and_value_model['type'] = 'value'
        type_and_value_model['value'] = 'testString'

        # Construct a json representation of a ProviderPrivateAuthenticationBasicFlow model
        model_json = {}
        model_json['password'] = type_and_value_model

        # Deserialize: from_dict must produce a real instance (was `!= False`,
        # a weak E712-style singleton comparison).
        model = ProviderPrivateAuthenticationBasicFlow.from_dict(model_json)
        assert model is not None

        # Construct a second instance from the first one's attribute dict and
        # verify the two instances compare equal (exercises __init__ and __eq__).
        model2 = ProviderPrivateAuthenticationBasicFlow(**model.__dict__)
        assert model == model2

        # Convert model instance back to dict and verify no loss of data
        model_json2 = model.to_dict()
        assert model_json2 == model_json
+
+
class TestModel_ProviderPrivateAuthenticationBearerFlow:
    """
    Test Class for ProviderPrivateAuthenticationBearerFlow
    """

    def test_provider_private_authentication_bearer_flow_serialization(self):
        """
        Test serialization/deserialization for ProviderPrivateAuthenticationBearerFlow
        """

        # Construct dict forms of any model objects needed in order to build this model.

        type_and_value_model = {}  # ProviderAuthenticationTypeAndValue
        type_and_value_model['type'] = 'value'
        type_and_value_model['value'] = 'testString'

        # Construct a json representation of a ProviderPrivateAuthenticationBearerFlow model
        model_json = {}
        model_json['token'] = type_and_value_model

        # Deserialize: from_dict must produce a real instance (was `!= False`,
        # a weak E712-style singleton comparison).
        model = ProviderPrivateAuthenticationBearerFlow.from_dict(model_json)
        assert model is not None

        # Construct a second instance from the first one's attribute dict and
        # verify the two instances compare equal (exercises __init__ and __eq__).
        model2 = ProviderPrivateAuthenticationBearerFlow(**model.__dict__)
        assert model == model2

        # Convert model instance back to dict and verify no loss of data
        model_json2 = model.to_dict()
        assert model_json2 == model_json
+
+
class TestModel_ProviderPrivateAuthenticationOAuth2Flow:
    """
    Test Class for ProviderPrivateAuthenticationOAuth2Flow
    """

    def test_provider_private_authentication_o_auth2_flow_serialization(self):
        """
        Test serialization/deserialization for ProviderPrivateAuthenticationOAuth2Flow
        """

        # Construct dict forms of any model objects needed in order to build this model.

        password_model = {}  # ProviderPrivateAuthenticationOAuth2PasswordPassword
        password_model['type'] = 'value'
        password_model['value'] = 'testString'

        flows_model = {}  # ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password
        flows_model['client_id'] = 'testString'
        flows_model['client_secret'] = 'testString'
        flows_model['access_token'] = 'testString'
        flows_model['refresh_token'] = 'testString'
        flows_model['password'] = password_model

        # Construct a json representation of a ProviderPrivateAuthenticationOAuth2Flow model
        model_json = {}
        model_json['flows'] = flows_model

        # Deserialize: from_dict must produce a real instance (was `!= False`,
        # a weak E712-style singleton comparison).
        model = ProviderPrivateAuthenticationOAuth2Flow.from_dict(model_json)
        assert model is not None

        # Construct a second instance from the first one's attribute dict and
        # verify the two instances compare equal (exercises __init__ and __eq__).
        model2 = ProviderPrivateAuthenticationOAuth2Flow(**model.__dict__)
        assert model == model2

        # Convert model instance back to dict and verify no loss of data
        model_json2 = model.to_dict()
        assert model_json2 == model_json
+
+
class TestModel_ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode:
    """
    Test Class for ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode
    """

    def test_provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_authorization_code_serialization(self):
        """
        Test serialization/deserialization for ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode
        """

        # Construct a json representation of a ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode model
        model_json = {}
        model_json['client_id'] = 'testString'
        model_json['client_secret'] = 'testString'
        model_json['access_token'] = 'testString'
        model_json['refresh_token'] = 'testString'
        model_json['authorization_code'] = 'testString'

        # Deserialize: from_dict must produce a real instance (was `!= False`,
        # a weak E712-style singleton comparison).
        model = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode.from_dict(model_json)
        assert model is not None

        # Construct a second instance from the first one's attribute dict and
        # verify the two instances compare equal (exercises __init__ and __eq__).
        model2 = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2AuthorizationCode(**model.__dict__)
        assert model == model2

        # Convert model instance back to dict and verify no loss of data
        model_json2 = model.to_dict()
        assert model_json2 == model_json
+
+
class TestModel_ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials:
    """
    Test Class for ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials
    """

    def test_provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_client_credentials_serialization(self):
        """
        Test serialization/deserialization for ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials
        """

        # Construct a json representation of a ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials model
        model_json = {}
        model_json['client_id'] = 'testString'
        model_json['client_secret'] = 'testString'
        model_json['access_token'] = 'testString'
        model_json['refresh_token'] = 'testString'

        # Deserialize: from_dict must produce a real instance (was `!= False`,
        # a weak E712-style singleton comparison).
        model = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials.from_dict(model_json)
        assert model is not None

        # Construct a second instance from the first one's attribute dict and
        # verify the two instances compare equal (exercises __init__ and __eq__).
        model2 = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2ClientCredentials(**model.__dict__)
        assert model == model2

        # Convert model instance back to dict and verify no loss of data
        model_json2 = model.to_dict()
        assert model_json2 == model_json
+
+
class TestModel_ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password:
    """
    Test Class for ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password
    """

    def test_provider_private_authentication_o_auth2_flow_flows_provider_private_authentication_o_auth2_password_serialization(self):
        """
        Test serialization/deserialization for ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password
        """
        # Dict form of the nested ProviderPrivateAuthenticationOAuth2PasswordPassword model.
        password_json = {
            'type': 'value',
            'value': 'testString',
        }

        # JSON representation of the flow model under test.
        model_json = {
            'client_id': 'testString',
            'client_secret': 'testString',
            'access_token': 'testString',
            'refresh_token': 'testString',
            'password': password_json,
        }

        # Deserialize: from_dict must yield a truthy model instance.
        model = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict;
        # both construction paths must produce equal objects.
        model2 = ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password(
            **ProviderPrivateAuthenticationOAuth2FlowFlowsProviderPrivateAuthenticationOAuth2Password.from_dict(model_json).__dict__
        )
        assert model == model2

        # Serialize back: to_dict must round-trip without loss of data.
        assert model.to_dict() == model_json
+
class TestModel_RuntimeResponseGenericRuntimeResponseTypeAudio:
    """
    Test Class for RuntimeResponseGenericRuntimeResponseTypeAudio
    """

    def test_runtime_response_generic_runtime_response_type_audio_serialization(self):
        """
        Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeAudio
        """
        # Dict form of the nested ResponseGenericChannel model.
        channel_json = {
            'channel': 'testString',
        }

        # JSON representation of the audio response model under test.
        model_json = {
            'response_type': 'audio',
            'source': 'testString',
            'title': 'testString',
            'description': 'testString',
            'channels': [channel_json],
            'channel_options': {'anyKey': 'anyValue'},
            'alt_text': 'testString',
        }

        # Deserialize: from_dict must yield a truthy model instance.
        model = RuntimeResponseGenericRuntimeResponseTypeAudio.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict;
        # both construction paths must produce equal objects.
        model2 = RuntimeResponseGenericRuntimeResponseTypeAudio(
            **RuntimeResponseGenericRuntimeResponseTypeAudio.from_dict(model_json).__dict__
        )
        assert model == model2

        # Serialize back: to_dict must round-trip without loss of data.
        assert model.to_dict() == model_json
+
+
class TestModel_RuntimeResponseGenericRuntimeResponseTypeChannelTransfer:
    """
    Test Class for RuntimeResponseGenericRuntimeResponseTypeChannelTransfer
    """

    def test_runtime_response_generic_runtime_response_type_channel_transfer_serialization(self):
        """
        Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeChannelTransfer
        """
        # Nested model dicts: ChannelTransferTargetChat -> ChannelTransferTarget -> ChannelTransferInfo.
        chat_json = {
            'url': 'testString',
        }
        target_json = {
            'chat': chat_json,
        }
        transfer_info_json = {
            'target': target_json,
        }

        # Dict form of the nested ResponseGenericChannel model.
        channel_json = {
            'channel': 'testString',
        }

        # JSON representation of the channel-transfer response model under test.
        model_json = {
            'response_type': 'channel_transfer',
            'message_to_user': 'testString',
            'transfer_info': transfer_info_json,
            'channels': [channel_json],
        }

        # Deserialize: from_dict must yield a truthy model instance.
        model = RuntimeResponseGenericRuntimeResponseTypeChannelTransfer.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict;
        # both construction paths must produce equal objects.
        model2 = RuntimeResponseGenericRuntimeResponseTypeChannelTransfer(
            **RuntimeResponseGenericRuntimeResponseTypeChannelTransfer.from_dict(model_json).__dict__
        )
        assert model == model2

        # Serialize back: to_dict must round-trip without loss of data.
        assert model.to_dict() == model_json
+
+
class TestModel_RuntimeResponseGenericRuntimeResponseTypeConnectToAgent:
    """
    Test Class for RuntimeResponseGenericRuntimeResponseTypeConnectToAgent
    """

    def test_runtime_response_generic_runtime_response_type_connect_to_agent_serialization(self):
        """
        Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeConnectToAgent
        """
        # Dict form of the nested AgentAvailabilityMessage model
        # (reused for both the available and unavailable slots).
        availability_json = {
            'message': 'testString',
        }

        # Dict form of the nested DialogNodeOutputConnectToAgentTransferInfo model.
        transfer_info_json = {
            'target': {'key1': {'anyKey': 'anyValue'}},
        }

        # Dict form of the nested ResponseGenericChannel model.
        channel_json = {
            'channel': 'testString',
        }

        # JSON representation of the connect-to-agent response model under test.
        model_json = {
            'response_type': 'connect_to_agent',
            'message_to_human_agent': 'testString',
            'agent_available': availability_json,
            'agent_unavailable': availability_json,
            'transfer_info': transfer_info_json,
            'topic': 'testString',
            'channels': [channel_json],
        }

        # Deserialize: from_dict must yield a truthy model instance.
        model = RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict;
        # both construction paths must produce equal objects.
        model2 = RuntimeResponseGenericRuntimeResponseTypeConnectToAgent(
            **RuntimeResponseGenericRuntimeResponseTypeConnectToAgent.from_dict(model_json).__dict__
        )
        assert model == model2

        # Serialize back: to_dict must round-trip without loss of data.
        assert model.to_dict() == model_json
+
class TestModel_RuntimeResponseGenericRuntimeResponseTypeConversationalSearch:
    """
    Test Class for RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
    """

    def test_runtime_response_generic_runtime_response_type_conversational_search_serialization(self):
        """
        Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
        """
        # Nested citation structures: ResponseGenericCitationRangesItem inside ResponseGenericCitation.
        ranges_item_json = {
            'start': 38,
            'end': 38,
        }
        citation_json = {
            'title': 'testString',
            'text': 'testString',
            'body': 'testString',
            'search_result_index': 38,
            'ranges': [ranges_item_json],
        }

        # Dict form of the nested ResponseGenericConfidenceScores model.
        confidence_scores_json = {
            'threshold': 72.5,
            'pre_gen': 72.5,
            'post_gen': 72.5,
            'extractiveness': 72.5,
        }

        # Nested search result structures: SearchResultsResultMetadata inside SearchResults.
        result_metadata_json = {
            'document_retrieval_source': 'testString',
            'score': 38,
        }
        search_result_json = {
            'result_metadata': result_metadata_json,
            'id': 'testString',
            'title': 'testString',
            'body': 'testString',
        }

        # JSON representation of the conversational-search response model under test.
        # NOTE(review): 'conversation_search' differs from the class name's
        # "ConversationalSearch" spelling — presumably intentional test data; verify
        # against the model's response_type constant.
        model_json = {
            'response_type': 'conversation_search',
            'text': 'testString',
            'citations_title': 'testString',
            'citations': [citation_json],
            'confidence_scores': confidence_scores_json,
            'response_length_option': 'testString',
            'search_results': [search_result_json],
            'disclaimer': 'testString',
        }

        # Deserialize: from_dict must yield a truthy model instance.
        model = RuntimeResponseGenericRuntimeResponseTypeConversationalSearch.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict;
        # both construction paths must produce equal objects.
        model2 = RuntimeResponseGenericRuntimeResponseTypeConversationalSearch(
            **RuntimeResponseGenericRuntimeResponseTypeConversationalSearch.from_dict(model_json).__dict__
        )
        assert model == model2

        # Serialize back: to_dict must round-trip without loss of data.
        assert model.to_dict() == model_json
+
+
class TestModel_RuntimeResponseGenericRuntimeResponseTypeDate:
    """
    Test Class for RuntimeResponseGenericRuntimeResponseTypeDate
    """

    def test_runtime_response_generic_runtime_response_type_date_serialization(self):
        """
        Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeDate
        """
        # JSON representation of the date response model under test
        # (response_type is its only property).
        model_json = {
            'response_type': 'date',
        }

        # Deserialize: from_dict must yield a truthy model instance.
        model = RuntimeResponseGenericRuntimeResponseTypeDate.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict;
        # both construction paths must produce equal objects.
        model2 = RuntimeResponseGenericRuntimeResponseTypeDate(
            **RuntimeResponseGenericRuntimeResponseTypeDate.from_dict(model_json).__dict__
        )
        assert model == model2

        # Serialize back: to_dict must round-trip without loss of data.
        assert model.to_dict() == model_json
+
+
class TestModel_RuntimeResponseGenericRuntimeResponseTypeDtmf:
    """
    Test Class for RuntimeResponseGenericRuntimeResponseTypeDtmf
    """

    def test_runtime_response_generic_runtime_response_type_dtmf_serialization(self):
        """
        Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeDtmf
        """
        # Dict form of the nested DtmfCommandInfo model.
        command_info_json = {
            'type': 'collect',
            'parameters': {'anyKey': 'anyValue'},
        }

        # Dict form of the nested ResponseGenericChannel model.
        channel_json = {
            'channel': 'testString',
        }

        # JSON representation of the DTMF response model under test.
        model_json = {
            'response_type': 'dtmf',
            'command_info': command_info_json,
            'channels': [channel_json],
        }

        # Deserialize: from_dict must yield a truthy model instance.
        model = RuntimeResponseGenericRuntimeResponseTypeDtmf.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict;
        # both construction paths must produce equal objects.
        model2 = RuntimeResponseGenericRuntimeResponseTypeDtmf(
            **RuntimeResponseGenericRuntimeResponseTypeDtmf.from_dict(model_json).__dict__
        )
        assert model == model2

        # Serialize back: to_dict must round-trip without loss of data.
        assert model.to_dict() == model_json
+
+
class TestModel_RuntimeResponseGenericRuntimeResponseTypeEndSession:
    """
    Test Class for RuntimeResponseGenericRuntimeResponseTypeEndSession
    """

    def test_runtime_response_generic_runtime_response_type_end_session_serialization(self):
        """
        Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeEndSession
        """
        # JSON representation of the end-session response model under test.
        model_json = {
            'response_type': 'end_session',
            'channel_options': {'anyKey': 'anyValue'},
        }

        # Deserialize: from_dict must yield a truthy model instance.
        model = RuntimeResponseGenericRuntimeResponseTypeEndSession.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict;
        # both construction paths must produce equal objects.
        model2 = RuntimeResponseGenericRuntimeResponseTypeEndSession(
            **RuntimeResponseGenericRuntimeResponseTypeEndSession.from_dict(model_json).__dict__
        )
        assert model == model2

        # Serialize back: to_dict must round-trip without loss of data.
        assert model.to_dict() == model_json
+
+
class TestModel_RuntimeResponseGenericRuntimeResponseTypeIframe:
    """
    Test Class for RuntimeResponseGenericRuntimeResponseTypeIframe
    """

    def test_runtime_response_generic_runtime_response_type_iframe_serialization(self):
        """
        Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeIframe
        """
        # Dict form of the nested ResponseGenericChannel model.
        channel_json = {
            'channel': 'testString',
        }

        # JSON representation of the iframe response model under test.
        model_json = {
            'response_type': 'iframe',
            'source': 'testString',
            'title': 'testString',
            'description': 'testString',
            'image_url': 'testString',
            'channels': [channel_json],
        }

        # Deserialize: from_dict must yield a truthy model instance.
        model = RuntimeResponseGenericRuntimeResponseTypeIframe.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict;
        # both construction paths must produce equal objects.
        model2 = RuntimeResponseGenericRuntimeResponseTypeIframe(
            **RuntimeResponseGenericRuntimeResponseTypeIframe.from_dict(model_json).__dict__
        )
        assert model == model2

        # Serialize back: to_dict must round-trip without loss of data.
        assert model.to_dict() == model_json
+
+
class TestModel_RuntimeResponseGenericRuntimeResponseTypeImage:
    """
    Test Class for RuntimeResponseGenericRuntimeResponseTypeImage
    """

    def test_runtime_response_generic_runtime_response_type_image_serialization(self):
        """
        Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeImage
        """
        # Dict form of the nested ResponseGenericChannel model.
        channel_json = {
            'channel': 'testString',
        }

        # JSON representation of the image response model under test.
        model_json = {
            'response_type': 'image',
            'source': 'testString',
            'title': 'testString',
            'description': 'testString',
            'channels': [channel_json],
            'alt_text': 'testString',
        }

        # Deserialize: from_dict must yield a truthy model instance.
        model = RuntimeResponseGenericRuntimeResponseTypeImage.from_dict(model_json)
        assert model != False

        # Rebuild a second instance from the first instance's attribute dict;
        # both construction paths must produce equal objects.
        model2 = RuntimeResponseGenericRuntimeResponseTypeImage(
            **RuntimeResponseGenericRuntimeResponseTypeImage.from_dict(model_json).__dict__
        )
        assert model == model2

        # Serialize back: to_dict must round-trip without loss of data.
        assert model.to_dict() == model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeOption:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeOption
+ """
+
+ def test_runtime_response_generic_runtime_response_type_option_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeOption
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ message_input_attachment_model = {} # MessageInputAttachment
+ message_input_attachment_model['url'] = 'testString'
+ message_input_attachment_model['media_type'] = 'testString'
+
+ request_analytics_model = {} # RequestAnalytics
+ request_analytics_model['browser'] = 'testString'
+ request_analytics_model['device'] = 'testString'
+ request_analytics_model['pageUrl'] = 'testString'
+
+ message_input_options_spelling_model = {} # MessageInputOptionsSpelling
+ message_input_options_spelling_model['suggestions'] = True
+ message_input_options_spelling_model['auto_correct'] = True
+
+ message_input_options_model = {} # MessageInputOptions
+ message_input_options_model['restart'] = False
+ message_input_options_model['alternate_intents'] = False
+ message_input_options_model['async_callout'] = False
+ message_input_options_model['spelling'] = message_input_options_spelling_model
+ message_input_options_model['debug'] = False
+ message_input_options_model['return_context'] = False
+ message_input_options_model['export'] = False
+
+ message_input_model = {} # MessageInput
+ message_input_model['message_type'] = 'text'
+ message_input_model['text'] = 'testString'
+ message_input_model['intents'] = [runtime_intent_model]
+ message_input_model['entities'] = [runtime_entity_model]
+ message_input_model['suggestion_id'] = 'testString'
+ message_input_model['attachments'] = [message_input_attachment_model]
+ message_input_model['analytics'] = request_analytics_model
+ message_input_model['options'] = message_input_options_model
+
+ dialog_node_output_options_element_value_model = {} # DialogNodeOutputOptionsElementValue
+ dialog_node_output_options_element_value_model['input'] = message_input_model
+
+ dialog_node_output_options_element_model = {} # DialogNodeOutputOptionsElement
+ dialog_node_output_options_element_model['label'] = 'testString'
+ dialog_node_output_options_element_model['value'] = dialog_node_output_options_element_value_model
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'testString'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeOption model
+ runtime_response_generic_runtime_response_type_option_model_json = {}
+ runtime_response_generic_runtime_response_type_option_model_json['response_type'] = 'option'
+ runtime_response_generic_runtime_response_type_option_model_json['title'] = 'testString'
+ runtime_response_generic_runtime_response_type_option_model_json['description'] = 'testString'
+ runtime_response_generic_runtime_response_type_option_model_json['preference'] = 'dropdown'
+ runtime_response_generic_runtime_response_type_option_model_json['options'] = [dialog_node_output_options_element_model]
+ runtime_response_generic_runtime_response_type_option_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeOption by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_option_model = RuntimeResponseGenericRuntimeResponseTypeOption.from_dict(runtime_response_generic_runtime_response_type_option_model_json)
+ assert runtime_response_generic_runtime_response_type_option_model != False
+
+ # Construct a second model instance of RuntimeResponseGenericRuntimeResponseTypeOption from the first instance's attribute dict, to verify constructor round-trip
+ runtime_response_generic_runtime_response_type_option_model_dict = RuntimeResponseGenericRuntimeResponseTypeOption.from_dict(runtime_response_generic_runtime_response_type_option_model_json).__dict__
+ runtime_response_generic_runtime_response_type_option_model2 = RuntimeResponseGenericRuntimeResponseTypeOption(**runtime_response_generic_runtime_response_type_option_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_option_model == runtime_response_generic_runtime_response_type_option_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_option_model_json2 = runtime_response_generic_runtime_response_type_option_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_option_model_json2 == runtime_response_generic_runtime_response_type_option_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypePause:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypePause
+ """
+
+ def test_runtime_response_generic_runtime_response_type_pause_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypePause
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'testString'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypePause model
+ runtime_response_generic_runtime_response_type_pause_model_json = {}
+ runtime_response_generic_runtime_response_type_pause_model_json['response_type'] = 'pause'
+ runtime_response_generic_runtime_response_type_pause_model_json['time'] = 38
+ runtime_response_generic_runtime_response_type_pause_model_json['typing'] = True
+ runtime_response_generic_runtime_response_type_pause_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypePause by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_pause_model = RuntimeResponseGenericRuntimeResponseTypePause.from_dict(runtime_response_generic_runtime_response_type_pause_model_json)
+ assert runtime_response_generic_runtime_response_type_pause_model != False
+
+ # Construct a second model instance of RuntimeResponseGenericRuntimeResponseTypePause from the first instance's attribute dict, to verify constructor round-trip
+ runtime_response_generic_runtime_response_type_pause_model_dict = RuntimeResponseGenericRuntimeResponseTypePause.from_dict(runtime_response_generic_runtime_response_type_pause_model_json).__dict__
+ runtime_response_generic_runtime_response_type_pause_model2 = RuntimeResponseGenericRuntimeResponseTypePause(**runtime_response_generic_runtime_response_type_pause_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_pause_model == runtime_response_generic_runtime_response_type_pause_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_pause_model_json2 = runtime_response_generic_runtime_response_type_pause_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_pause_model_json2 == runtime_response_generic_runtime_response_type_pause_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeSearch:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeSearch
+ """
+
+ def test_runtime_response_generic_runtime_response_type_search_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeSearch
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ search_result_metadata_model = {} # SearchResultMetadata
+ search_result_metadata_model['confidence'] = 72.5
+ search_result_metadata_model['score'] = 72.5
+
+ search_result_highlight_model = {} # SearchResultHighlight
+ search_result_highlight_model['body'] = ['testString']
+ search_result_highlight_model['title'] = ['testString']
+ search_result_highlight_model['url'] = ['testString']
+ search_result_highlight_model['foo'] = ['testString']
+
+ search_result_answer_model = {} # SearchResultAnswer
+ search_result_answer_model['text'] = 'testString'
+ search_result_answer_model['confidence'] = 0
+
+ search_result_model = {} # SearchResult
+ search_result_model['id'] = 'testString'
+ search_result_model['result_metadata'] = search_result_metadata_model
+ search_result_model['body'] = 'testString'
+ search_result_model['title'] = 'testString'
+ search_result_model['url'] = 'testString'
+ search_result_model['highlight'] = search_result_highlight_model
+ search_result_model['answers'] = [search_result_answer_model]
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'testString'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeSearch model
+ runtime_response_generic_runtime_response_type_search_model_json = {}
+ runtime_response_generic_runtime_response_type_search_model_json['response_type'] = 'search'
+ runtime_response_generic_runtime_response_type_search_model_json['header'] = 'testString'
+ runtime_response_generic_runtime_response_type_search_model_json['primary_results'] = [search_result_model]
+ runtime_response_generic_runtime_response_type_search_model_json['additional_results'] = [search_result_model]
+ runtime_response_generic_runtime_response_type_search_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeSearch by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_search_model = RuntimeResponseGenericRuntimeResponseTypeSearch.from_dict(runtime_response_generic_runtime_response_type_search_model_json)
+ assert runtime_response_generic_runtime_response_type_search_model != False
+
+ # Construct a second model instance of RuntimeResponseGenericRuntimeResponseTypeSearch from the first instance's attribute dict, to verify constructor round-trip
+ runtime_response_generic_runtime_response_type_search_model_dict = RuntimeResponseGenericRuntimeResponseTypeSearch.from_dict(runtime_response_generic_runtime_response_type_search_model_json).__dict__
+ runtime_response_generic_runtime_response_type_search_model2 = RuntimeResponseGenericRuntimeResponseTypeSearch(**runtime_response_generic_runtime_response_type_search_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_search_model == runtime_response_generic_runtime_response_type_search_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_search_model_json2 = runtime_response_generic_runtime_response_type_search_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_search_model_json2 == runtime_response_generic_runtime_response_type_search_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeSuggestion:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeSuggestion
+ """
+
+ def test_runtime_response_generic_runtime_response_type_suggestion_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeSuggestion
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ message_input_attachment_model = {} # MessageInputAttachment
+ message_input_attachment_model['url'] = 'testString'
+ message_input_attachment_model['media_type'] = 'testString'
+
+ request_analytics_model = {} # RequestAnalytics
+ request_analytics_model['browser'] = 'testString'
+ request_analytics_model['device'] = 'testString'
+ request_analytics_model['pageUrl'] = 'testString'
+
+ message_input_options_spelling_model = {} # MessageInputOptionsSpelling
+ message_input_options_spelling_model['suggestions'] = True
+ message_input_options_spelling_model['auto_correct'] = True
+
+ message_input_options_model = {} # MessageInputOptions
+ message_input_options_model['restart'] = False
+ message_input_options_model['alternate_intents'] = False
+ message_input_options_model['async_callout'] = False
+ message_input_options_model['spelling'] = message_input_options_spelling_model
+ message_input_options_model['debug'] = False
+ message_input_options_model['return_context'] = False
+ message_input_options_model['export'] = False
+
+ message_input_model = {} # MessageInput
+ message_input_model['message_type'] = 'text'
+ message_input_model['text'] = 'testString'
+ message_input_model['intents'] = [runtime_intent_model]
+ message_input_model['entities'] = [runtime_entity_model]
+ message_input_model['suggestion_id'] = 'testString'
+ message_input_model['attachments'] = [message_input_attachment_model]
+ message_input_model['analytics'] = request_analytics_model
+ message_input_model['options'] = message_input_options_model
+
+ dialog_suggestion_value_model = {} # DialogSuggestionValue
+ dialog_suggestion_value_model['input'] = message_input_model
+
+ dialog_suggestion_model = {} # DialogSuggestion
+ dialog_suggestion_model['label'] = 'testString'
+ dialog_suggestion_model['value'] = dialog_suggestion_value_model
+ dialog_suggestion_model['output'] = {'anyKey': 'anyValue'}
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'testString'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeSuggestion model
+ runtime_response_generic_runtime_response_type_suggestion_model_json = {}
+ runtime_response_generic_runtime_response_type_suggestion_model_json['response_type'] = 'suggestion'
+ runtime_response_generic_runtime_response_type_suggestion_model_json['title'] = 'testString'
+ runtime_response_generic_runtime_response_type_suggestion_model_json['suggestions'] = [dialog_suggestion_model]
+ runtime_response_generic_runtime_response_type_suggestion_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeSuggestion by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_suggestion_model = RuntimeResponseGenericRuntimeResponseTypeSuggestion.from_dict(runtime_response_generic_runtime_response_type_suggestion_model_json)
+ assert runtime_response_generic_runtime_response_type_suggestion_model != False
+
+ # Construct a second model instance of RuntimeResponseGenericRuntimeResponseTypeSuggestion from the first instance's attribute dict, to verify constructor round-trip
+ runtime_response_generic_runtime_response_type_suggestion_model_dict = RuntimeResponseGenericRuntimeResponseTypeSuggestion.from_dict(runtime_response_generic_runtime_response_type_suggestion_model_json).__dict__
+ runtime_response_generic_runtime_response_type_suggestion_model2 = RuntimeResponseGenericRuntimeResponseTypeSuggestion(**runtime_response_generic_runtime_response_type_suggestion_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_suggestion_model == runtime_response_generic_runtime_response_type_suggestion_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_suggestion_model_json2 = runtime_response_generic_runtime_response_type_suggestion_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_suggestion_model_json2 == runtime_response_generic_runtime_response_type_suggestion_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeText:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeText
+ """
+
+ def test_runtime_response_generic_runtime_response_type_text_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeText
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'testString'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeText model
+ runtime_response_generic_runtime_response_type_text_model_json = {}
+ runtime_response_generic_runtime_response_type_text_model_json['response_type'] = 'text'
+ runtime_response_generic_runtime_response_type_text_model_json['text'] = 'testString'
+ runtime_response_generic_runtime_response_type_text_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeText by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_text_model = RuntimeResponseGenericRuntimeResponseTypeText.from_dict(runtime_response_generic_runtime_response_type_text_model_json)
+ assert runtime_response_generic_runtime_response_type_text_model != False
+
+ # Construct a second model instance of RuntimeResponseGenericRuntimeResponseTypeText from the first instance's attribute dict, to verify constructor round-trip
+ runtime_response_generic_runtime_response_type_text_model_dict = RuntimeResponseGenericRuntimeResponseTypeText.from_dict(runtime_response_generic_runtime_response_type_text_model_json).__dict__
+ runtime_response_generic_runtime_response_type_text_model2 = RuntimeResponseGenericRuntimeResponseTypeText(**runtime_response_generic_runtime_response_type_text_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_text_model == runtime_response_generic_runtime_response_type_text_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_text_model_json2 = runtime_response_generic_runtime_response_type_text_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_text_model_json2 == runtime_response_generic_runtime_response_type_text_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeUserDefined:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeUserDefined
+ """
+
+ def test_runtime_response_generic_runtime_response_type_user_defined_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeUserDefined
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'testString'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeUserDefined model
+ runtime_response_generic_runtime_response_type_user_defined_model_json = {}
+ runtime_response_generic_runtime_response_type_user_defined_model_json['response_type'] = 'user_defined'
+ runtime_response_generic_runtime_response_type_user_defined_model_json['user_defined'] = {'anyKey': 'anyValue'}
+ runtime_response_generic_runtime_response_type_user_defined_model_json['channels'] = [response_generic_channel_model]
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeUserDefined by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_user_defined_model = RuntimeResponseGenericRuntimeResponseTypeUserDefined.from_dict(runtime_response_generic_runtime_response_type_user_defined_model_json)
+ assert runtime_response_generic_runtime_response_type_user_defined_model != False
+
+ # Construct a second model instance of RuntimeResponseGenericRuntimeResponseTypeUserDefined from the first instance's attribute dict, to verify constructor round-trip
+ runtime_response_generic_runtime_response_type_user_defined_model_dict = RuntimeResponseGenericRuntimeResponseTypeUserDefined.from_dict(runtime_response_generic_runtime_response_type_user_defined_model_json).__dict__
+ runtime_response_generic_runtime_response_type_user_defined_model2 = RuntimeResponseGenericRuntimeResponseTypeUserDefined(**runtime_response_generic_runtime_response_type_user_defined_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_user_defined_model == runtime_response_generic_runtime_response_type_user_defined_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_user_defined_model_json2 = runtime_response_generic_runtime_response_type_user_defined_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_user_defined_model_json2 == runtime_response_generic_runtime_response_type_user_defined_model_json
+
+
+class TestModel_RuntimeResponseGenericRuntimeResponseTypeVideo:
+ """
+ Test Class for RuntimeResponseGenericRuntimeResponseTypeVideo
+ """
+
+ def test_runtime_response_generic_runtime_response_type_video_serialization(self):
+ """
+ Test serialization/deserialization for RuntimeResponseGenericRuntimeResponseTypeVideo
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_channel_model = {} # ResponseGenericChannel
+ response_generic_channel_model['channel'] = 'testString'
+
+ # Construct a json representation of a RuntimeResponseGenericRuntimeResponseTypeVideo model
+ runtime_response_generic_runtime_response_type_video_model_json = {}
+ runtime_response_generic_runtime_response_type_video_model_json['response_type'] = 'video'
+ runtime_response_generic_runtime_response_type_video_model_json['source'] = 'testString'
+ runtime_response_generic_runtime_response_type_video_model_json['title'] = 'testString'
+ runtime_response_generic_runtime_response_type_video_model_json['description'] = 'testString'
+ runtime_response_generic_runtime_response_type_video_model_json['channels'] = [response_generic_channel_model]
+ runtime_response_generic_runtime_response_type_video_model_json['channel_options'] = {'anyKey': 'anyValue'}
+ runtime_response_generic_runtime_response_type_video_model_json['alt_text'] = 'testString'
+
+ # Construct a model instance of RuntimeResponseGenericRuntimeResponseTypeVideo by calling from_dict on the json representation
+ runtime_response_generic_runtime_response_type_video_model = RuntimeResponseGenericRuntimeResponseTypeVideo.from_dict(runtime_response_generic_runtime_response_type_video_model_json)
+ assert runtime_response_generic_runtime_response_type_video_model != False
+
+ # Construct a second model instance of RuntimeResponseGenericRuntimeResponseTypeVideo from the first instance's attribute dict, to verify constructor round-trip
+ runtime_response_generic_runtime_response_type_video_model_dict = RuntimeResponseGenericRuntimeResponseTypeVideo.from_dict(runtime_response_generic_runtime_response_type_video_model_json).__dict__
+ runtime_response_generic_runtime_response_type_video_model2 = RuntimeResponseGenericRuntimeResponseTypeVideo(**runtime_response_generic_runtime_response_type_video_model_dict)
+
+ # Verify the model instances are equivalent
+ assert runtime_response_generic_runtime_response_type_video_model == runtime_response_generic_runtime_response_type_video_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ runtime_response_generic_runtime_response_type_video_model_json2 = runtime_response_generic_runtime_response_type_video_model.to_dict()
+ assert runtime_response_generic_runtime_response_type_video_model_json2 == runtime_response_generic_runtime_response_type_video_model_json
+
+
+class TestModel_StatelessMessageStreamResponseMessageStreamCompleteItem:
+ """
+ Test Class for StatelessMessageStreamResponseMessageStreamCompleteItem
+ """
+
+ def test_stateless_message_stream_response_message_stream_complete_item_serialization(self):
+ """
+ Test serialization/deserialization for StatelessMessageStreamResponseMessageStreamCompleteItem
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ metadata_model = {} # Metadata
+ metadata_model['id'] = 38
+
+ complete_item_model = {} # CompleteItem
+ complete_item_model['streaming_metadata'] = metadata_model
+
+ # Construct a json representation of a StatelessMessageStreamResponseMessageStreamCompleteItem model
+ stateless_message_stream_response_message_stream_complete_item_model_json = {}
+ stateless_message_stream_response_message_stream_complete_item_model_json['complete_item'] = complete_item_model
+
+ # Construct a model instance of StatelessMessageStreamResponseMessageStreamCompleteItem by calling from_dict on the json representation
+ stateless_message_stream_response_message_stream_complete_item_model = StatelessMessageStreamResponseMessageStreamCompleteItem.from_dict(stateless_message_stream_response_message_stream_complete_item_model_json)
+ assert stateless_message_stream_response_message_stream_complete_item_model != False
+
+ # Construct a second model instance of StatelessMessageStreamResponseMessageStreamCompleteItem from the first instance's attribute dict, to verify constructor round-trip
+ stateless_message_stream_response_message_stream_complete_item_model_dict = StatelessMessageStreamResponseMessageStreamCompleteItem.from_dict(stateless_message_stream_response_message_stream_complete_item_model_json).__dict__
+ stateless_message_stream_response_message_stream_complete_item_model2 = StatelessMessageStreamResponseMessageStreamCompleteItem(**stateless_message_stream_response_message_stream_complete_item_model_dict)
+
+ # Verify the model instances are equivalent
+ assert stateless_message_stream_response_message_stream_complete_item_model == stateless_message_stream_response_message_stream_complete_item_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ stateless_message_stream_response_message_stream_complete_item_model_json2 = stateless_message_stream_response_message_stream_complete_item_model.to_dict()
+ assert stateless_message_stream_response_message_stream_complete_item_model_json2 == stateless_message_stream_response_message_stream_complete_item_model_json
+
+
+class TestModel_StatelessMessageStreamResponseMessageStreamPartialItem:
+ """
+ Test Class for StatelessMessageStreamResponseMessageStreamPartialItem
+ """
+
+ def test_stateless_message_stream_response_message_stream_partial_item_serialization(self):
+ """
+ Test serialization/deserialization for StatelessMessageStreamResponseMessageStreamPartialItem
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ metadata_model = {} # Metadata
+ metadata_model['id'] = 38
+
+ partial_item_model = {} # PartialItem
+ partial_item_model['response_type'] = 'testString'
+ partial_item_model['text'] = 'testString'
+ partial_item_model['streaming_metadata'] = metadata_model
+
+ # Construct a json representation of a StatelessMessageStreamResponseMessageStreamPartialItem model
+ stateless_message_stream_response_message_stream_partial_item_model_json = {}
+ stateless_message_stream_response_message_stream_partial_item_model_json['partial_item'] = partial_item_model
+
+ # Construct a model instance of StatelessMessageStreamResponseMessageStreamPartialItem by calling from_dict on the json representation
+ stateless_message_stream_response_message_stream_partial_item_model = StatelessMessageStreamResponseMessageStreamPartialItem.from_dict(stateless_message_stream_response_message_stream_partial_item_model_json)
+ assert stateless_message_stream_response_message_stream_partial_item_model != False
+
+ # Construct a model instance of StatelessMessageStreamResponseMessageStreamPartialItem by calling from_dict on the json representation
+ stateless_message_stream_response_message_stream_partial_item_model_dict = StatelessMessageStreamResponseMessageStreamPartialItem.from_dict(stateless_message_stream_response_message_stream_partial_item_model_json).__dict__
+ stateless_message_stream_response_message_stream_partial_item_model2 = StatelessMessageStreamResponseMessageStreamPartialItem(**stateless_message_stream_response_message_stream_partial_item_model_dict)
+
+ # Verify the model instances are equivalent
+ assert stateless_message_stream_response_message_stream_partial_item_model == stateless_message_stream_response_message_stream_partial_item_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ stateless_message_stream_response_message_stream_partial_item_model_json2 = stateless_message_stream_response_message_stream_partial_item_model.to_dict()
+ assert stateless_message_stream_response_message_stream_partial_item_model_json2 == stateless_message_stream_response_message_stream_partial_item_model_json
+
+
+class TestModel_StatelessMessageStreamResponseStatelessMessageStreamFinalResponse:
+ """
+ Test Class for StatelessMessageStreamResponseStatelessMessageStreamFinalResponse
+ """
+
+ def test_stateless_message_stream_response_stateless_message_stream_final_response_serialization(self):
+ """
+ Test serialization/deserialization for StatelessMessageStreamResponseStatelessMessageStreamFinalResponse
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ response_generic_citation_ranges_item_model = {} # ResponseGenericCitationRangesItem
+ response_generic_citation_ranges_item_model['start'] = 38
+ response_generic_citation_ranges_item_model['end'] = 38
+
+ response_generic_citation_model = {} # ResponseGenericCitation
+ response_generic_citation_model['title'] = 'testString'
+ response_generic_citation_model['text'] = 'testString'
+ response_generic_citation_model['body'] = 'testString'
+ response_generic_citation_model['search_result_index'] = 38
+ response_generic_citation_model['ranges'] = [response_generic_citation_ranges_item_model]
+
+ response_generic_confidence_scores_model = {} # ResponseGenericConfidenceScores
+ response_generic_confidence_scores_model['threshold'] = 72.5
+ response_generic_confidence_scores_model['pre_gen'] = 72.5
+ response_generic_confidence_scores_model['post_gen'] = 72.5
+ response_generic_confidence_scores_model['extractiveness'] = 72.5
+
+ search_results_result_metadata_model = {} # SearchResultsResultMetadata
+ search_results_result_metadata_model['document_retrieval_source'] = 'testString'
+ search_results_result_metadata_model['score'] = 38
+
+ search_results_model = {} # SearchResults
+ search_results_model['result_metadata'] = search_results_result_metadata_model
+ search_results_model['id'] = 'testString'
+ search_results_model['title'] = 'testString'
+ search_results_model['body'] = 'testString'
+
+ runtime_response_generic_model = {} # RuntimeResponseGenericRuntimeResponseTypeConversationalSearch
+ runtime_response_generic_model['response_type'] = 'conversation_search'
+ runtime_response_generic_model['text'] = 'testString'
+ runtime_response_generic_model['citations_title'] = 'testString'
+ runtime_response_generic_model['citations'] = [response_generic_citation_model]
+ runtime_response_generic_model['confidence_scores'] = response_generic_confidence_scores_model
+ runtime_response_generic_model['response_length_option'] = 'testString'
+ runtime_response_generic_model['search_results'] = [search_results_model]
+ runtime_response_generic_model['disclaimer'] = 'testString'
+
+ runtime_intent_model = {} # RuntimeIntent
+ runtime_intent_model['intent'] = 'testString'
+ runtime_intent_model['confidence'] = 72.5
+ runtime_intent_model['skill'] = 'testString'
+
+ capture_group_model = {} # CaptureGroup
+ capture_group_model['group'] = 'testString'
+ capture_group_model['location'] = [38]
+
+ runtime_entity_interpretation_model = {} # RuntimeEntityInterpretation
+ runtime_entity_interpretation_model['calendar_type'] = 'testString'
+ runtime_entity_interpretation_model['datetime_link'] = 'testString'
+ runtime_entity_interpretation_model['festival'] = 'testString'
+ runtime_entity_interpretation_model['granularity'] = 'day'
+ runtime_entity_interpretation_model['range_link'] = 'testString'
+ runtime_entity_interpretation_model['range_modifier'] = 'testString'
+ runtime_entity_interpretation_model['relative_day'] = 72.5
+ runtime_entity_interpretation_model['relative_month'] = 72.5
+ runtime_entity_interpretation_model['relative_week'] = 72.5
+ runtime_entity_interpretation_model['relative_weekend'] = 72.5
+ runtime_entity_interpretation_model['relative_year'] = 72.5
+ runtime_entity_interpretation_model['specific_day'] = 72.5
+ runtime_entity_interpretation_model['specific_day_of_week'] = 'testString'
+ runtime_entity_interpretation_model['specific_month'] = 72.5
+ runtime_entity_interpretation_model['specific_quarter'] = 72.5
+ runtime_entity_interpretation_model['specific_year'] = 72.5
+ runtime_entity_interpretation_model['numeric_value'] = 72.5
+ runtime_entity_interpretation_model['subtype'] = 'testString'
+ runtime_entity_interpretation_model['part_of_day'] = 'testString'
+ runtime_entity_interpretation_model['relative_hour'] = 72.5
+ runtime_entity_interpretation_model['relative_minute'] = 72.5
+ runtime_entity_interpretation_model['relative_second'] = 72.5
+ runtime_entity_interpretation_model['specific_hour'] = 72.5
+ runtime_entity_interpretation_model['specific_minute'] = 72.5
+ runtime_entity_interpretation_model['specific_second'] = 72.5
+ runtime_entity_interpretation_model['timezone'] = 'testString'
+
+ runtime_entity_alternative_model = {} # RuntimeEntityAlternative
+ runtime_entity_alternative_model['value'] = 'testString'
+ runtime_entity_alternative_model['confidence'] = 72.5
+
+ runtime_entity_role_model = {} # RuntimeEntityRole
+ runtime_entity_role_model['type'] = 'date_from'
+
+ runtime_entity_model = {} # RuntimeEntity
+ runtime_entity_model['entity'] = 'testString'
+ runtime_entity_model['location'] = [38]
+ runtime_entity_model['value'] = 'testString'
+ runtime_entity_model['confidence'] = 72.5
+ runtime_entity_model['groups'] = [capture_group_model]
+ runtime_entity_model['interpretation'] = runtime_entity_interpretation_model
+ runtime_entity_model['alternatives'] = [runtime_entity_alternative_model]
+ runtime_entity_model['role'] = runtime_entity_role_model
+ runtime_entity_model['skill'] = 'testString'
+
+ dialog_node_action_model = {} # DialogNodeAction
+ dialog_node_action_model['name'] = 'testString'
+ dialog_node_action_model['type'] = 'client'
+ dialog_node_action_model['parameters'] = {'anyKey': 'anyValue'}
+ dialog_node_action_model['result_variable'] = 'testString'
+ dialog_node_action_model['credentials'] = 'testString'
+
+ dialog_node_visited_model = {} # DialogNodeVisited
+ dialog_node_visited_model['dialog_node'] = 'testString'
+ dialog_node_visited_model['title'] = 'testString'
+ dialog_node_visited_model['conditions'] = 'testString'
+
+ log_message_source_model = {} # LogMessageSourceDialogNode
+ log_message_source_model['type'] = 'dialog_node'
+ log_message_source_model['dialog_node'] = 'testString'
+
+ dialog_log_message_model = {} # DialogLogMessage
+ dialog_log_message_model['level'] = 'info'
+ dialog_log_message_model['message'] = 'testString'
+ dialog_log_message_model['code'] = 'testString'
+ dialog_log_message_model['source'] = log_message_source_model
+
+ turn_event_action_source_model = {} # TurnEventActionSource
+ turn_event_action_source_model['type'] = 'action'
+ turn_event_action_source_model['action'] = 'testString'
+ turn_event_action_source_model['action_title'] = 'testString'
+ turn_event_action_source_model['condition'] = 'testString'
+
+ message_output_debug_turn_event_model = {} # MessageOutputDebugTurnEventTurnEventActionVisited
+ message_output_debug_turn_event_model['event'] = 'action_visited'
+ message_output_debug_turn_event_model['source'] = turn_event_action_source_model
+ message_output_debug_turn_event_model['action_start_time'] = 'testString'
+ message_output_debug_turn_event_model['condition_type'] = 'user_defined'
+ message_output_debug_turn_event_model['reason'] = 'intent'
+ message_output_debug_turn_event_model['result_variable'] = 'testString'
+
+ message_output_debug_model = {} # MessageOutputDebug
+ message_output_debug_model['nodes_visited'] = [dialog_node_visited_model]
+ message_output_debug_model['log_messages'] = [dialog_log_message_model]
+ message_output_debug_model['branch_exited'] = True
+ message_output_debug_model['branch_exited_reason'] = 'completed'
+ message_output_debug_model['turn_events'] = [message_output_debug_turn_event_model]
+
+ message_output_spelling_model = {} # MessageOutputSpelling
+ message_output_spelling_model['text'] = 'testString'
+ message_output_spelling_model['original_text'] = 'testString'
+ message_output_spelling_model['suggested_text'] = 'testString'
+
+ message_output_llm_metadata_model = {} # MessageOutputLLMMetadata
+ message_output_llm_metadata_model['task'] = 'testString'
+ message_output_llm_metadata_model['model_id'] = 'testString'
+
+ message_context_global_system_model = {} # MessageContextGlobalSystem
+ message_context_global_system_model['timezone'] = 'testString'
+ message_context_global_system_model['user_id'] = 'testString'
+ message_context_global_system_model['turn_count'] = 38
+ message_context_global_system_model['locale'] = 'en-us'
+ message_context_global_system_model['reference_time'] = 'testString'
+ message_context_global_system_model['session_start_time'] = 'testString'
+ message_context_global_system_model['state'] = 'testString'
+ message_context_global_system_model['skip_user_input'] = True
+
+ stateless_message_context_global_model = {} # StatelessMessageContextGlobal
+ stateless_message_context_global_model['system'] = message_context_global_system_model
+ stateless_message_context_global_model['session_id'] = 'testString'
+
+ message_context_skill_system_model = {} # MessageContextSkillSystem
+ message_context_skill_system_model['state'] = 'testString'
+ message_context_skill_system_model['foo'] = 'testString'
+
+ message_context_dialog_skill_model = {} # MessageContextDialogSkill
+ message_context_dialog_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ message_context_dialog_skill_model['system'] = message_context_skill_system_model
+
+ stateless_message_context_skills_actions_skill_model = {} # StatelessMessageContextSkillsActionsSkill
+ stateless_message_context_skills_actions_skill_model['user_defined'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['system'] = message_context_skill_system_model
+ stateless_message_context_skills_actions_skill_model['action_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['skill_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['private_action_variables'] = {'anyKey': 'anyValue'}
+ stateless_message_context_skills_actions_skill_model['private_skill_variables'] = {'anyKey': 'anyValue'}
+
+ stateless_message_context_skills_model = {} # StatelessMessageContextSkills
+ stateless_message_context_skills_model['main skill'] = message_context_dialog_skill_model
+ stateless_message_context_skills_model['actions skill'] = stateless_message_context_skills_actions_skill_model
+
+ stateless_message_context_model = {} # StatelessMessageContext
+ stateless_message_context_model['global'] = stateless_message_context_global_model
+ stateless_message_context_model['skills'] = stateless_message_context_skills_model
+ stateless_message_context_model['integrations'] = {'anyKey': 'anyValue'}
+
+ stateless_final_response_output_model = {} # StatelessFinalResponseOutput
+ stateless_final_response_output_model['generic'] = [runtime_response_generic_model]
+ stateless_final_response_output_model['intents'] = [runtime_intent_model]
+ stateless_final_response_output_model['entities'] = [runtime_entity_model]
+ stateless_final_response_output_model['actions'] = [dialog_node_action_model]
+ stateless_final_response_output_model['debug'] = message_output_debug_model
+ stateless_final_response_output_model['user_defined'] = {'anyKey': 'anyValue'}
+ stateless_final_response_output_model['spelling'] = message_output_spelling_model
+ stateless_final_response_output_model['llm_metadata'] = [message_output_llm_metadata_model]
+ stateless_final_response_output_model['streaming_metadata'] = stateless_message_context_model
+
+ stateless_final_response_model = {} # StatelessFinalResponse
+ stateless_final_response_model['output'] = stateless_final_response_output_model
+ stateless_final_response_model['context'] = stateless_message_context_model
+ stateless_final_response_model['user_id'] = 'testString'
+
+ # Construct a json representation of a StatelessMessageStreamResponseStatelessMessageStreamFinalResponse model
+ stateless_message_stream_response_stateless_message_stream_final_response_model_json = {}
+ stateless_message_stream_response_stateless_message_stream_final_response_model_json['final_response'] = stateless_final_response_model
+
+ # Construct a model instance of StatelessMessageStreamResponseStatelessMessageStreamFinalResponse by calling from_dict on the json representation
+ stateless_message_stream_response_stateless_message_stream_final_response_model = StatelessMessageStreamResponseStatelessMessageStreamFinalResponse.from_dict(stateless_message_stream_response_stateless_message_stream_final_response_model_json)
+ assert stateless_message_stream_response_stateless_message_stream_final_response_model != False
+
+ # Construct a model instance of StatelessMessageStreamResponseStatelessMessageStreamFinalResponse by calling from_dict on the json representation
+ stateless_message_stream_response_stateless_message_stream_final_response_model_dict = StatelessMessageStreamResponseStatelessMessageStreamFinalResponse.from_dict(stateless_message_stream_response_stateless_message_stream_final_response_model_json).__dict__
+ stateless_message_stream_response_stateless_message_stream_final_response_model2 = StatelessMessageStreamResponseStatelessMessageStreamFinalResponse(**stateless_message_stream_response_stateless_message_stream_final_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert stateless_message_stream_response_stateless_message_stream_final_response_model == stateless_message_stream_response_stateless_message_stream_final_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ stateless_message_stream_response_stateless_message_stream_final_response_model_json2 = stateless_message_stream_response_stateless_message_stream_final_response_model.to_dict()
+ assert stateless_message_stream_response_stateless_message_stream_final_response_model_json2 == stateless_message_stream_response_stateless_message_stream_final_response_model_json
+
+
+# endregion
+##############################################################################
+# End of Model Tests
+##############################################################################
diff --git a/test/unit/test_authorization_v1.py b/test/unit/test_authorization_v1.py
deleted file mode 100644
index 0b2125c84..000000000
--- a/test/unit/test_authorization_v1.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# coding: utf-8
-import responses
-import ibm_watson
-
-
-@responses.activate
-def test_request_token():
- url = 'https://stream.watsonplatform.net/authorization/api/v1/token?url=https://stream.watsonplatform.net/speech-to-text/api'
- responses.add(responses.GET,
- url=url,
- body=b'mocked token',
- status=200)
- authorization = ibm_watson.AuthorizationV1(username='xxx', password='yyy')
- authorization.get_token(url=ibm_watson.SpeechToTextV1.default_url)
- assert responses.calls[0].request.url == url
- assert responses.calls[0].response.content.decode('utf-8') == 'mocked token'
diff --git a/test/unit/test_common.py b/test/unit/test_common.py
index a2553e86f..62e0bbf6b 100644
--- a/test/unit/test_common.py
+++ b/test/unit/test_common.py
@@ -1,6 +1,6 @@
# coding: utf-8
-# Copyright 2019 IBM All Rights Reserved.
+# (C) Copyright IBM Corp. 2019, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,11 +17,16 @@
from ibm_watson import get_sdk_headers
import unittest
+
class TestCommon(unittest.TestCase):
+
def test_get_sdk_headers(self):
headers = get_sdk_headers('my_service', 'v1', 'my_operation')
self.assertIsNotNone(headers)
self.assertIsNotNone(headers.get('X-IBMCloud-SDK-Analytics'))
self.assertIsNotNone(headers.get('User-Agent'))
self.assertIn('watson-apis-python-sdk', headers.get('User-Agent'))
- self.assertEqual(headers.get('X-IBMCloud-SDK-Analytics'), 'service_name=my_service;service_version=v1;operation_id=my_operation')
+ self.assertEqual(
+ headers.get('X-IBMCloud-SDK-Analytics'),
+ 'service_name=my_service;service_version=v1;operation_id=my_operation'
+ )
diff --git a/test/unit/test_compare_comply_v1.py b/test/unit/test_compare_comply_v1.py
deleted file mode 100644
index 1252a7a5e..000000000
--- a/test/unit/test_compare_comply_v1.py
+++ /dev/null
@@ -1,556 +0,0 @@
-# coding: utf-8
-import responses
-import ibm_watson
-import json
-import os
-
-from unittest import TestCase
-
-base_url = "https://gateway.watsonplatform.net/compare-comply/api"
-feedback = {
- "comment": "test commment",
- "user_id": "wonder woman",
- "feedback_id": "lala",
- "feedback_data": {
- "model_id": "contracts",
- "original_labels": {
- "categories": [
- {
- "modification": "unchanged",
- "provenance_ids": [],
- "label": "Responsibilities"
- },
- {
- "modification": "removed",
- "provenance_ids": [],
- "label": "Amendments"
- }
- ],
- "types": [
- {
- "modification": "unchanged",
- "provenance_ids": [
- "111",
- "2222"
- ],
- "label": {
- "party": "IBM",
- "nature": "Obligation"
- }
- },
- {
- "modification": "removed",
- "provenance_ids": [
- "111",
- "2222"
- ],
- "label": {
- "party": "Exclusion",
- "nature": "End User"
- }
- }
- ]
- },
- "text": "1. IBM will provide a Senior Managing Consultant / expert resource, for up to 80 hours, to assist Florida Power & Light (FPL) with the creation of an IT infrastructure unit cost model for existing infrastructure.",
- "feedback_type": "element_classification",
- "updated_labels": {
- "categories": [
- {
- "modification": "unchanged",
- "label": "Responsibilities"
- },
- {
- "modification": "added",
- "label": "Audits"
- }
- ],
- "types": [
- {
- "modification": "unchanged",
- "label": {
- "party": "IBM",
- "nature": "Obligation"
- }
- },
- {
- "modification": "added",
- "label": {
- "party": "Buyer",
- "nature": "Disclaimer"
- }
- }
- ]
- },
- "model_version": "11.00",
- "location": {
- "begin": "214",
- "end": "237"
- },
- "document": {
- "hash": "",
- "title": "doc title"
- }
- },
- "created": "2018-11-16T22:57:14+0000"
-}
-
-
-batch = {
- "function": "html_conversion",
- "status": "completed",
- "updated": "2018-11-12T21:02:43.867+0000",
- "document_counts": {
- "successful": 4,
- "failed": 0,
- "total": 4,
- "pending": 0
- },
- "created": "2018-11-12T21:02:38.907+0000",
- "input_bucket_location": "us-south",
- "input_bucket_name": "compare-comply-integration-test-bucket-input",
- "batch_id": "xxx",
- "output_bucket_name": "compare-comply-integration-test-bucket-output",
- "model": "contracts",
- "output_bucket_location": "us-south"
-}
-
-class TestCompareComplyV1(TestCase):
-
- @classmethod
- def setUp(cls):
- iam_url = "https://iam.cloud.ibm.com/identity/token"
- iam_token_response = """{
- "access_token": "oAeisG8yqPY7sFR_x66Z15",
- "token_type": "Bearer",
- "expires_in": 3600,
- "expiration": 1524167011,
- "refresh_token": "jy4gl91BQ"
- }"""
- responses.add(
- responses.POST, url=iam_url, body=iam_token_response, status=200)
-
- @responses.activate
- def test_convert_to_html(self):
- service = ibm_watson.CompareComplyV1(
- '2016-10-20', iam_apikey='bogusapikey')
-
- url = "{0}{1}".format(base_url, '/v1/html_conversion')
-
- response = {
- "hash": "0d9589556c16fca21c64ce9c8b10d065",
- "html": "",
- "num_pages": "4",
- "publication_date": "2018-11-10",
- "title": "Microsoft Word - contract_A.doc"
- }
-
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
-
- with open(
- os.path.join(os.path.dirname(__file__),
- '../../resources/contract_A.pdf'), 'rb') as file:
- service.convert_to_html(
- file,
- model_id="contracts",
- file_content_type="application/octet-stream")
-
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_classify_elements(self):
- service = ibm_watson.CompareComplyV1(
- '2016-10-20', iam_apikey='bogusapikey')
-
- url = "{0}{1}".format(base_url, '/v1/element_classification')
-
- response = [{
- "text":
- "__November 9, 2018______________ date",
- "categories": [],
- "location": {
- "begin": 19373,
- "end": 19410
- },
- "types": [],
- "attributes": [{
- "text": "November 9, 2018",
- "type": "DateTime",
- "location": {
- "begin": 19375,
- "end": 19391
- }
- }]
- }]
-
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
-
- with open(os.path.join(os.path.dirname(__file__),
- '../../resources/contract_A.pdf'), 'rb') as file:
- service.classify_elements(
- file,
- model_id="contracts",
- file_content_type="application/octet-stream")
-
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_extract_tables(self):
- service = ibm_watson.CompareComplyV1(
- '2016-10-20', iam_apikey='bogusapikey')
-
- url = "{0}{1}".format(base_url, '/v1/tables')
-
- response = {
- "model_version":
- "0.2.8-SNAPSHOT",
- "model_id":
- "tables",
- "document": {
- "hash": "0906a4721a59ffeaf2ec12997aa4f7f7",
- "title": "Design and build accessible PDF tables, sample tables"
- },
- "tables": [{
- "section_title": {
- "text": "Sample tables ",
- "location": {
- "begin": 2099,
- "end": 2113
- }
- },
- "text":
- "Column header (TH) Column header (TH) Column header (TH) Row header (TH) Data cell (TD) Data cell (TD) Row header(TH) Data cell (TD) Data cell (TD) ",
- "table_headers": [],
- "row_headers": [],
- "location": {
- "begin": 2832,
- "end": 4801
- },
- "body_cells": [],
- }]
- }
-
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
-
- with open(os.path.join(os.path.dirname(__file__),
- '../../resources/sample-tables.pdf'), 'rb') as file:
- service.extract_tables(file)
-
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_compare_documents(self):
- service = ibm_watson.CompareComplyV1(
- '2016-10-20', iam_apikey='bogusapikey')
-
- url = "{0}{1}".format(base_url, '/v1/comparison')
-
- response = {
- "aligned_elements": [
- {
- "element_pair": [{
- "text":
- "WITNESSETH: that the Owner and Contractor undertake and agree as follows:",
- "types": [],
- "document_label":
- "file_1",
- "attributes": [],
- "categories": [],
- "location": {
- "begin": 3845,
- "end": 4085
- }
- }, {
- "text":
- "WITNESSETH: that the Owner and Contractor undertake and agree as follows:",
- "types": [],
- "document_label":
- "file_2",
- "attributes": [],
- "categories": [],
- "location": {
- "begin": 3846,
- "end": 4086
- }
- }],
- "provenance_ids":
- ["1mSG/96z1wY4De35LAExJzhCo2t0DfvbYnTl+vbavjY="],
- },
- ],
- "model_id":
- "contracts",
- "model_version":
- "1.0.0"
- }
-
- responses.add(
- responses.POST,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
-
- with open(os.path.join(os.path.dirname(__file__),
- '../../resources/contract_A.pdf'), 'rb') as file1:
- with open(os.path.join(os.path.dirname(__file__),
- '../../resources/contract_B.pdf'), 'rb') as file2:
- service.compare_documents(file1, file2)
-
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_add_feedback(self):
- service = ibm_watson.CompareComplyV1(
- '2016-10-20', iam_apikey='bogusapikey')
-
- url = "{0}{1}".format(base_url, '/v1/feedback')
-
- feedback_data = {
- "feedback_type": "element_classification",
- "document": {
- "hash": "",
- "title": "doc title"
- },
- "model_id": "contracts",
- "model_version": "11.00",
- "location": {
- "begin": "214",
- "end": "237"
- },
- "text": "1. IBM will provide a Senior Managing Consultant / expert resource, for up to 80 hours, to assist Florida Power & Light (FPL) with the creation of an IT infrastructure unit cost model for existing infrastructure.",
- "original_labels": {
- "types": [
- {
- "label": {
- "nature": "Obligation",
- "party": "IBM"
- },
- "provenance_ids": [
- "85f5981a-ba91-44f5-9efa-0bd22e64b7bc",
- "ce0480a1-5ef1-4c3e-9861-3743b5610795"
- ]
- },
- {
- "label": {
- "nature": "End User",
- "party": "Exclusion"
- },
- "provenance_ids": [
- "85f5981a-ba91-44f5-9efa-0bd22e64b7bc",
- "ce0480a1-5ef1-4c3e-9861-3743b5610795"
- ]
- }
- ],
- "categories": [
- {
- "label": "Responsibilities",
- "provenance_ids": []
- },
- {
- "label": "Amendments",
- "provenance_ids": []
- }
- ]
- },
- "updated_labels": {
- "types": [
- {
- "label": {
- "nature": "Obligation",
- "party": "IBM"
- }
- },
- {
- "label": {
- "nature": "Disclaimer",
- "party": "Buyer"
- }
- }
- ],
- "categories": [
- {
- "label": "Responsibilities"
- },
- {
- "label": "Audits"
- }
- ]
- }
- }
-
- responses.add(
- responses.POST,
- url,
- body=json.dumps(feedback),
- status=200,
- content_type='application/json')
-
- result = service.add_feedback(
- feedback_data,
- "wonder woman",
- "test commment").get_result()
- assert result["feedback_id"] == "lala"
-
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_get_feedback(self):
- service = ibm_watson.CompareComplyV1(
- '2016-10-20', iam_apikey='bogusapikey')
-
- url = "{0}{1}".format(base_url, '/v1/feedback/xxx')
-
- responses.add(
- responses.GET,
- url,
- body=json.dumps(feedback),
- status=200,
- content_type='application/json')
-
- result = service.get_feedback("xxx").get_result()
- assert result["feedback_id"] == "lala"
-
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_list_feedback(self):
- service = ibm_watson.CompareComplyV1(
- '2016-10-20', iam_apikey='bogusapikey')
-
- url = "{0}{1}".format(base_url, '/v1/feedback')
-
- responses.add(
- responses.GET,
- url,
- body=json.dumps({"feedback":[feedback]}),
- status=200,
- content_type='application/json')
-
- result = service.list_feedback().get_result()
- assert result["feedback"][0]["feedback_id"] == "lala"
-
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_delete_feedback(self):
- service = ibm_watson.CompareComplyV1(
- '2016-10-20', iam_apikey='bogusapikey')
-
- url = "{0}{1}".format(base_url, '/v1/feedback/xxx')
-
- response = {
- "status": 200,
- "message": "Successfully deleted the feedback with id - 90ae2cb9-e6c5-43eb-a70f-199959f76019"
- }
-
- responses.add(
- responses.DELETE,
- url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
-
- result = service.delete_feedback("xxx").get_result()
- assert result["status"] == 200
-
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_create_batch(self):
- service = ibm_watson.CompareComplyV1(
- '2016-10-20', iam_apikey='bogusapikey')
-
- url = "{0}{1}".format(base_url, '/v1/batches')
-
- responses.add(
- responses.POST,
- url,
- body=json.dumps(batch),
- status=200,
- content_type='application/json')
-
- with open(os.path.join(os.path.dirname(__file__),
- '../../resources/dummy-storage-credentials.json'), 'rb') as input_credentials_file:
- with open(os.path.join(os.path.dirname(__file__),
- '../../resources/dummy-storage-credentials.json'), 'rb') as output_credentials_file:
- result = service.create_batch(
- "html_conversion",
- input_credentials_file,
- "us-south",
- "compare-comply-integration-test-bucket-input",
- output_credentials_file,
- "us-south",
- "compare-comply-integration-test-bucket-output").get_result()
-
- assert result["batch_id"] == "xxx"
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_get_batch(self):
- service = ibm_watson.CompareComplyV1(
- '2016-10-20', iam_apikey='bogusapikey')
-
- url = "{0}{1}".format(base_url, '/v1/batches/xxx')
-
- responses.add(
- responses.GET,
- url,
- body=json.dumps(batch),
- status=200,
- content_type='application/json')
-
- result = service.get_batch("xxx").get_result()
- assert result["batch_id"] == "xxx"
-
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_list_batches(self):
- service = ibm_watson.CompareComplyV1(
- '2016-10-20', iam_apikey='bogusapikey')
-
- url = "{0}{1}".format(base_url, '/v1/batches')
-
- responses.add(
- responses.GET,
- url,
- body=json.dumps({"batches": [batch]}),
- status=200,
- content_type='application/json')
-
- result = service.list_batches().get_result()
- assert result["batches"][0]["batch_id"] == "xxx"
-
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_update_batch(self):
- service = ibm_watson.CompareComplyV1(
- '2016-10-20', iam_apikey='bogusapikey')
-
- url = "{0}{1}".format(base_url, '/v1/batches/xxx')
-
- responses.add(
- responses.PUT,
- url,
- body=json.dumps(batch),
- status=200,
- content_type='application/json')
-
- result = service.update_batch("xxx", "rescan").get_result()
- assert result["batch_id"] == "xxx"
- assert len(responses.calls) == 2
diff --git a/test/unit/test_discovery_v1.py b/test/unit/test_discovery_v1.py
deleted file mode 100644
index 0dc20269d..000000000
--- a/test/unit/test_discovery_v1.py
+++ /dev/null
@@ -1,1284 +0,0 @@
-# coding: utf-8
-import responses
-import os
-import json
-import io
-import ibm_watson
-from ibm_watson.discovery_v1 import TrainingDataSet, TrainingQuery, TrainingExample
-
-try:
- from urllib.parse import urlparse, urljoin
-except ImportError:
- from urlparse import urlparse, urljoin
-
-base_discovery_url = 'https://gateway.watsonplatform.net/discovery/api/v1/'
-
-platform_url = 'https://gateway.watsonplatform.net'
-service_path = '/discovery/api'
-base_url = '{0}{1}'.format(platform_url, service_path)
-
-version = '2016-12-01'
-environment_id = 'envid'
-collection_id = 'collid'
-
-
-@responses.activate
-def test_environments():
- discovery_url = urljoin(base_discovery_url, 'environments')
- discovery_response_body = """{
- "environments": [
- {
- "environment_id": "string",
- "name": "envname",
- "description": "",
- "created": "2016-11-20T01:03:17.645Z",
- "updated": "2016-11-20T01:03:17.645Z",
- "status": "status",
- "index_capacity": {
- "disk_usage": {
- "used_bytes": 0,
- "total_bytes": 0,
- "used": "string",
- "total": "string",
- "percent_used": 0
- },
- "memory_usage": {
- "used_bytes": 0,
- "total_bytes": 0,
- "used": "string",
- "total": "string",
- "percent_used": 0
- }
- }
- }
- ]
-}"""
-
- responses.add(responses.GET, discovery_url,
- body=discovery_response_body, status=200,
- content_type='application/json')
-
- discovery = ibm_watson.DiscoveryV1('2016-11-07',
- username='username',
- password='password')
- discovery.list_environments()
-
- url_str = "{0}?version=2016-11-07".format(discovery_url)
- assert responses.calls[0].request.url == url_str
-
- assert responses.calls[0].response.text == discovery_response_body
- assert len(responses.calls) == 1
-
-
-@responses.activate
-def test_get_environment():
- discovery_url = urljoin(base_discovery_url, 'environments/envid')
- responses.add(responses.GET, discovery_url,
- body="{\"resulting_key\": true}", status=200,
- content_type='application/json')
-
- discovery = ibm_watson.DiscoveryV1('2016-11-07',
- username='username',
- password='password')
- discovery.get_environment(environment_id='envid')
- url_str = "{0}?version=2016-11-07".format(discovery_url)
- assert responses.calls[0].request.url == url_str
- assert len(responses.calls) == 1
-
-
-@responses.activate
-def test_create_environment():
-
- discovery_url = urljoin(base_discovery_url, 'environments')
- responses.add(responses.POST, discovery_url,
- body="{\"resulting_key\": true}", status=200,
- content_type='application/json')
-
- discovery = ibm_watson.DiscoveryV1('2016-11-07',
- username='username',
- password='password')
-
- discovery.create_environment(name="my name", description="my description")
- assert len(responses.calls) == 1
-
-
-@responses.activate
-def test_update_environment():
- discovery_url = urljoin(base_discovery_url, 'environments/envid')
- responses.add(responses.PUT, discovery_url,
- body="{\"resulting_key\": true}", status=200,
- content_type='application/json')
-
- discovery = ibm_watson.DiscoveryV1('2016-11-07',
- username='username',
- password='password')
- discovery.update_environment('envid', name="hello", description="new")
- assert len(responses.calls) == 1
-
-
-@responses.activate
-def test_delete_environment():
- discovery_url = urljoin(base_discovery_url, 'environments/envid')
- responses.add(responses.DELETE, discovery_url,
- body="{\"resulting_key\": true}", status=200,
- content_type='application/json')
-
- discovery = ibm_watson.DiscoveryV1('2016-11-07',
- username='username',
- password='password')
- discovery.delete_environment('envid')
- assert len(responses.calls) == 1
-
-
-@responses.activate
-def test_collections():
- discovery_url = urljoin(base_discovery_url,
- 'environments/envid/collections')
-
- responses.add(responses.GET, discovery_url,
- body="{\"body\": \"hello\"}", status=200,
- content_type='application/json')
-
- discovery = ibm_watson.DiscoveryV1('2016-11-07',
- username='username',
- password='password')
- discovery.list_collections('envid')
-
- called_url = urlparse(responses.calls[0].request.url)
- test_url = urlparse(discovery_url)
-
- assert called_url.netloc == test_url.netloc
- assert called_url.path == test_url.path
- assert len(responses.calls) == 1
-
-
-@responses.activate
-def test_collection():
- discovery_url = urljoin(base_discovery_url,
- 'environments/envid/collections/collid')
-
- discovery_fields = urljoin(base_discovery_url,
- 'environments/envid/collections/collid/fields')
- config_url = urljoin(base_discovery_url,
- 'environments/envid/configurations')
-
- responses.add(responses.GET, config_url,
- body="{\"body\": \"hello\"}",
- status=200,
- content_type='application/json')
-
- responses.add(responses.GET, discovery_fields,
- body="{\"body\": \"hello\"}", status=200,
- content_type='application/json')
-
- responses.add(responses.GET, discovery_url,
- body="{\"body\": \"hello\"}", status=200,
- content_type='application/json')
-
- responses.add(responses.DELETE, discovery_url,
- body="{\"body\": \"hello\"}", status=200,
- content_type='application/json')
-
- responses.add(responses.POST,
- urljoin(base_discovery_url,
- 'environments/envid/collections'),
- body="{\"body\": \"create\"}",
- status=200,
- content_type='application/json')
-
- discovery = ibm_watson.DiscoveryV1('2016-11-07',
- username='username',
- password='password')
- discovery.create_collection(environment_id='envid',
- name="name",
- description="",
- language="",
- configuration_id='confid')
-
- discovery.create_collection(environment_id='envid',
- name="name",
- language="es",
- description="")
-
- discovery.get_collection('envid', 'collid')
-
- called_url = urlparse(responses.calls[2].request.url)
- test_url = urlparse(discovery_url)
-
- assert called_url.netloc == test_url.netloc
- assert called_url.path == test_url.path
-
- discovery.delete_collection(environment_id='envid',
- collection_id='collid')
- discovery.list_collection_fields(environment_id='envid',
- collection_id='collid')
- assert len(responses.calls) == 5
-
-@responses.activate
-def test_federated_query():
- discovery_url = urljoin(base_discovery_url,
- 'environments/envid/query')
-
- responses.add(responses.POST, discovery_url,
- body="{\"body\": \"hello\"}", status=200,
- content_type='application/json')
- discovery = ibm_watson.DiscoveryV1('2016-11-07',
- username='username',
- password='password')
- discovery.federated_query('envid', 'colls.sha1::9181d244*', collection_ids=['collid1', 'collid2'])
-
- called_url = urlparse(responses.calls[0].request.url)
- test_url = urlparse(discovery_url)
-
- assert called_url.netloc == test_url.netloc
- assert called_url.path == test_url.path
- assert len(responses.calls) == 1
-
-@responses.activate
-def test_federated_query_2():
- discovery_url = urljoin(base_discovery_url,
- 'environments/envid/query')
-
- responses.add(responses.POST, discovery_url,
- body="{\"body\": \"hello\"}", status=200,
- content_type='application/json')
- discovery = ibm_watson.DiscoveryV1('2016-11-07', username='username', password='password')
- discovery.federated_query('envid', collection_ids="'collid1', 'collid2'",
- filter='colls.sha1::9181d244*',
- bias='1',
- logging_opt_out=True)
-
- called_url = urlparse(responses.calls[0].request.url)
- test_url = urlparse(discovery_url)
-
- assert called_url.netloc == test_url.netloc
- assert called_url.path == test_url.path
- assert len(responses.calls) == 1
-
-@responses.activate
-def test_federated_query_notices():
- discovery_url = urljoin(base_discovery_url,
- 'environments/envid/notices')
-
- responses.add(responses.GET, discovery_url,
- body="{\"body\": \"hello\"}", status=200,
- content_type='application/json')
- discovery = ibm_watson.DiscoveryV1('2016-11-07', username='username', password='password')
- discovery.federated_query_notices('envid', collection_ids=['collid1', 'collid2'], filter='notices.sha1::9181d244*')
-
- called_url = urlparse(responses.calls[0].request.url)
- test_url = urlparse(discovery_url)
-
- assert called_url.netloc == test_url.netloc
- assert called_url.path == test_url.path
- assert len(responses.calls) == 1
-
-@responses.activate
-def test_query():
- discovery_url = urljoin(base_discovery_url,
- 'environments/envid/collections/collid/query')
-
- responses.add(responses.POST, discovery_url,
- body="{\"body\": \"hello\"}", status=200,
- content_type='application/json')
- discovery = ibm_watson.DiscoveryV1('2016-11-07',
- username='username',
- password='password')
- discovery.query('envid', 'collid',
- filter='extracted_metadata.sha1::9181d244*',
- count=1,
- passages=True,
- passages_fields=['x', 'y'],
- logging_opt_out='True',
- passages_count=2)
-
- called_url = urlparse(responses.calls[0].request.url)
- test_url = urlparse(discovery_url)
-
- assert called_url.netloc == test_url.netloc
- assert called_url.path == test_url.path
- assert len(responses.calls) == 1
-
-@responses.activate
-def test_query_2():
- discovery_url = urljoin(base_discovery_url,
- 'environments/envid/collections/collid/query')
-
- responses.add(responses.POST, discovery_url,
- body="{\"body\": \"hello\"}", status=200,
- content_type='application/json')
- discovery = ibm_watson.DiscoveryV1('2016-11-07',
- username='username',
- password='password')
- discovery.query('envid', 'collid',
- filter='extracted_metadata.sha1::9181d244*',
- count=1,
- passages=True,
- passages_fields=['x', 'y'],
- logging_opt_out='True',
- passages_count=2,
- bias='1',
- collection_ids='1,2')
-
- called_url = urlparse(responses.calls[0].request.url)
- test_url = urlparse(discovery_url)
-
- assert called_url.netloc == test_url.netloc
- assert called_url.path == test_url.path
- assert len(responses.calls) == 1
-
-@responses.activate
-def test_query_relations():
- discovery_url = urljoin(
- base_discovery_url,
- 'environments/envid/collections/collid/query_relations')
-
- responses.add(
- responses.POST,
- discovery_url,
- body="{\"body\": \"hello\"}",
- status=200,
- content_type='application/json')
-
- discovery = ibm_watson.DiscoveryV1(
- '2016-11-07', username='username', password='password')
-
- discovery.query_relations('envid', 'collid', count=10)
- called_url = urlparse(responses.calls[0].request.url)
- test_url = urlparse(discovery_url)
- assert called_url.netloc == test_url.netloc
- assert called_url.path == test_url.path
- assert len(responses.calls) == 1
-
-
-@responses.activate
-def test_query_entities():
- discovery_url = urljoin(
- base_discovery_url,
- 'environments/envid/collections/collid/query_entities')
-
- responses.add(
- responses.POST,
- discovery_url,
- body="{\"body\": \"hello\"}",
- status=200,
- content_type='application/json')
-
- discovery = ibm_watson.DiscoveryV1(
- '2016-11-07', username='username', password='password')
-
- discovery.query_entities('envid', 'collid', {'count': 10})
- called_url = urlparse(responses.calls[0].request.url)
- test_url = urlparse(discovery_url)
- assert called_url.netloc == test_url.netloc
- assert called_url.path == test_url.path
- assert len(responses.calls) == 1
-
-@responses.activate
-def test_query_notices():
- discovery_url = urljoin(
- base_discovery_url,
- 'environments/envid/collections/collid/notices')
-
- responses.add(
- responses.GET,
- discovery_url,
- body="{\"body\": \"hello\"}",
- status=200,
- content_type='application/json')
-
- discovery = ibm_watson.DiscoveryV1(
- '2016-11-07', username='username', password='password')
-
- discovery.query_notices('envid', 'collid', filter='notices.sha1::*')
- called_url = urlparse(responses.calls[0].request.url)
- test_url = urlparse(discovery_url)
- assert called_url.netloc == test_url.netloc
- assert called_url.path == test_url.path
- assert len(responses.calls) == 1
-
-@responses.activate
-def test_configs():
- discovery_url = urljoin(base_discovery_url,
- 'environments/envid/configurations')
- discovery_config_id = urljoin(base_discovery_url,
- 'environments/envid/configurations/confid')
-
- results = {"configurations":
- [{"name": "Default Configuration",
- "configuration_id": "confid"}]}
-
- responses.add(responses.GET, discovery_url,
- body=json.dumps(results),
- status=200,
- content_type='application/json')
-
- responses.add(responses.GET, discovery_config_id,
- body=json.dumps(results['configurations'][0]),
- status=200,
- content_type='application/json')
- responses.add(responses.POST, discovery_url,
- body=json.dumps(results['configurations'][0]),
- status=200,
- content_type='application/json')
- responses.add(responses.PUT, discovery_config_id,
- body=json.dumps(results['configurations'][0]),
- status=200,
- content_type='application/json')
- responses.add(responses.DELETE, discovery_config_id,
- body=json.dumps({'deleted': 'bogus -- ok'}),
- status=200,
- content_type='application/json')
-
- discovery = ibm_watson.DiscoveryV1('2016-11-07',
- username='username',
- password='password')
- discovery.list_configurations(environment_id='envid')
-
- discovery.get_configuration(environment_id='envid',
- configuration_id='confid')
-
- assert len(responses.calls) == 2
-
- discovery.create_configuration(environment_id='envid',
- name='my name')
- discovery.create_configuration(environment_id='envid',
- name='my name',
- source={'type': 'salesforce', 'credential_id': 'xxx'})
- discovery.update_configuration(environment_id='envid',
- configuration_id='confid',
- name='my new name')
- discovery.update_configuration(environment_id='envid',
- configuration_id='confid',
- name='my new name',
- source={'type': 'salesforce', 'credential_id': 'xxx'})
- discovery.delete_configuration(environment_id='envid',
- configuration_id='confid')
-
- assert len(responses.calls) == 7
-
-
-@responses.activate
-def test_document():
- discovery_url = urljoin(base_discovery_url,
- 'environments/envid/preview')
- config_url = urljoin(base_discovery_url,
- 'environments/envid/configurations')
- responses.add(responses.POST, discovery_url,
- body="{\"configurations\": []}",
- status=200,
- content_type='application/json')
- responses.add(responses.GET, config_url,
- body=json.dumps({"configurations":
- [{"name": "Default Configuration",
- "configuration_id": "confid"}]}),
- status=200,
- content_type='application/json')
-
- discovery = ibm_watson.DiscoveryV1('2016-11-07',
- username='username',
- password='password')
- html_path = os.path.join(os.getcwd(), 'resources', 'simple.html')
- with open(html_path) as fileinfo:
- conf_id = discovery.test_configuration_in_environment(environment_id='envid',
- configuration_id='bogus',
- file=fileinfo)
- assert conf_id is not None
- conf_id = discovery.test_configuration_in_environment(environment_id='envid',
- file=fileinfo)
- assert conf_id is not None
-
- assert len(responses.calls) == 2
-
- add_doc_url = urljoin(base_discovery_url,
- 'environments/envid/collections/collid/documents')
-
- doc_id_path = 'environments/envid/collections/collid/documents/docid'
-
- update_doc_url = urljoin(base_discovery_url, doc_id_path)
- del_doc_url = urljoin(base_discovery_url,
- doc_id_path)
- responses.add(responses.POST, add_doc_url,
- body="{\"body\": []}",
- status=200,
- content_type='application/json')
-
- doc_status = {
- "document_id": "45556e23-f2b1-449d-8f27-489b514000ff",
- "configuration_id": "2e079259-7dd2-40a9-998f-3e716f5a7b88",
- "created" : "2016-06-16T10:56:54.957Z",
- "updated" : "2017-05-16T13:56:54.957Z",
- "status": "available",
- "status_description": "Document is successfully ingested and indexed with no warnings",
- "notices": []
- }
-
- responses.add(responses.GET, del_doc_url,
- body=json.dumps(doc_status),
- status=200,
- content_type='application/json')
-
- responses.add(responses.POST, update_doc_url,
- body="{\"body\": []}",
- status=200,
- content_type='application/json')
-
- responses.add(responses.DELETE, del_doc_url,
- body="{\"body\": []}",
- status=200,
- content_type='application/json')
-
- html_path = os.path.join(os.getcwd(), 'resources', 'simple.html')
- with open(html_path) as fileinfo:
- conf_id = discovery.add_document(environment_id='envid',
- collection_id='collid',
- file=fileinfo)
- assert conf_id is not None
-
- assert len(responses.calls) == 3
-
- discovery.get_document_status(environment_id='envid',
- collection_id='collid',
- document_id='docid')
-
- assert len(responses.calls) == 4
-
- discovery.update_document(environment_id='envid',
- collection_id='collid',
- document_id='docid')
-
- assert len(responses.calls) == 5
-
- discovery.update_document(environment_id='envid',
- collection_id='collid',
- document_id='docid')
-
- assert len(responses.calls) == 6
-
- discovery.delete_document(environment_id='envid',
- collection_id='collid',
- document_id='docid')
-
- assert len(responses.calls) == 7
-
- conf_id = discovery.add_document(environment_id='envid',
- collection_id='collid',
- file=io.StringIO(u'my string of file'),
- filename='file.txt')
-
- assert len(responses.calls) == 8
-
- conf_id = discovery.add_document(environment_id='envid',
- collection_id='collid',
- file=io.StringIO(u'my string of file
'),
- filename='file.html',
- file_content_type='application/html')
-
- assert len(responses.calls) == 9
-
- conf_id = discovery.add_document(environment_id='envid',
- collection_id='collid',
- file=io.StringIO(u'my string of file
'),
- filename='file.html',
- file_content_type='application/html',
- metadata=io.StringIO(u'{"stuff": "woot!"}'))
-
- assert len(responses.calls) == 10
-
-
-@responses.activate
-def test_delete_all_training_data():
- training_endpoint = '/v1/environments/{0}/collections/{1}/training_data'
- endpoint = training_endpoint.format(environment_id, collection_id)
- url = '{0}{1}'.format(base_url, endpoint)
- responses.add(responses.DELETE, url, status=204)
-
- service = ibm_watson.DiscoveryV1(version, username='username', password='password')
- response = service.delete_all_training_data(environment_id=environment_id,
- collection_id=collection_id).get_result()
-
- assert response is None
-
-
-@responses.activate
-def test_list_training_data():
- training_endpoint = '/v1/environments/{0}/collections/{1}/training_data'
- endpoint = training_endpoint.format(environment_id, collection_id)
- url = '{0}{1}'.format(base_url, endpoint)
- mock_response = {
- "environment_id": "string",
- "collection_id": "string",
- "queries": [
- {
- "query_id": "string",
- "natural_language_query": "string",
- "filter": "string",
- "examples": [
- {
- "document_id": "string",
- "cross_reference": "string",
- "relevance": 0
- }
- ]
- }
- ]
- }
- responses.add(responses.GET,
- url,
- body=json.dumps(mock_response),
- status=200,
- content_type='application/json')
-
- service = ibm_watson.DiscoveryV1(version,
- username='username',
- password='password')
- response = service.list_training_data(environment_id=environment_id,
- collection_id=collection_id).get_result()
-
- assert response == mock_response
- # Verify that response can be converted to a TrainingDataSet
- TrainingDataSet._from_dict(response)
-
-
-@responses.activate
-def test_add_training_data():
- training_endpoint = '/v1/environments/{0}/collections/{1}/training_data'
- endpoint = training_endpoint.format(environment_id, collection_id)
- url = '{0}{1}'.format(base_url, endpoint)
- natural_language_query = "why is the sky blue"
- filter = "text:meteorology"
- examples = [
- {
- "document_id": "54f95ac0-3e4f-4756-bea6-7a67b2713c81",
- "relevance": 1
- },
- {
- "document_id": "01bcca32-7300-4c9f-8d32-33ed7ea643da",
- "cross_reference": "my_id_field:1463",
- "relevance": 5
- }
- ]
- mock_response = {
- "query_id": "string",
- "natural_language_query": "string",
- "filter": "string",
- "examples": [
- {
- "document_id": "string",
- "cross_reference": "string",
- "relevance": 0
- }
- ]
- }
- responses.add(responses.POST,
- url,
- body=json.dumps(mock_response),
- status=200,
- content_type='application/json')
-
- service = ibm_watson.DiscoveryV1(version,
- username='username',
- password='password')
- response = service.add_training_data(
- environment_id=environment_id,
- collection_id=collection_id,
- natural_language_query=natural_language_query,
- filter=filter,
- examples=examples).get_result()
-
- assert response == mock_response
- # Verify that response can be converted to a TrainingQuery
- TrainingQuery._from_dict(response)
-
-
-@responses.activate
-def test_delete_training_data():
- training_endpoint = '/v1/environments/{0}/collections/{1}/training_data/{2}'
- query_id = 'queryid'
- endpoint = training_endpoint.format(
- environment_id, collection_id, query_id)
- url = '{0}{1}'.format(base_url, endpoint)
- responses.add(responses.DELETE, url, status=204)
-
- service = ibm_watson.DiscoveryV1(version,
- username='username',
- password='password')
- response = service.delete_training_data(environment_id=environment_id,
- collection_id=collection_id,
- query_id=query_id).get_result()
-
- assert response is None
-
-
-@responses.activate
-def test_get_training_data():
- training_endpoint = '/v1/environments/{0}/collections/{1}/training_data/{2}'
- query_id = 'queryid'
- endpoint = training_endpoint.format(
- environment_id, collection_id, query_id)
- url = '{0}{1}'.format(base_url, endpoint)
- mock_response = {
- "query_id": "string",
- "natural_language_query": "string",
- "filter": "string",
- "examples": [
- {
- "document_id": "string",
- "cross_reference": "string",
- "relevance": 0
- }
- ]
- }
- responses.add(responses.GET,
- url,
- body=json.dumps(mock_response),
- status=200,
- content_type='application/json')
-
- service = ibm_watson.DiscoveryV1(version, username='username', password='password')
- response = service.get_training_data(environment_id=environment_id,
- collection_id=collection_id,
- query_id=query_id).get_result()
-
- assert response == mock_response
- # Verify that response can be converted to a TrainingQuery
- TrainingQuery._from_dict(response)
-
-
-@responses.activate
-def test_create_training_example():
- examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \
- '/{2}/examples'
- query_id = 'queryid'
- endpoint = examples_endpoint.format(
- environment_id, collection_id, query_id)
- url = '{0}{1}'.format(base_url, endpoint)
- document_id = "string"
- relevance = 0
- cross_reference = "string"
- mock_response = {
- "document_id": "string",
- "cross_reference": "string",
- "relevance": 0
- }
- responses.add(responses.POST,
- url,
- body=json.dumps(mock_response),
- status=201,
- content_type='application/json')
-
- service = ibm_watson.DiscoveryV1(version,
- username='username',
- password='password')
- response = service.create_training_example(
- environment_id=environment_id,
- collection_id=collection_id,
- query_id=query_id,
- document_id=document_id,
- relevance=relevance,
- cross_reference=cross_reference).get_result()
-
- assert response == mock_response
- # Verify that response can be converted to a TrainingExample
- TrainingExample._from_dict(response)
-
-
-@responses.activate
-def test_delete_training_example():
- examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \
- '/{2}/examples/{3}'
- query_id = 'queryid'
- example_id = 'exampleid'
- endpoint = examples_endpoint.format(environment_id,
- collection_id,
- query_id,
- example_id)
- url = '{0}{1}'.format(base_url, endpoint)
- responses.add(responses.DELETE, url, status=204)
-
- service = ibm_watson.DiscoveryV1(version, username='username', password='password')
- response = service.delete_training_example(
- environment_id=environment_id,
- collection_id=collection_id,
- query_id=query_id,
- example_id=example_id).get_result()
-
- assert response is None
-
-
-@responses.activate
-def test_get_training_example():
- examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \
- '/{2}/examples/{3}'
- query_id = 'queryid'
- example_id = 'exampleid'
- endpoint = examples_endpoint.format(environment_id,
- collection_id,
- query_id,
- example_id)
- url = '{0}{1}'.format(base_url, endpoint)
- mock_response = {
- "document_id": "string",
- "cross_reference": "string",
- "relevance": 0
- }
- responses.add(responses.GET,
- url,
- body=json.dumps(mock_response),
- status=200,
- content_type='application/json')
-
- service = ibm_watson.DiscoveryV1(version, username='username', password='password')
- response = service.get_training_example(
- environment_id=environment_id,
- collection_id=collection_id,
- query_id=query_id,
- example_id=example_id).get_result()
-
- assert response == mock_response
- # Verify that response can be converted to a TrainingExample
- TrainingExample._from_dict(response)
-
-
-@responses.activate
-def test_update_training_example():
- examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \
- '/{2}/examples/{3}'
- query_id = 'queryid'
- example_id = 'exampleid'
- endpoint = examples_endpoint.format(environment_id,
- collection_id,
- query_id,
- example_id)
- url = '{0}{1}'.format(base_url, endpoint)
- relevance = 0
- cross_reference = "string"
- mock_response = {
- "document_id": "string",
- "cross_reference": "string",
- "relevance": 0
- }
- responses.add(responses.PUT,
- url,
- body=json.dumps(mock_response),
- status=200,
- content_type='application/json')
-
- service = ibm_watson.DiscoveryV1(version,
- username='username',
- password='password')
- response = service.update_training_example(
- environment_id=environment_id,
- collection_id=collection_id,
- query_id=query_id,
- example_id=example_id,
- relevance=relevance,
- cross_reference=cross_reference).get_result()
-
- assert response == mock_response
- # Verify that response can be converted to a TrainingExample
- TrainingExample._from_dict(response)
-
-@responses.activate
-def test_expansions():
- url = 'https://gateway.watsonplatform.net/discovery/api/v1/environments/envid/collections/colid/expansions'
- responses.add(
- responses.GET,
- url,
- body='{"expansions": "results"}',
- status=200,
- content_type='application_json')
- responses.add(
- responses.DELETE,
- url,
- body='{"description": "success" }',
- status=200,
- content_type='application_json')
- responses.add(
- responses.POST,
- url,
- body='{"expansions": "success" }',
- status=200,
- content_type='application_json')
-
- discovery = ibm_watson.DiscoveryV1('2017-11-07', username="username", password="password")
-
- discovery.list_expansions('envid', 'colid')
- assert responses.calls[0].response.json() == {"expansions": "results"}
-
- discovery.create_expansions('envid', 'colid', [{"input_terms": "dumb", "expanded_terms": "dumb2"}])
- assert responses.calls[1].response.json() == {"expansions": "success"}
-
- discovery.delete_expansions('envid', 'colid')
- assert responses.calls[2].response.json() == {"description": "success"}
-
- assert len(responses.calls) == 3
-
-@responses.activate
-def test_delete_user_data():
- url = 'https://gateway.watsonplatform.net/discovery/api/v1/user_data'
- responses.add(
- responses.DELETE,
- url,
- body='{"description": "success" }',
- status=204,
- content_type='application_json')
-
- discovery = ibm_watson.DiscoveryV1('2017-11-07', username="username", password="password")
-
- response = discovery.delete_user_data('id').get_result()
- assert response is None
- assert len(responses.calls) == 1
-
-@responses.activate
-def test_credentials():
- discovery_credentials_url = urljoin(base_discovery_url, 'environments/envid/credentials')
-
- results = {'credential_id': 'e68305ce-29f3-48ea-b829-06653ca0fdef',
- 'source_type': 'salesforce',
- 'credential_details': {
- 'url': 'https://login.salesforce.com',
- 'credential_type': 'username_password',
- 'username':'user@email.com'}
- }
-
- iam_url = "https://iam.cloud.ibm.com/identity/token"
- iam_token_response = """{
- "access_token": "oAeisG8yqPY7sFR_x66Z15",
- "token_type": "Bearer",
- "expires_in": 3600,
- "expiration": 1524167011,
- "refresh_token": "jy4gl91BQ"
- }"""
- responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200)
- responses.add(responses.GET, "{0}/{1}?version=2016-11-07".format(discovery_credentials_url, 'credential_id'),
- body=json.dumps(results),
- status=200,
- content_type='application/json')
- responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_credentials_url),
- body=json.dumps([results]),
- status=200,
- content_type='application/json')
-
- responses.add(responses.POST, "{0}?version=2016-11-07".format(discovery_credentials_url),
- body=json.dumps(results),
- status=200,
- content_type='application/json')
- results['source_type'] = 'ibm'
- responses.add(responses.PUT, "{0}/{1}?version=2016-11-07".format(discovery_credentials_url, 'credential_id'),
- body=json.dumps(results),
- status=200,
- content_type='application/json')
- responses.add(responses.DELETE, "{0}/{1}?version=2016-11-07".format(discovery_credentials_url, 'credential_id'),
- body=json.dumps({'deleted': 'bogus -- ok'}),
- status=200,
- content_type='application/json')
-
- discovery = ibm_watson.DiscoveryV1('2016-11-07', iam_apikey='iam_apikey')
- discovery.create_credentials('envid', 'salesforce', {
- 'url': 'https://login.salesforce.com',
- 'credential_type': 'username_password',
- 'username':'user@email.com'
- })
-
- discovery.get_credentials('envid', 'credential_id')
-
- discovery.update_credentials(environment_id='envid',
- credential_id='credential_id',
- source_type='salesforce',
- credential_details=results['credential_details'])
- discovery.list_credentials('envid')
- discovery.delete_credentials(environment_id='envid', credential_id='credential_id')
- assert len(responses.calls) == 10
-
-@responses.activate
-def test_events_and_feedback():
- discovery_event_url = urljoin(base_discovery_url, 'events')
- discovery_metrics_event_rate_url = urljoin(base_discovery_url, 'metrics/event_rate')
- discovery_metrics_query_url = urljoin(base_discovery_url, 'metrics/number_of_queries')
- discovery_metrics_query_event_url = urljoin(base_discovery_url, 'metrics/number_of_queries_with_event')
- discovery_metrics_query_no_results_url = urljoin(base_discovery_url, 'metrics/number_of_queries_with_no_search_results')
- discovery_metrics_query_token_event_url = urljoin(base_discovery_url, 'metrics/top_query_tokens_with_event_rate')
- discovery_query_log_url = urljoin(base_discovery_url, 'logs')
-
- event_data = {
- "environment_id": "xxx",
- "session_token": "yyy",
- "client_timestamp": "2018-08-14T14:39:59.268Z",
- "display_rank": 0,
- "collection_id": "abc",
- "document_id": "xyz",
- "query_id": "cde"
- }
-
- create_event_response = {
- "type": "click",
- "data": event_data
- }
-
- metric_response = {
- "aggregations": [
- {
- "interval": "1d",
- "event_type": "click",
- "results": [
- {
- "key_as_string": "2018-08-14T14:39:59.309Z",
- "key": 1533513600000,
- "matching_results": 2,
- "event_rate": 0.0
- }
- ]
- }
- ]
- }
-
- metric_token_response = {
- "aggregations": [
- {
- "event_type": "click",
- "results": [
- {
- "key": "content",
- "matching_results": 5,
- "event_rate": 0.6
- },
- {
- "key": "first",
- "matching_results": 5,
- "event_rate": 0.6
- },
- {
- "key": "of",
- "matching_results": 5,
- "event_rate": 0.6
- }
- ]
- }
- ]
- }
-
- log_query_response = {
- "matching_results": 20,
- "results": [
- {
- "customer_id": "",
- "environment_id": "xxx",
- "natural_language_query": "The content of the first chapter",
- "query_id": "1ICUdh3Pab",
- "document_results": {
- "count": 1,
- "results": [
- {
- "collection_id": "b67a82f3-6507-4c25-9757-3485ff4f2a32",
- "score": 0.025773458,
- "position": 10,
- "document_id": "af0be20e-e130-4712-9a2e-37d9c8b9c52f"
- }
- ]
- },
- "event_type": "query",
- "session_token": "1_nbEfQtKVcg9qx3t41ICUdh3Pab",
- "created_timestamp": "2018-08-14T18:20:30.460Z"
- }
- ]
- }
-
- iam_url = "https://iam.cloud.ibm.com/identity/token"
- iam_token_response = """{
- "access_token": "oAeisG8yqPY7sFR_x66Z15",
- "token_type": "Bearer",
- "expires_in": 3600,
- "expiration": 1524167011,
- "refresh_token": "jy4gl91BQ"
- }"""
- responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200)
-
- responses.add(responses.POST, "{0}?version=2016-11-07".format(discovery_event_url),
- body=json.dumps(create_event_response),
- status=200,
- content_type='application/json')
-
- responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_metrics_event_rate_url),
- body=json.dumps(metric_response),
- status=200,
- content_type='application/json')
-
- responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_metrics_query_url),
- body=json.dumps(metric_response),
- status=200,
- content_type='application/json')
-
- responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_metrics_query_event_url),
- body=json.dumps(metric_response),
- status=200,
- content_type='application/json')
- responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_metrics_query_no_results_url),
- body=json.dumps(metric_response),
- status=200,
- content_type='application/json')
- responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_metrics_query_token_event_url),
- body=json.dumps(metric_token_response),
- status=200,
- content_type='application/json')
- responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_query_log_url),
- body=json.dumps(log_query_response),
- status=200,
- content_type='application/json')
-
-
- discovery = ibm_watson.DiscoveryV1('2016-11-07', iam_apikey='iam_apikey')
-
- discovery.create_event('click', event_data)
- assert responses.calls[1].response.json()["data"] == event_data
-
- discovery.get_metrics_event_rate('2018-08-13T14:39:59.309Z',
- '2018-08-14T14:39:59.309Z',
- 'document')
- assert responses.calls[3].response.json() == metric_response
-
- discovery.get_metrics_query('2018-08-13T14:39:59.309Z',
- '2018-08-14T14:39:59.309Z',
- 'document')
- assert responses.calls[5].response.json() == metric_response
-
- discovery.get_metrics_query_event('2018-08-13T14:39:59.309Z',
- '2018-08-14T14:39:59.309Z',
- 'document')
- assert responses.calls[7].response.json() == metric_response
-
- discovery.get_metrics_query_no_results('2018-08-13T14:39:59.309Z',
- '2018-08-14T14:39:59.309Z',
- 'document')
- assert responses.calls[9].response.json() == metric_response
-
- discovery.get_metrics_query_token_event(2)
- assert responses.calls[11].response.json() == metric_token_response
-
- discovery.query_log()
- assert responses.calls[13].response.json() == log_query_response
-
- assert len(responses.calls) == 14
-
-@responses.activate
-def test_tokenization_dictionary():
- url = 'https://gateway.watsonplatform.net/discovery/api/v1/environments/envid/collections/colid/word_lists/tokenization_dictionary?version=2017-11-07'
- responses.add(
- responses.POST,
- url,
- body='{"status": "pending"}',
- status=200,
- content_type='application_json')
- responses.add(
- responses.DELETE,
- url,
- body='{"status": "pending"}',
- status=200)
- responses.add(
- responses.GET,
- url,
- body='{"status": "pending", "type":"tokenization_dictionary"}',
- status=200,
- content_type='application_json')
-
- discovery = ibm_watson.DiscoveryV1('2017-11-07', username="username", password="password")
-
- tokenization_rules = [
- {
- 'text': 'token',
- 'tokens': ['token 1', 'token 2'],
- 'readings': ['reading 1', 'reading 2'],
- 'part_of_speech': 'noun',
- }
- ]
- discovery.create_tokenization_dictionary('envid', 'colid', tokenization_rules)
- assert responses.calls[0].response.json() == {"status": "pending"}
-
- discovery.get_tokenization_dictionary_status('envid', 'colid')
- assert responses.calls[1].response.json() == {"status": "pending", "type":"tokenization_dictionary"}
-
- discovery.delete_tokenization_dictionary('envid', 'colid')
- assert responses.calls[2].response.status_code == 200
-
- assert len(responses.calls) == 3
-
-@responses.activate
-def test_stopword_operations():
- url = 'https://gateway.watsonplatform.net/discovery/api/v1/environments/envid/collections/colid/word_lists/stopwords?version=2017-11-07'
- responses.add(
- responses.POST,
- url,
- body='{"status": "pending", "type": "stopwords"}',
- status=200,
- content_type='application_json')
- responses.add(
- responses.DELETE,
- url,
- status=200)
- responses.add(
- responses.GET,
- url,
- body='{"status": "ready", "type": "stopwords"}',
- status=200,
- content_type='application_json')
-
- discovery = ibm_watson.DiscoveryV1('2017-11-07', username="username", password="password")
-
- stopwords_file_path = os.path.join(os.getcwd(), 'resources', 'stopwords.txt')
- with open(stopwords_file_path) as file:
- discovery.create_stopword_list('envid', 'colid', file)
- assert responses.calls[0].response.json() == {"status": "pending", "type": "stopwords"}
-
- discovery.get_stopword_list_status('envid', 'colid')
- assert responses.calls[1].response.json() == {"status": "ready", "type": "stopwords"}
-
- discovery.delete_stopword_list('envid', 'colid')
- assert responses.calls[2].response.status_code == 200
-
- assert len(responses.calls) == 3
-
-@responses.activate
-def test_gateway_configuration():
- discovery_gateway_url = urljoin(base_discovery_url, 'environments/envid/gateways')
-
- gateway_details = {
- "status": "idle",
- "token_id": "9GnaCreixek_prod_ng",
- "token": "4FByv9Mmd79x6c",
- "name": "test-gateway-configuration-python",
- "gateway_id": "gateway_id"
- }
-
- iam_url = "https://iam.cloud.ibm.com/identity/token"
- iam_token_response = """{
- "access_token": "oAeisG8yqPY7sFR_x66Z15",
- "token_type": "Bearer",
- "expires_in": 3600,
- "expiration": 1524167011,
- "refresh_token": "jy4gl91BQ"
- }"""
- responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200)
- responses.add(responses.GET, "{0}/{1}?version=2016-11-07".format(discovery_gateway_url, 'gateway_id'),
- body=json.dumps(gateway_details),
- status=200,
- content_type='application/json')
- responses.add(responses.POST, "{0}?version=2016-11-07".format(discovery_gateway_url),
- body=json.dumps(gateway_details),
- status=200,
- content_type='application/json')
- responses.add(responses.GET, "{0}?version=2016-11-07".format(discovery_gateway_url),
- body=json.dumps({'gateways': [gateway_details]}),
- status=200,
- content_type='application/json')
- responses.add(responses.DELETE, "{0}/{1}?version=2016-11-07".format(discovery_gateway_url, 'gateway_id'),
- body=json.dumps({'gateway_id': 'gateway_id', 'status': 'deleted'}),
- status=200,
- content_type='application/json')
-
- discovery = ibm_watson.DiscoveryV1('2016-11-07', iam_apikey='iam_apikey')
-
- discovery.create_gateway('envid', 'gateway_id')
- discovery.list_gateways('envid')
- discovery.get_gateway('envid', 'gateway_id')
- discovery.delete_gateway(environment_id='envid', gateway_id='gateway_id')
- assert len(responses.calls) == 8
diff --git a/test/unit/test_discovery_v2.py b/test/unit/test_discovery_v2.py
new file mode 100644
index 000000000..10401163d
--- /dev/null
+++ b/test/unit/test_discovery_v2.py
@@ -0,0 +1,9960 @@
+# -*- coding: utf-8 -*-
+# (C) Copyright IBM Corp. 2019, 2024.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Unit Tests for DiscoveryV2
+"""
+
+from datetime import datetime, timezone
+from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
+from ibm_cloud_sdk_core.utils import datetime_to_string, string_to_datetime
+import inspect
+import io
+import json
+import pytest
+import re
+import requests
+import responses
+import tempfile
+import urllib
+from ibm_watson.discovery_v2 import *
+
+version = 'testString'
+
+_service = DiscoveryV2(
+ authenticator=NoAuthAuthenticator(),
+ version=version,
+)
+
+_base_url = 'https://api.us-south.discovery.watson.cloud.ibm.com'
+_service.set_service_url(_base_url)
+
+
+def preprocess_url(operation_path: str):
+ """
+ Returns the request url associated with the specified operation path.
+ This will be base_url concatenated with a quoted version of operation_path.
+ The returned request URL is used to register the mock response so it needs
+ to match the request URL that is formed by the requests library.
+ """
+
+ # Form the request URL from the base URL and operation path.
+ request_url = _base_url + operation_path
+
+ # If the request url does NOT end with a /, then just return it as-is.
+ # Otherwise, return a regular expression that matches one or more trailing /.
+ if not request_url.endswith('/'):
+ return request_url
+ return re.compile(request_url.rstrip('/') + '/+')
+
+
+##############################################################################
+# Start of Service: Projects
+##############################################################################
+# region
+
+
+class TestListProjects:
+ """
+ Test Class for list_projects
+ """
+
+ @responses.activate
+ def test_list_projects_all_params(self):
+ """
+ list_projects()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects')
+ mock_response = '{"projects": [{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Invoke method
+ response = _service.list_projects()
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_projects_all_params_with_retries(self):
+ # Enable retries and run test_list_projects_all_params.
+ _service.enable_retries()
+ self.test_list_projects_all_params()
+
+ # Disable retries and run test_list_projects_all_params.
+ _service.disable_retries()
+ self.test_list_projects_all_params()
+
+ @responses.activate
+ def test_list_projects_value_error(self):
+ """
+ test_list_projects_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects')
+ mock_response = '{"projects": [{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.list_projects(**req_copy)
+
+ def test_list_projects_value_error_with_retries(self):
+ # Enable retries and run test_list_projects_value_error.
+ _service.enable_retries()
+ self.test_list_projects_value_error()
+
+ # Disable retries and run test_list_projects_value_error.
+ _service.disable_retries()
+ self.test_list_projects_value_error()
+
+
+class TestCreateProject:
+ """
+ Test Class for create_project
+ """
+
+ @responses.activate
+ def test_create_project_all_params(self):
+ """
+ create_project()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects')
+ mock_response = '{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16, "default_query_parameters": {"collection_ids": ["collection_ids"], "passages": {"enabled": false, "count": 5, "fields": ["fields"], "characters": 10, "per_document": true, "max_per_document": 16}, "table_results": {"enabled": false, "count": 5, "per_document": 0}, "aggregation": "aggregation", "suggested_refinements": {"enabled": false, "count": 5}, "spelling_suggestions": true, "highlight": false, "count": 5, "sort": "sort", "return": ["return_"]}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Construct a dict representation of a DefaultQueryParamsPassages model
+ default_query_params_passages_model = {}
+ default_query_params_passages_model['enabled'] = True
+ default_query_params_passages_model['count'] = 38
+ default_query_params_passages_model['fields'] = ['testString']
+ default_query_params_passages_model['characters'] = 38
+ default_query_params_passages_model['per_document'] = True
+ default_query_params_passages_model['max_per_document'] = 38
+
+ # Construct a dict representation of a DefaultQueryParamsTableResults model
+ default_query_params_table_results_model = {}
+ default_query_params_table_results_model['enabled'] = True
+ default_query_params_table_results_model['count'] = 38
+ default_query_params_table_results_model['per_document'] = 0
+
+ # Construct a dict representation of a DefaultQueryParamsSuggestedRefinements model
+ default_query_params_suggested_refinements_model = {}
+ default_query_params_suggested_refinements_model['enabled'] = True
+ default_query_params_suggested_refinements_model['count'] = 38
+
+ # Construct a dict representation of a DefaultQueryParams model
+ default_query_params_model = {}
+ default_query_params_model['collection_ids'] = ['testString']
+ default_query_params_model['passages'] = default_query_params_passages_model
+ default_query_params_model['table_results'] = default_query_params_table_results_model
+ default_query_params_model['aggregation'] = 'testString'
+ default_query_params_model['suggested_refinements'] = default_query_params_suggested_refinements_model
+ default_query_params_model['spelling_suggestions'] = True
+ default_query_params_model['highlight'] = True
+ default_query_params_model['count'] = 38
+ default_query_params_model['sort'] = 'testString'
+ default_query_params_model['return'] = ['testString']
+
+ # Set up parameter values
+ name = 'testString'
+ type = 'intelligent_document_processing'
+ default_query_parameters = default_query_params_model
+
+ # Invoke method
+ response = _service.create_project(
+ name,
+ type,
+ default_query_parameters=default_query_parameters,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['name'] == 'testString'
+ assert req_body['type'] == 'intelligent_document_processing'
+ assert req_body['default_query_parameters'] == default_query_params_model
+
+ def test_create_project_all_params_with_retries(self):
+ # Enable retries and run test_create_project_all_params.
+ _service.enable_retries()
+ self.test_create_project_all_params()
+
+ # Disable retries and run test_create_project_all_params.
+ _service.disable_retries()
+ self.test_create_project_all_params()
+
+ @responses.activate
+ def test_create_project_value_error(self):
+ """
+ test_create_project_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects')
+ mock_response = '{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16, "default_query_parameters": {"collection_ids": ["collection_ids"], "passages": {"enabled": false, "count": 5, "fields": ["fields"], "characters": 10, "per_document": true, "max_per_document": 16}, "table_results": {"enabled": false, "count": 5, "per_document": 0}, "aggregation": "aggregation", "suggested_refinements": {"enabled": false, "count": 5}, "spelling_suggestions": true, "highlight": false, "count": 5, "sort": "sort", "return": ["return_"]}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Construct a dict representation of a DefaultQueryParamsPassages model
+ default_query_params_passages_model = {}
+ default_query_params_passages_model['enabled'] = True
+ default_query_params_passages_model['count'] = 38
+ default_query_params_passages_model['fields'] = ['testString']
+ default_query_params_passages_model['characters'] = 38
+ default_query_params_passages_model['per_document'] = True
+ default_query_params_passages_model['max_per_document'] = 38
+
+ # Construct a dict representation of a DefaultQueryParamsTableResults model
+ default_query_params_table_results_model = {}
+ default_query_params_table_results_model['enabled'] = True
+ default_query_params_table_results_model['count'] = 38
+ default_query_params_table_results_model['per_document'] = 0
+
+ # Construct a dict representation of a DefaultQueryParamsSuggestedRefinements model
+ default_query_params_suggested_refinements_model = {}
+ default_query_params_suggested_refinements_model['enabled'] = True
+ default_query_params_suggested_refinements_model['count'] = 38
+
+ # Construct a dict representation of a DefaultQueryParams model
+ default_query_params_model = {}
+ default_query_params_model['collection_ids'] = ['testString']
+ default_query_params_model['passages'] = default_query_params_passages_model
+ default_query_params_model['table_results'] = default_query_params_table_results_model
+ default_query_params_model['aggregation'] = 'testString'
+ default_query_params_model['suggested_refinements'] = default_query_params_suggested_refinements_model
+ default_query_params_model['spelling_suggestions'] = True
+ default_query_params_model['highlight'] = True
+ default_query_params_model['count'] = 38
+ default_query_params_model['sort'] = 'testString'
+ default_query_params_model['return'] = ['testString']
+
+ # Set up parameter values
+ name = 'testString'
+ type = 'intelligent_document_processing'
+ default_query_parameters = default_query_params_model
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "name": name,
+ "type": type,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.create_project(**req_copy)
+
+ def test_create_project_value_error_with_retries(self):
+ # Enable retries and run test_create_project_value_error.
+ _service.enable_retries()
+ self.test_create_project_value_error()
+
+ # Disable retries and run test_create_project_value_error.
+ _service.disable_retries()
+ self.test_create_project_value_error()
+
+
+class TestGetProject:
+ """
+ Test Class for get_project
+ """
+
+ @responses.activate
+ def test_get_project_all_params(self):
+ """
+ get_project()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString')
+ mock_response = '{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16, "default_query_parameters": {"collection_ids": ["collection_ids"], "passages": {"enabled": false, "count": 5, "fields": ["fields"], "characters": 10, "per_document": true, "max_per_document": 16}, "table_results": {"enabled": false, "count": 5, "per_document": 0}, "aggregation": "aggregation", "suggested_refinements": {"enabled": false, "count": 5}, "spelling_suggestions": true, "highlight": false, "count": 5, "sort": "sort", "return": ["return_"]}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+
+ # Invoke method
+ response = _service.get_project(
+ project_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_get_project_all_params_with_retries(self):
+ # Enable retries and run test_get_project_all_params.
+ _service.enable_retries()
+ self.test_get_project_all_params()
+
+ # Disable retries and run test_get_project_all_params.
+ _service.disable_retries()
+ self.test_get_project_all_params()
+
+ @responses.activate
+ def test_get_project_value_error(self):
+ """
+ test_get_project_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString')
+ mock_response = '{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16, "default_query_parameters": {"collection_ids": ["collection_ids"], "passages": {"enabled": false, "count": 5, "fields": ["fields"], "characters": 10, "per_document": true, "max_per_document": 16}, "table_results": {"enabled": false, "count": 5, "per_document": 0}, "aggregation": "aggregation", "suggested_refinements": {"enabled": false, "count": 5}, "spelling_suggestions": true, "highlight": false, "count": 5, "sort": "sort", "return": ["return_"]}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "project_id": project_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.get_project(**req_copy)
+
+ def test_get_project_value_error_with_retries(self):
+ # Enable retries and run test_get_project_value_error.
+ _service.enable_retries()
+ self.test_get_project_value_error()
+
+ # Disable retries and run test_get_project_value_error.
+ _service.disable_retries()
+ self.test_get_project_value_error()
+
+
+class TestUpdateProject:
+ """
+ Test Class for update_project
+ """
+
+ @responses.activate
+ def test_update_project_all_params(self):
+ """
+ update_project()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString')
+ mock_response = '{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16, "default_query_parameters": {"collection_ids": ["collection_ids"], "passages": {"enabled": false, "count": 5, "fields": ["fields"], "characters": 10, "per_document": true, "max_per_document": 16}, "table_results": {"enabled": false, "count": 5, "per_document": 0}, "aggregation": "aggregation", "suggested_refinements": {"enabled": false, "count": 5}, "spelling_suggestions": true, "highlight": false, "count": 5, "sort": "sort", "return": ["return_"]}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+ name = 'testString'
+
+ # Invoke method
+ response = _service.update_project(
+ project_id,
+ name=name,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['name'] == 'testString'
+
+ def test_update_project_all_params_with_retries(self):
+ # Enable retries and run test_update_project_all_params.
+ _service.enable_retries()
+ self.test_update_project_all_params()
+
+ # Disable retries and run test_update_project_all_params.
+ _service.disable_retries()
+ self.test_update_project_all_params()
+
+ @responses.activate
+ def test_update_project_required_params(self):
+ """
+ test_update_project_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString')
+ mock_response = '{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16, "default_query_parameters": {"collection_ids": ["collection_ids"], "passages": {"enabled": false, "count": 5, "fields": ["fields"], "characters": 10, "per_document": true, "max_per_document": 16}, "table_results": {"enabled": false, "count": 5, "per_document": 0}, "aggregation": "aggregation", "suggested_refinements": {"enabled": false, "count": 5}, "spelling_suggestions": true, "highlight": false, "count": 5, "sort": "sort", "return": ["return_"]}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+
+ # Invoke method
+ response = _service.update_project(
+ project_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_update_project_required_params_with_retries(self):
+ # Enable retries and run test_update_project_required_params.
+ _service.enable_retries()
+ self.test_update_project_required_params()
+
+ # Disable retries and run test_update_project_required_params.
+ _service.disable_retries()
+ self.test_update_project_required_params()
+
+ @responses.activate
+ def test_update_project_value_error(self):
+ """
+ test_update_project_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString')
+ mock_response = '{"project_id": "project_id", "name": "name", "type": "intelligent_document_processing", "relevancy_training_status": {"data_updated": "data_updated", "total_examples": 14, "sufficient_label_diversity": true, "processing": true, "minimum_examples_added": true, "successfully_trained": "successfully_trained", "available": false, "notices": 7, "minimum_queries_added": false}, "collection_count": 16, "default_query_parameters": {"collection_ids": ["collection_ids"], "passages": {"enabled": false, "count": 5, "fields": ["fields"], "characters": 10, "per_document": true, "max_per_document": 16}, "table_results": {"enabled": false, "count": 5, "per_document": 0}, "aggregation": "aggregation", "suggested_refinements": {"enabled": false, "count": 5}, "spelling_suggestions": true, "highlight": false, "count": 5, "sort": "sort", "return": ["return_"]}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "project_id": project_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.update_project(**req_copy)
+
+ def test_update_project_value_error_with_retries(self):
+ # Enable retries and run test_update_project_value_error.
+ _service.enable_retries()
+ self.test_update_project_value_error()
+
+ # Disable retries and run test_update_project_value_error.
+ _service.disable_retries()
+ self.test_update_project_value_error()
+
+
+class TestDeleteProject:
+ """
+ Test Class for delete_project
+ """
+
+ @responses.activate
+ def test_delete_project_all_params(self):
+ """
+ delete_project()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=204,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+
+ # Invoke method
+ response = _service.delete_project(
+ project_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 204
+
+ def test_delete_project_all_params_with_retries(self):
+ # Enable retries and run test_delete_project_all_params.
+ _service.enable_retries()
+ self.test_delete_project_all_params()
+
+ # Disable retries and run test_delete_project_all_params.
+ _service.disable_retries()
+ self.test_delete_project_all_params()
+
+ @responses.activate
+ def test_delete_project_value_error(self):
+ """
+ test_delete_project_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=204,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "project_id": project_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_project(**req_copy)
+
+ def test_delete_project_value_error_with_retries(self):
+ # Enable retries and run test_delete_project_value_error.
+ _service.enable_retries()
+ self.test_delete_project_value_error()
+
+ # Disable retries and run test_delete_project_value_error.
+ _service.disable_retries()
+ self.test_delete_project_value_error()
+
+
+class TestListFields:
+    """
+    Test Class for list_fields
+    """
+
+    @responses.activate
+    def test_list_fields_all_params(self):
+        """
+        list_fields()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/fields')
+        mock_response = '{"fields": [{"field": "field", "type": "nested", "collection_id": "collection_id"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_ids = ['testString']
+
+        # Invoke method
+        response = _service.list_fields(
+            project_id,
+            collection_ids=collection_ids,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)
+        assert 'collection_ids={}'.format(','.join(collection_ids)) in query_string
+
+    def test_list_fields_all_params_with_retries(self):
+        # Enable retries and run test_list_fields_all_params.
+        _service.enable_retries()
+        self.test_list_fields_all_params()
+
+        # Disable retries and run test_list_fields_all_params.
+        _service.disable_retries()
+        self.test_list_fields_all_params()
+
+    @responses.activate
+    def test_list_fields_required_params(self):
+        """
+        test_list_fields_required_params()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/fields')
+        mock_response = '{"fields": [{"field": "field", "type": "nested", "collection_id": "collection_id"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+
+        # Invoke method
+        response = _service.list_fields(
+            project_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_list_fields_required_params_with_retries(self):
+        # Enable retries and run test_list_fields_required_params.
+        _service.enable_retries()
+        self.test_list_fields_required_params()
+
+        # Disable retries and run test_list_fields_required_params.
+        _service.disable_retries()
+        self.test_list_fields_required_params()
+
+    @responses.activate
+    def test_list_fields_value_error(self):
+        """
+        test_list_fields_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/fields')
+        mock_response = '{"fields": [{"field": "field", "type": "nested", "collection_id": "collection_id"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.list_fields(**req_copy)
+
+    def test_list_fields_value_error_with_retries(self):
+        # Enable retries and run test_list_fields_value_error.
+        _service.enable_retries()
+        self.test_list_fields_value_error()
+
+        # Disable retries and run test_list_fields_value_error.
+        _service.disable_retries()
+        self.test_list_fields_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Projects
+##############################################################################
+
+##############################################################################
+# Start of Service: Collections
+##############################################################################
+# region
+
+
+class TestListCollections:
+    """
+    Test Class for list_collections
+    """
+
+    @responses.activate
+    def test_list_collections_all_params(self):
+        """
+        list_collections()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections')
+        mock_response = '{"collections": [{"collection_id": "collection_id", "name": "name"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+
+        # Invoke method
+        response = _service.list_collections(
+            project_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_list_collections_all_params_with_retries(self):
+        # Enable retries and run test_list_collections_all_params.
+        _service.enable_retries()
+        self.test_list_collections_all_params()
+
+        # Disable retries and run test_list_collections_all_params.
+        _service.disable_retries()
+        self.test_list_collections_all_params()
+
+    @responses.activate
+    def test_list_collections_value_error(self):
+        """
+        test_list_collections_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections')
+        mock_response = '{"collections": [{"collection_id": "collection_id", "name": "name"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.list_collections(**req_copy)
+
+    def test_list_collections_value_error_with_retries(self):
+        # Enable retries and run test_list_collections_value_error.
+        _service.enable_retries()
+        self.test_list_collections_value_error()
+
+        # Disable retries and run test_list_collections_value_error.
+        _service.disable_retries()
+        self.test_list_collections_value_error()
+
+
+class TestCreateCollection:
+    """
+    Test Class for create_collection
+    """
+
+    @responses.activate
+    def test_create_collection_all_params(self):
+        """
+        create_collection()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections')
+        mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "ocr_enabled": false, "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "smart_document_understanding": {"enabled": false, "model": "custom"}}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Construct a dict representation of a CollectionEnrichment model
+        collection_enrichment_model = {}
+        collection_enrichment_model['enrichment_id'] = 'testString'
+        collection_enrichment_model['fields'] = ['testString']
+
+        # Set up parameter values
+        project_id = 'testString'
+        name = 'testString'
+        description = 'testString'
+        language = 'en'
+        ocr_enabled = False
+        enrichments = [collection_enrichment_model]
+
+        # Invoke method
+        response = _service.create_collection(
+            project_id,
+            name,
+            description=description,
+            language=language,
+            ocr_enabled=ocr_enabled,
+            enrichments=enrichments,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+        assert req_body['name'] == 'testString'
+        assert req_body['description'] == 'testString'
+        assert req_body['language'] == 'en'
+        assert req_body['ocr_enabled'] is False
+        assert req_body['enrichments'] == [collection_enrichment_model]
+
+    def test_create_collection_all_params_with_retries(self):
+        # Enable retries and run test_create_collection_all_params.
+        _service.enable_retries()
+        self.test_create_collection_all_params()
+
+        # Disable retries and run test_create_collection_all_params.
+        _service.disable_retries()
+        self.test_create_collection_all_params()
+
+    @responses.activate
+    def test_create_collection_value_error(self):
+        """
+        test_create_collection_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections')
+        mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "ocr_enabled": false, "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "smart_document_understanding": {"enabled": false, "model": "custom"}}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Construct a dict representation of a CollectionEnrichment model
+        collection_enrichment_model = {}
+        collection_enrichment_model['enrichment_id'] = 'testString'
+        collection_enrichment_model['fields'] = ['testString']
+
+        # Set up parameter values
+        project_id = 'testString'
+        name = 'testString'
+        description = 'testString'
+        language = 'en'
+        ocr_enabled = False
+        enrichments = [collection_enrichment_model]
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "name": name,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.create_collection(**req_copy)
+
+    def test_create_collection_value_error_with_retries(self):
+        # Enable retries and run test_create_collection_value_error.
+        _service.enable_retries()
+        self.test_create_collection_value_error()
+
+        # Disable retries and run test_create_collection_value_error.
+        _service.disable_retries()
+        self.test_create_collection_value_error()
+
+
+class TestGetCollection:
+    """
+    Test Class for get_collection
+    """
+
+    @responses.activate
+    def test_get_collection_all_params(self):
+        """
+        get_collection()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString')
+        mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "ocr_enabled": false, "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "smart_document_understanding": {"enabled": false, "model": "custom"}}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+
+        # Invoke method
+        response = _service.get_collection(
+            project_id,
+            collection_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_get_collection_all_params_with_retries(self):
+        # Enable retries and run test_get_collection_all_params.
+        _service.enable_retries()
+        self.test_get_collection_all_params()
+
+        # Disable retries and run test_get_collection_all_params.
+        _service.disable_retries()
+        self.test_get_collection_all_params()
+
+    @responses.activate
+    def test_get_collection_value_error(self):
+        """
+        test_get_collection_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString')
+        mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "ocr_enabled": false, "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "smart_document_understanding": {"enabled": false, "model": "custom"}}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "collection_id": collection_id,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.get_collection(**req_copy)
+
+    def test_get_collection_value_error_with_retries(self):
+        # Enable retries and run test_get_collection_value_error.
+        _service.enable_retries()
+        self.test_get_collection_value_error()
+
+        # Disable retries and run test_get_collection_value_error.
+        _service.disable_retries()
+        self.test_get_collection_value_error()
+
+
+class TestUpdateCollection:
+    """
+    Test Class for update_collection
+    """
+
+    @responses.activate
+    def test_update_collection_all_params(self):
+        """
+        update_collection()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString')
+        mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "ocr_enabled": false, "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "smart_document_understanding": {"enabled": false, "model": "custom"}}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Construct a dict representation of a CollectionEnrichment model
+        collection_enrichment_model = {}
+        collection_enrichment_model['enrichment_id'] = 'testString'
+        collection_enrichment_model['fields'] = ['testString']
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+        name = 'testString'
+        description = 'testString'
+        ocr_enabled = False
+        enrichments = [collection_enrichment_model]
+
+        # Invoke method
+        response = _service.update_collection(
+            project_id,
+            collection_id,
+            name=name,
+            description=description,
+            ocr_enabled=ocr_enabled,
+            enrichments=enrichments,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+        assert req_body['name'] == 'testString'
+        assert req_body['description'] == 'testString'
+        assert req_body['ocr_enabled'] is False
+        assert req_body['enrichments'] == [collection_enrichment_model]
+
+    def test_update_collection_all_params_with_retries(self):
+        # Enable retries and run test_update_collection_all_params.
+        _service.enable_retries()
+        self.test_update_collection_all_params()
+
+        # Disable retries and run test_update_collection_all_params.
+        _service.disable_retries()
+        self.test_update_collection_all_params()
+
+    @responses.activate
+    def test_update_collection_value_error(self):
+        """
+        test_update_collection_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString')
+        mock_response = '{"collection_id": "collection_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "ocr_enabled": false, "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "smart_document_understanding": {"enabled": false, "model": "custom"}}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Construct a dict representation of a CollectionEnrichment model
+        collection_enrichment_model = {}
+        collection_enrichment_model['enrichment_id'] = 'testString'
+        collection_enrichment_model['fields'] = ['testString']
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+        name = 'testString'
+        description = 'testString'
+        ocr_enabled = False
+        enrichments = [collection_enrichment_model]
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "collection_id": collection_id,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.update_collection(**req_copy)
+
+    def test_update_collection_value_error_with_retries(self):
+        # Enable retries and run test_update_collection_value_error.
+        _service.enable_retries()
+        self.test_update_collection_value_error()
+
+        # Disable retries and run test_update_collection_value_error.
+        _service.disable_retries()
+        self.test_update_collection_value_error()
+
+
+class TestDeleteCollection:
+    """
+    Test Class for delete_collection
+    """
+
+    @responses.activate
+    def test_delete_collection_all_params(self):
+        """
+        delete_collection()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=204,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+
+        # Invoke method
+        response = _service.delete_collection(
+            project_id,
+            collection_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 204
+
+    def test_delete_collection_all_params_with_retries(self):
+        # Enable retries and run test_delete_collection_all_params.
+        _service.enable_retries()
+        self.test_delete_collection_all_params()
+
+        # Disable retries and run test_delete_collection_all_params.
+        _service.disable_retries()
+        self.test_delete_collection_all_params()
+
+    @responses.activate
+    def test_delete_collection_value_error(self):
+        """
+        test_delete_collection_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=204,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "collection_id": collection_id,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.delete_collection(**req_copy)
+
+    def test_delete_collection_value_error_with_retries(self):
+        # Enable retries and run test_delete_collection_value_error.
+        _service.enable_retries()
+        self.test_delete_collection_value_error()
+
+        # Disable retries and run test_delete_collection_value_error.
+        _service.disable_retries()
+        self.test_delete_collection_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Collections
+##############################################################################
+
+##############################################################################
+# Start of Service: Documents
+##############################################################################
+# region
+
+
+class TestListDocuments:
+    """
+    Test Class for list_documents
+    """
+
+    @responses.activate
+    def test_list_documents_all_params(self):
+        """
+        list_documents()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/documents')
+        mock_response = '{"matching_results": 16, "documents": [{"document_id": "document_id", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "available", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "children": {"have_notices": true, "count": 5}, "filename": "filename", "file_type": "file_type", "sha256": "sha256"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+        count = 1000
+        status = 'testString'
+        has_notices = True
+        is_parent = True
+        parent_document_id = 'testString'
+        sha256 = 'testString'
+
+        # Invoke method
+        response = _service.list_documents(
+            project_id,
+            collection_id,
+            count=count,
+            status=status,
+            has_notices=has_notices,
+            is_parent=is_parent,
+            parent_document_id=parent_document_id,
+            sha256=sha256,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)
+        assert 'count={}'.format(count) in query_string
+        assert 'status={}'.format(status) in query_string
+        assert 'has_notices={}'.format('true' if has_notices else 'false') in query_string
+        assert 'is_parent={}'.format('true' if is_parent else 'false') in query_string
+        assert 'parent_document_id={}'.format(parent_document_id) in query_string
+        assert 'sha256={}'.format(sha256) in query_string
+
+    def test_list_documents_all_params_with_retries(self):
+        # Enable retries and run test_list_documents_all_params.
+        _service.enable_retries()
+        self.test_list_documents_all_params()
+
+        # Disable retries and run test_list_documents_all_params.
+        _service.disable_retries()
+        self.test_list_documents_all_params()
+
+    @responses.activate
+    def test_list_documents_required_params(self):
+        """
+        test_list_documents_required_params()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/documents')
+        mock_response = '{"matching_results": 16, "documents": [{"document_id": "document_id", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "available", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "children": {"have_notices": true, "count": 5}, "filename": "filename", "file_type": "file_type", "sha256": "sha256"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+
+        # Invoke method
+        response = _service.list_documents(
+            project_id,
+            collection_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_list_documents_required_params_with_retries(self):
+        # Enable retries and run test_list_documents_required_params.
+        _service.enable_retries()
+        self.test_list_documents_required_params()
+
+        # Disable retries and run test_list_documents_required_params.
+        _service.disable_retries()
+        self.test_list_documents_required_params()
+
+    @responses.activate
+    def test_list_documents_value_error(self):
+        """
+        test_list_documents_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/documents')
+        mock_response = '{"matching_results": 16, "documents": [{"document_id": "document_id", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "available", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "children": {"have_notices": true, "count": 5}, "filename": "filename", "file_type": "file_type", "sha256": "sha256"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "collection_id": collection_id,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.list_documents(**req_copy)
+
+    def test_list_documents_value_error_with_retries(self):
+        # Enable retries and run test_list_documents_value_error.
+        _service.enable_retries()
+        self.test_list_documents_value_error()
+
+        # Disable retries and run test_list_documents_value_error.
+        _service.disable_retries()
+        self.test_list_documents_value_error()
+
+
+class TestAddDocument:
+    """
+    Test Class for add_document
+    """
+
+    @responses.activate
+    def test_add_document_all_params(self):
+        """
+        add_document()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/documents')
+        mock_response = '{"document_id": "document_id", "status": "processing"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=202,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+        file = io.BytesIO(b'This is a mock file.').getvalue()
+        filename = 'testString'
+        file_content_type = 'application/json'
+        metadata = 'testString'
+        x_watson_discovery_force = False
+
+        # Invoke method
+        response = _service.add_document(
+            project_id,
+            collection_id,
+            file=file,
+            filename=filename,
+            file_content_type=file_content_type,
+            metadata=metadata,
+            x_watson_discovery_force=x_watson_discovery_force,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 202
+
+    def test_add_document_all_params_with_retries(self):
+        # Enable retries and run test_add_document_all_params.
+        _service.enable_retries()
+        self.test_add_document_all_params()
+
+        # Disable retries and run test_add_document_all_params.
+        _service.disable_retries()
+        self.test_add_document_all_params()
+
+    @responses.activate
+    def test_add_document_required_params(self):
+        """
+        test_add_document_required_params()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/documents')
+        mock_response = '{"document_id": "document_id", "status": "processing"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=202,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+
+        # Invoke method
+        response = _service.add_document(
+            project_id,
+            collection_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 202
+
+    def test_add_document_required_params_with_retries(self):
+        # Enable retries and run test_add_document_required_params.
+        _service.enable_retries()
+        self.test_add_document_required_params()
+
+        # Disable retries and run test_add_document_required_params.
+        _service.disable_retries()
+        self.test_add_document_required_params()
+
+    @responses.activate
+    def test_add_document_value_error(self):
+        """
+        test_add_document_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/documents')
+        mock_response = '{"document_id": "document_id", "status": "processing"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=202,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "collection_id": collection_id,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.add_document(**req_copy)
+
+    def test_add_document_value_error_with_retries(self):
+        # Enable retries and run test_add_document_value_error.
+        _service.enable_retries()
+        self.test_add_document_value_error()
+
+        # Disable retries and run test_add_document_value_error.
+        _service.disable_retries()
+        self.test_add_document_value_error()
+
+
+class TestGetDocument:
+    """
+    Test Class for get_document
+    """
+
+    @responses.activate
+    def test_get_document_all_params(self):
+        """
+        get_document()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/documents/testString')
+        mock_response = '{"document_id": "document_id", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "available", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "children": {"have_notices": true, "count": 5}, "filename": "filename", "file_type": "file_type", "sha256": "sha256"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+        document_id = 'testString'
+
+        # Invoke method
+        response = _service.get_document(
+            project_id,
+            collection_id,
+            document_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_get_document_all_params_with_retries(self):
+        # Enable retries and run test_get_document_all_params.
+        _service.enable_retries()
+        self.test_get_document_all_params()
+
+        # Disable retries and run test_get_document_all_params.
+        _service.disable_retries()
+        self.test_get_document_all_params()
+
+    @responses.activate
+    def test_get_document_value_error(self):
+        """
+        test_get_document_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/documents/testString')
+        mock_response = '{"document_id": "document_id", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "status": "available", "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "children": {"have_notices": true, "count": 5}, "filename": "filename", "file_type": "file_type", "sha256": "sha256"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+        document_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "collection_id": collection_id,
+            "document_id": document_id,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.get_document(**req_copy)
+
+    def test_get_document_value_error_with_retries(self):
+        # Enable retries and run test_get_document_value_error.
+        _service.enable_retries()
+        self.test_get_document_value_error()
+
+        # Disable retries and run test_get_document_value_error.
+        _service.disable_retries()
+        self.test_get_document_value_error()
+
+
+class TestUpdateDocument:
+    """
+    Test Class for update_document
+    """
+
+    @responses.activate
+    def test_update_document_all_params(self):
+        """
+        update_document()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/documents/testString')
+        mock_response = '{"document_id": "document_id", "status": "processing"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=202,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+        document_id = 'testString'
+        file = io.BytesIO(b'This is a mock file.').getvalue()
+        filename = 'testString'
+        file_content_type = 'application/json'
+        metadata = 'testString'
+        x_watson_discovery_force = False
+
+        # Invoke method
+        response = _service.update_document(
+            project_id,
+            collection_id,
+            document_id,
+            file=file,
+            filename=filename,
+            file_content_type=file_content_type,
+            metadata=metadata,
+            x_watson_discovery_force=x_watson_discovery_force,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 202
+
+    def test_update_document_all_params_with_retries(self):
+        # Enable retries and run test_update_document_all_params.
+        _service.enable_retries()
+        self.test_update_document_all_params()
+
+        # Disable retries and run test_update_document_all_params.
+        _service.disable_retries()
+        self.test_update_document_all_params()
+
+    @responses.activate
+    def test_update_document_required_params(self):
+        """
+        test_update_document_required_params()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/documents/testString')
+        mock_response = '{"document_id": "document_id", "status": "processing"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=202,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+        document_id = 'testString'
+
+        # Invoke method
+        response = _service.update_document(
+            project_id,
+            collection_id,
+            document_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 202
+
+    def test_update_document_required_params_with_retries(self):
+        # Enable retries and run test_update_document_required_params.
+        _service.enable_retries()
+        self.test_update_document_required_params()
+
+        # Disable retries and run test_update_document_required_params.
+        _service.disable_retries()
+        self.test_update_document_required_params()
+
+    @responses.activate
+    def test_update_document_value_error(self):
+        """
+        test_update_document_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/documents/testString')
+        mock_response = '{"document_id": "document_id", "status": "processing"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=202,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+        document_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "collection_id": collection_id,
+            "document_id": document_id,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.update_document(**req_copy)
+
+    def test_update_document_value_error_with_retries(self):
+        # Enable retries and run test_update_document_value_error.
+        _service.enable_retries()
+        self.test_update_document_value_error()
+
+        # Disable retries and run test_update_document_value_error.
+        _service.disable_retries()
+        self.test_update_document_value_error()
+
+
class TestDeleteDocument:
    """
    Test Class for delete_document

    Every test mocks the same DELETE document endpoint with the same canned
    response, so the mock registration lives in a shared helper.
    """

    # Shared fixtures for all tests in this class.
    _URL_PATH = '/v2/projects/testString/collections/testString/documents/testString'
    _MOCK_RESPONSE = '{"document_id": "document_id", "status": "deleted"}'

    def _register_mock(self):
        """Register the mocked DELETE endpoint (helper; not collected by pytest)."""
        responses.add(
            responses.DELETE,
            preprocess_url(self._URL_PATH),
            body=self._MOCK_RESPONSE,
            content_type='application/json',
            status=200,
        )

    @responses.activate
    def test_delete_document_all_params(self):
        """
        delete_document()

        Invoke delete_document with every supported parameter.
        """
        self._register_mock()

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'
        document_id = 'testString'
        x_watson_discovery_force = False

        # Invoke method
        response = _service.delete_document(
            project_id,
            collection_id,
            document_id,
            x_watson_discovery_force=x_watson_discovery_force,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_delete_document_all_params_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_delete_document_all_params()

        _service.disable_retries()
        self.test_delete_document_all_params()

    @responses.activate
    def test_delete_document_required_params(self):
        """
        test_delete_document_required_params()

        Invoke delete_document with only its required path parameters.
        """
        self._register_mock()

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'
        document_id = 'testString'

        # Invoke method
        response = _service.delete_document(
            project_id,
            collection_id,
            document_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_delete_document_required_params_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_delete_document_required_params()

        _service.disable_retries()
        self.test_delete_document_required_params()

    @responses.activate
    def test_delete_document_value_error(self):
        """
        test_delete_document_value_error()

        Verify that delete_document raises ValueError whenever any single
        required parameter is None.
        """
        self._register_mock()

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": 'testString',
            "collection_id": 'testString',
            "document_id": 'testString',
        }
        for param in req_param_dict.keys():
            # '!=' compares by value; the original 'is not' compared string
            # identity and only worked via CPython's interning of literals.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_document(**req_copy)

    def test_delete_document_value_error_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_delete_document_value_error()

        _service.disable_retries()
        self.test_delete_document_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Documents
+##############################################################################
+
+##############################################################################
+# Start of Service: Queries
+##############################################################################
+# region
+
+
class TestQuery:
    """
    Test Class for query

    Every test mocks POST /v2/projects/{project_id}/query with the same canned
    QueryResponse payload; the payload and mock registration are shared instead
    of being repeated verbatim in each test.
    """

    _URL_PATH = '/v2/projects/testString/query'
    # Canned QueryResponse body (adjacent literals are concatenated).
    _MOCK_RESPONSE = (
        '{"matching_results": 16, "results": [{"document_id": "document_id", "metadata": {"anyKey": "anyValue"}, '
        '"result_metadata": {"document_retrieval_source": "search", "collection_id": "collection_id", "confidence": 0}, '
        '"document_passages": [{"passage_text": "passage_text", "start_offset": 12, "end_offset": 10, "field": "field", '
        '"answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}], '
        '"aggregations": [{"type": "term", "field": "field", "count": 5, "name": "name", '
        '"results": [{"key": "key", "matching_results": 16, "relevancy": 9, "total_matching_documents": 24, '
        '"estimated_matching_results": 26, "aggregations": [{"anyKey": "anyValue"}]}]}], '
        '"retrieval_details": {"document_retrieval_strategy": "untrained"}, "suggested_query": "suggested_query", '
        '"suggested_refinements": [{"text": "text"}], '
        '"table_results": [{"table_id": "table_id", "source_document_id": "source_document_id", '
        '"collection_id": "collection_id", "table_html": "table_html", "table_html_offset": 17, '
        '"table": {"location": {"begin": 5, "end": 3}, "text": "text", '
        '"section_title": {"text": "text", "location": {"begin": 5, "end": 3}}, '
        '"title": {"text": "text", "location": {"begin": 5, "end": 3}}, '
        '"table_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", '
        '"row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16}], '
        '"row_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", '
        '"text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, '
        '"column_index_begin": 18, "column_index_end": 16}], '
        '"column_headers": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", '
        '"text_normalized": "text_normalized", "row_index_begin": 15, "row_index_end": 13, '
        '"column_index_begin": 18, "column_index_end": 16}], '
        '"key_value_pairs": [{"key": {"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}, '
        '"value": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text"}]}], '
        '"body_cells": [{"cell_id": "cell_id", "location": {"begin": 5, "end": 3}, "text": "text", '
        '"row_index_begin": 15, "row_index_end": 13, "column_index_begin": 18, "column_index_end": 16, '
        '"row_header_ids": ["row_header_ids"], "row_header_texts": ["row_header_texts"], '
        '"row_header_texts_normalized": ["row_header_texts_normalized"], "column_header_ids": ["column_header_ids"], '
        '"column_header_texts": ["column_header_texts"], "column_header_texts_normalized": ["column_header_texts_normalized"], '
        '"attributes": [{"type": "type", "text": "text", "location": {"begin": 5, "end": 3}}]}], '
        '"contexts": [{"text": "text", "location": {"begin": 5, "end": 3}}]}}], '
        '"passages": [{"passage_text": "passage_text", "passage_score": 13, "document_id": "document_id", '
        '"collection_id": "collection_id", "start_offset": 12, "end_offset": 10, "field": "field", '
        '"answers": [{"answer_text": "answer_text", "start_offset": 12, "end_offset": 10, "confidence": 0}]}]}'
    )

    def _register_mock(self):
        """Register the mocked POST query endpoint (helper; not collected by pytest)."""
        responses.add(
            responses.POST,
            preprocess_url(self._URL_PATH),
            body=self._MOCK_RESPONSE,
            content_type='application/json',
            status=200,
        )

    @responses.activate
    def test_query_all_params(self):
        """
        query()

        Invoke query with every supported body parameter and verify the
        request body the SDK serialized.
        """
        self._register_mock()

        # Construct a dict representation of a QueryLargeTableResults model
        query_large_table_results_model = {}
        query_large_table_results_model['enabled'] = True
        query_large_table_results_model['count'] = 38

        # Construct a dict representation of a QueryLargeSuggestedRefinements model
        query_large_suggested_refinements_model = {}
        query_large_suggested_refinements_model['enabled'] = True
        query_large_suggested_refinements_model['count'] = 1

        # Construct a dict representation of a QueryLargePassages model
        query_large_passages_model = {}
        query_large_passages_model['enabled'] = True
        query_large_passages_model['per_document'] = True
        query_large_passages_model['max_per_document'] = 38
        query_large_passages_model['fields'] = ['testString']
        query_large_passages_model['count'] = 400
        query_large_passages_model['characters'] = 50
        query_large_passages_model['find_answers'] = False
        query_large_passages_model['max_answers_per_passage'] = 1

        # Construct a dict representation of a QueryLargeSimilar model
        query_large_similar_model = {}
        query_large_similar_model['enabled'] = False
        query_large_similar_model['document_ids'] = ['testString']
        query_large_similar_model['fields'] = ['testString']

        # Set up parameter values
        project_id = 'testString'
        collection_ids = ['testString']
        filter = 'testString'
        query = 'testString'
        natural_language_query = 'testString'
        aggregation = 'testString'
        count = 38
        return_ = ['testString']
        offset = 38
        sort = 'testString'
        highlight = True
        spelling_suggestions = True
        table_results = query_large_table_results_model
        suggested_refinements = query_large_suggested_refinements_model
        passages = query_large_passages_model
        similar = query_large_similar_model

        # Invoke method
        response = _service.query(
            project_id,
            collection_ids=collection_ids,
            filter=filter,
            query=query,
            natural_language_query=natural_language_query,
            aggregation=aggregation,
            count=count,
            return_=return_,
            offset=offset,
            sort=sort,
            highlight=highlight,
            spelling_suggestions=spelling_suggestions,
            table_results=table_results,
            suggested_refinements=suggested_refinements,
            passages=passages,
            similar=similar,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['collection_ids'] == ['testString']
        assert req_body['filter'] == 'testString'
        assert req_body['query'] == 'testString'
        assert req_body['natural_language_query'] == 'testString'
        assert req_body['aggregation'] == 'testString'
        assert req_body['count'] == 38
        assert req_body['return'] == ['testString']
        assert req_body['offset'] == 38
        assert req_body['sort'] == 'testString'
        assert req_body['highlight'] is True
        assert req_body['spelling_suggestions'] is True
        assert req_body['table_results'] == query_large_table_results_model
        assert req_body['suggested_refinements'] == query_large_suggested_refinements_model
        assert req_body['passages'] == query_large_passages_model
        assert req_body['similar'] == query_large_similar_model

    def test_query_all_params_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_query_all_params()

        _service.disable_retries()
        self.test_query_all_params()

    @responses.activate
    def test_query_required_params(self):
        """
        test_query_required_params()

        Invoke query with only its required project_id parameter.
        """
        self._register_mock()

        # Set up parameter values
        project_id = 'testString'

        # Invoke method
        response = _service.query(
            project_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_query_required_params_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_query_required_params()

        _service.disable_retries()
        self.test_query_required_params()

    @responses.activate
    def test_query_value_error(self):
        """
        test_query_value_error()

        Verify that query raises ValueError when project_id is None.
        """
        self._register_mock()

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": 'testString',
        }
        for param in req_param_dict.keys():
            # '!=' compares by value; the original 'is not' compared string
            # identity and only worked via CPython's interning of literals.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.query(**req_copy)

    def test_query_value_error_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_query_value_error()

        _service.disable_retries()
        self.test_query_value_error()
+
+
class TestGetAutocompletion:
    """
    Test Class for get_autocompletion

    Every test mocks the same GET autocompletion endpoint with the same canned
    response, so the mock registration lives in a shared helper.
    """

    _URL_PATH = '/v2/projects/testString/autocompletion'
    _MOCK_RESPONSE = '{"completions": ["completions"]}'

    def _register_mock(self):
        """Register the mocked GET endpoint (helper; not collected by pytest)."""
        responses.add(
            responses.GET,
            preprocess_url(self._URL_PATH),
            body=self._MOCK_RESPONSE,
            content_type='application/json',
            status=200,
        )

    @responses.activate
    def test_get_autocompletion_all_params(self):
        """
        get_autocompletion()

        Invoke get_autocompletion with every supported query parameter and
        verify the query string the SDK built.
        """
        self._register_mock()

        # Set up parameter values
        project_id = 'testString'
        prefix = 'testString'
        collection_ids = ['testString']
        field = 'testString'
        count = 5

        # Invoke method
        response = _service.get_autocompletion(
            project_id,
            prefix,
            collection_ids=collection_ids,
            field=field,
            count=count,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'prefix={}'.format(prefix) in query_string
        assert 'collection_ids={}'.format(','.join(collection_ids)) in query_string
        assert 'field={}'.format(field) in query_string
        assert 'count={}'.format(count) in query_string

    def test_get_autocompletion_all_params_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_get_autocompletion_all_params()

        _service.disable_retries()
        self.test_get_autocompletion_all_params()

    @responses.activate
    def test_get_autocompletion_required_params(self):
        """
        test_get_autocompletion_required_params()

        Invoke get_autocompletion with only its required parameters.
        """
        self._register_mock()

        # Set up parameter values
        project_id = 'testString'
        prefix = 'testString'

        # Invoke method
        response = _service.get_autocompletion(
            project_id,
            prefix,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'prefix={}'.format(prefix) in query_string

    def test_get_autocompletion_required_params_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_get_autocompletion_required_params()

        _service.disable_retries()
        self.test_get_autocompletion_required_params()

    @responses.activate
    def test_get_autocompletion_value_error(self):
        """
        test_get_autocompletion_value_error()

        Verify that get_autocompletion raises ValueError whenever any single
        required parameter is None.
        """
        self._register_mock()

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": 'testString',
            "prefix": 'testString',
        }
        for param in req_param_dict.keys():
            # '!=' compares by value; the original 'is not' compared string
            # identity and only worked via CPython's interning of literals.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_autocompletion(**req_copy)

    def test_get_autocompletion_value_error_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_get_autocompletion_value_error()

        _service.disable_retries()
        self.test_get_autocompletion_value_error()
+
+
class TestQueryCollectionNotices:
    """
    Test Class for query_collection_notices

    Every test mocks the same GET notices endpoint with the same canned
    response, so the mock registration lives in a shared helper.
    """

    _URL_PATH = '/v2/projects/testString/collections/testString/notices'
    _MOCK_RESPONSE = '{"matching_results": 16, "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}'

    def _register_mock(self):
        """Register the mocked GET endpoint (helper; not collected by pytest)."""
        responses.add(
            responses.GET,
            preprocess_url(self._URL_PATH),
            body=self._MOCK_RESPONSE,
            content_type='application/json',
            status=200,
        )

    @responses.activate
    def test_query_collection_notices_all_params(self):
        """
        query_collection_notices()

        Invoke query_collection_notices with every supported query parameter
        and verify the query string the SDK built.
        """
        self._register_mock()

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'
        filter = 'testString'
        query = 'testString'
        natural_language_query = 'testString'
        count = 10
        offset = 38

        # Invoke method
        response = _service.query_collection_notices(
            project_id,
            collection_id,
            filter=filter,
            query=query,
            natural_language_query=natural_language_query,
            count=count,
            offset=offset,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'filter={}'.format(filter) in query_string
        assert 'query={}'.format(query) in query_string
        assert 'natural_language_query={}'.format(natural_language_query) in query_string
        assert 'count={}'.format(count) in query_string
        assert 'offset={}'.format(offset) in query_string

    def test_query_collection_notices_all_params_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_query_collection_notices_all_params()

        _service.disable_retries()
        self.test_query_collection_notices_all_params()

    @responses.activate
    def test_query_collection_notices_required_params(self):
        """
        test_query_collection_notices_required_params()

        Invoke query_collection_notices with only its required parameters.
        """
        self._register_mock()

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'

        # Invoke method
        response = _service.query_collection_notices(
            project_id,
            collection_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_query_collection_notices_required_params_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_query_collection_notices_required_params()

        _service.disable_retries()
        self.test_query_collection_notices_required_params()

    @responses.activate
    def test_query_collection_notices_value_error(self):
        """
        test_query_collection_notices_value_error()

        Verify that query_collection_notices raises ValueError whenever any
        single required parameter is None.
        """
        self._register_mock()

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": 'testString',
            "collection_id": 'testString',
        }
        for param in req_param_dict.keys():
            # '!=' compares by value; the original 'is not' compared string
            # identity and only worked via CPython's interning of literals.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.query_collection_notices(**req_copy)

    def test_query_collection_notices_value_error_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_query_collection_notices_value_error()

        _service.disable_retries()
        self.test_query_collection_notices_value_error()
+
+
class TestQueryNotices:
    """
    Test Class for query_notices

    Every test mocks the same GET project-notices endpoint with the same
    canned response, so the mock registration lives in a shared helper.
    """

    _URL_PATH = '/v2/projects/testString/notices'
    _MOCK_RESPONSE = '{"matching_results": 16, "notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}]}'

    def _register_mock(self):
        """Register the mocked GET endpoint (helper; not collected by pytest)."""
        responses.add(
            responses.GET,
            preprocess_url(self._URL_PATH),
            body=self._MOCK_RESPONSE,
            content_type='application/json',
            status=200,
        )

    @responses.activate
    def test_query_notices_all_params(self):
        """
        query_notices()

        Invoke query_notices with every supported query parameter and verify
        the query string the SDK built.
        """
        self._register_mock()

        # Set up parameter values
        project_id = 'testString'
        filter = 'testString'
        query = 'testString'
        natural_language_query = 'testString'
        count = 10
        offset = 38

        # Invoke method
        response = _service.query_notices(
            project_id,
            filter=filter,
            query=query,
            natural_language_query=natural_language_query,
            count=count,
            offset=offset,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'filter={}'.format(filter) in query_string
        assert 'query={}'.format(query) in query_string
        assert 'natural_language_query={}'.format(natural_language_query) in query_string
        assert 'count={}'.format(count) in query_string
        assert 'offset={}'.format(offset) in query_string

    def test_query_notices_all_params_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_query_notices_all_params()

        _service.disable_retries()
        self.test_query_notices_all_params()

    @responses.activate
    def test_query_notices_required_params(self):
        """
        test_query_notices_required_params()

        Invoke query_notices with only its required project_id parameter.
        """
        self._register_mock()

        # Set up parameter values
        project_id = 'testString'

        # Invoke method
        response = _service.query_notices(
            project_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_query_notices_required_params_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_query_notices_required_params()

        _service.disable_retries()
        self.test_query_notices_required_params()

    @responses.activate
    def test_query_notices_value_error(self):
        """
        test_query_notices_value_error()

        Verify that query_notices raises ValueError when project_id is None.
        """
        self._register_mock()

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": 'testString',
        }
        for param in req_param_dict.keys():
            # '!=' compares by value; the original 'is not' compared string
            # identity and only worked via CPython's interning of literals.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.query_notices(**req_copy)

    def test_query_notices_value_error_with_retries(self):
        # Run once with retries enabled, then once with them disabled.
        _service.enable_retries()
        self.test_query_notices_value_error()

        _service.disable_retries()
        self.test_query_notices_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Queries
+##############################################################################
+
+##############################################################################
+# Start of Service: QueryModifications
+##############################################################################
+# region
+
+
class TestGetStopwordList:
    """
    Test Class for get_stopword_list
    """

    @responses.activate
    def test_get_stopword_list_all_params(self):
        """
        get_stopword_list()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/stopwords')
        mock_response = '{"stopwords": ["stopwords"]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'

        # Invoke method
        response = _service.get_stopword_list(
            project_id,
            collection_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_stopword_list_all_params_with_retries(self):
        # Enable retries and run test_get_stopword_list_all_params.
        _service.enable_retries()
        self.test_get_stopword_list_all_params()

        # Disable retries and run test_get_stopword_list_all_params.
        _service.disable_retries()
        self.test_get_stopword_list_all_params()

    @responses.activate
    def test_get_stopword_list_value_error(self):
        """
        test_get_stopword_list_value_error()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/stopwords')
        mock_response = '{"stopwords": ["stopwords"]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
            "collection_id": collection_id,
        }
        for param in req_param_dict.keys():
            # Compare keys by value ('!=') rather than identity ('is not'):
            # string identity is an interpreter implementation detail.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_stopword_list(**req_copy)

    def test_get_stopword_list_value_error_with_retries(self):
        # Enable retries and run test_get_stopword_list_value_error.
        _service.enable_retries()
        self.test_get_stopword_list_value_error()

        # Disable retries and run test_get_stopword_list_value_error.
        _service.disable_retries()
        self.test_get_stopword_list_value_error()
+
+
class TestCreateStopwordList:
    """
    Test Class for create_stopword_list
    """

    @responses.activate
    def test_create_stopword_list_all_params(self):
        """
        create_stopword_list()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/stopwords')
        mock_response = '{"stopwords": ["stopwords"]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'
        stopwords = ['testString']

        # Invoke method
        response = _service.create_stopword_list(
            project_id,
            collection_id,
            stopwords=stopwords,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['stopwords'] == ['testString']

    def test_create_stopword_list_all_params_with_retries(self):
        # Enable retries and run test_create_stopword_list_all_params.
        _service.enable_retries()
        self.test_create_stopword_list_all_params()

        # Disable retries and run test_create_stopword_list_all_params.
        _service.disable_retries()
        self.test_create_stopword_list_all_params()

    @responses.activate
    def test_create_stopword_list_required_params(self):
        """
        test_create_stopword_list_required_params()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/stopwords')
        mock_response = '{"stopwords": ["stopwords"]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'

        # Invoke method
        response = _service.create_stopword_list(
            project_id,
            collection_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_create_stopword_list_required_params_with_retries(self):
        # Enable retries and run test_create_stopword_list_required_params.
        _service.enable_retries()
        self.test_create_stopword_list_required_params()

        # Disable retries and run test_create_stopword_list_required_params.
        _service.disable_retries()
        self.test_create_stopword_list_required_params()

    @responses.activate
    def test_create_stopword_list_value_error(self):
        """
        test_create_stopword_list_value_error()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/stopwords')
        mock_response = '{"stopwords": ["stopwords"]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
            "collection_id": collection_id,
        }
        for param in req_param_dict.keys():
            # Compare keys by value ('!=') rather than identity ('is not'):
            # string identity is an interpreter implementation detail.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.create_stopword_list(**req_copy)

    def test_create_stopword_list_value_error_with_retries(self):
        # Enable retries and run test_create_stopword_list_value_error.
        _service.enable_retries()
        self.test_create_stopword_list_value_error()

        # Disable retries and run test_create_stopword_list_value_error.
        _service.disable_retries()
        self.test_create_stopword_list_value_error()
+
+
class TestDeleteStopwordList:
    """
    Test Class for delete_stopword_list
    """

    @responses.activate
    def test_delete_stopword_list_all_params(self):
        """
        delete_stopword_list()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/stopwords')
        responses.add(
            responses.DELETE,
            url,
            status=204,
        )

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'

        # Invoke method
        response = _service.delete_stopword_list(
            project_id,
            collection_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 204

    def test_delete_stopword_list_all_params_with_retries(self):
        # Enable retries and run test_delete_stopword_list_all_params.
        _service.enable_retries()
        self.test_delete_stopword_list_all_params()

        # Disable retries and run test_delete_stopword_list_all_params.
        _service.disable_retries()
        self.test_delete_stopword_list_all_params()

    @responses.activate
    def test_delete_stopword_list_value_error(self):
        """
        test_delete_stopword_list_value_error()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/stopwords')
        responses.add(
            responses.DELETE,
            url,
            status=204,
        )

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
            "collection_id": collection_id,
        }
        for param in req_param_dict.keys():
            # Compare keys by value ('!=') rather than identity ('is not'):
            # string identity is an interpreter implementation detail.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_stopword_list(**req_copy)

    def test_delete_stopword_list_value_error_with_retries(self):
        # Enable retries and run test_delete_stopword_list_value_error.
        _service.enable_retries()
        self.test_delete_stopword_list_value_error()

        # Disable retries and run test_delete_stopword_list_value_error.
        _service.disable_retries()
        self.test_delete_stopword_list_value_error()
+
+
class TestListExpansions:
    """
    Test Class for list_expansions
    """

    @responses.activate
    def test_list_expansions_all_params(self):
        """
        list_expansions()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/expansions')
        mock_response = '{"expansions": [{"input_terms": ["input_terms"], "expanded_terms": ["expanded_terms"]}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'

        # Invoke method
        response = _service.list_expansions(
            project_id,
            collection_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_list_expansions_all_params_with_retries(self):
        # Enable retries and run test_list_expansions_all_params.
        _service.enable_retries()
        self.test_list_expansions_all_params()

        # Disable retries and run test_list_expansions_all_params.
        _service.disable_retries()
        self.test_list_expansions_all_params()

    @responses.activate
    def test_list_expansions_value_error(self):
        """
        test_list_expansions_value_error()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/expansions')
        mock_response = '{"expansions": [{"input_terms": ["input_terms"], "expanded_terms": ["expanded_terms"]}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
            "collection_id": collection_id,
        }
        for param in req_param_dict.keys():
            # Compare keys by value ('!=') rather than identity ('is not'):
            # string identity is an interpreter implementation detail.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.list_expansions(**req_copy)

    def test_list_expansions_value_error_with_retries(self):
        # Enable retries and run test_list_expansions_value_error.
        _service.enable_retries()
        self.test_list_expansions_value_error()

        # Disable retries and run test_list_expansions_value_error.
        _service.disable_retries()
        self.test_list_expansions_value_error()
+
+
class TestCreateExpansions:
    """
    Test Class for create_expansions
    """

    @responses.activate
    def test_create_expansions_all_params(self):
        """
        create_expansions()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/expansions')
        mock_response = '{"expansions": [{"input_terms": ["input_terms"], "expanded_terms": ["expanded_terms"]}]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Construct a dict representation of a Expansion model
        expansion_model = {}
        expansion_model['input_terms'] = ['testString']
        expansion_model['expanded_terms'] = ['testString']

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'
        expansions = [expansion_model]

        # Invoke method
        response = _service.create_expansions(
            project_id,
            collection_id,
            expansions,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['expansions'] == [expansion_model]

    def test_create_expansions_all_params_with_retries(self):
        # Enable retries and run test_create_expansions_all_params.
        _service.enable_retries()
        self.test_create_expansions_all_params()

        # Disable retries and run test_create_expansions_all_params.
        _service.disable_retries()
        self.test_create_expansions_all_params()

    @responses.activate
    def test_create_expansions_value_error(self):
        """
        test_create_expansions_value_error()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/expansions')
        mock_response = '{"expansions": [{"input_terms": ["input_terms"], "expanded_terms": ["expanded_terms"]}]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Construct a dict representation of a Expansion model
        expansion_model = {}
        expansion_model['input_terms'] = ['testString']
        expansion_model['expanded_terms'] = ['testString']

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'
        expansions = [expansion_model]

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
            "collection_id": collection_id,
            "expansions": expansions,
        }
        for param in req_param_dict.keys():
            # Compare keys by value ('!=') rather than identity ('is not'):
            # string identity is an interpreter implementation detail.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.create_expansions(**req_copy)

    def test_create_expansions_value_error_with_retries(self):
        # Enable retries and run test_create_expansions_value_error.
        _service.enable_retries()
        self.test_create_expansions_value_error()

        # Disable retries and run test_create_expansions_value_error.
        _service.disable_retries()
        self.test_create_expansions_value_error()
+
+
class TestDeleteExpansions:
    """
    Test Class for delete_expansions
    """

    @responses.activate
    def test_delete_expansions_all_params(self):
        """
        delete_expansions()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/expansions')
        responses.add(
            responses.DELETE,
            url,
            status=204,
        )

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'

        # Invoke method
        response = _service.delete_expansions(
            project_id,
            collection_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 204

    def test_delete_expansions_all_params_with_retries(self):
        # Enable retries and run test_delete_expansions_all_params.
        _service.enable_retries()
        self.test_delete_expansions_all_params()

        # Disable retries and run test_delete_expansions_all_params.
        _service.disable_retries()
        self.test_delete_expansions_all_params()

    @responses.activate
    def test_delete_expansions_value_error(self):
        """
        test_delete_expansions_value_error()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/expansions')
        responses.add(
            responses.DELETE,
            url,
            status=204,
        )

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
            "collection_id": collection_id,
        }
        for param in req_param_dict.keys():
            # Compare keys by value ('!=') rather than identity ('is not'):
            # string identity is an interpreter implementation detail.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_expansions(**req_copy)

    def test_delete_expansions_value_error_with_retries(self):
        # Enable retries and run test_delete_expansions_value_error.
        _service.enable_retries()
        self.test_delete_expansions_value_error()

        # Disable retries and run test_delete_expansions_value_error.
        _service.disable_retries()
        self.test_delete_expansions_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: QueryModifications
+##############################################################################
+
+##############################################################################
+# Start of Service: ComponentSettings
+##############################################################################
+# region
+
+
class TestGetComponentSettings:
    """
    Test Class for get_component_settings
    """

    @responses.activate
    def test_get_component_settings_all_params(self):
        """
        get_component_settings()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/component_settings')
        mock_response = '{"fields_shown": {"body": {"use_passage": false, "field": "field"}, "title": {"field": "field"}}, "autocomplete": true, "structured_search": false, "results_per_page": 16, "aggregations": [{"name": "name", "label": "label", "multiple_selections_allowed": false, "visualization_type": "auto"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'

        # Invoke method
        response = _service.get_component_settings(
            project_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_component_settings_all_params_with_retries(self):
        # Enable retries and run test_get_component_settings_all_params.
        _service.enable_retries()
        self.test_get_component_settings_all_params()

        # Disable retries and run test_get_component_settings_all_params.
        _service.disable_retries()
        self.test_get_component_settings_all_params()

    @responses.activate
    def test_get_component_settings_value_error(self):
        """
        test_get_component_settings_value_error()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/component_settings')
        mock_response = '{"fields_shown": {"body": {"use_passage": false, "field": "field"}, "title": {"field": "field"}}, "autocomplete": true, "structured_search": false, "results_per_page": 16, "aggregations": [{"name": "name", "label": "label", "multiple_selections_allowed": false, "visualization_type": "auto"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
        }
        for param in req_param_dict.keys():
            # Compare keys by value ('!=') rather than identity ('is not'):
            # string identity is an interpreter implementation detail.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_component_settings(**req_copy)

    def test_get_component_settings_value_error_with_retries(self):
        # Enable retries and run test_get_component_settings_value_error.
        _service.enable_retries()
        self.test_get_component_settings_value_error()

        # Disable retries and run test_get_component_settings_value_error.
        _service.disable_retries()
        self.test_get_component_settings_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: ComponentSettings
+##############################################################################
+
+##############################################################################
+# Start of Service: TrainingData
+##############################################################################
+# region
+
+
class TestListTrainingQueries:
    """
    Test Class for list_training_queries
    """

    @responses.activate
    def test_list_training_queries_all_params(self):
        """
        list_training_queries()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/training_data/queries')
        mock_response = '{"queries": [{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'

        # Invoke method
        response = _service.list_training_queries(
            project_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_list_training_queries_all_params_with_retries(self):
        # Enable retries and run test_list_training_queries_all_params.
        _service.enable_retries()
        self.test_list_training_queries_all_params()

        # Disable retries and run test_list_training_queries_all_params.
        _service.disable_retries()
        self.test_list_training_queries_all_params()

    @responses.activate
    def test_list_training_queries_value_error(self):
        """
        test_list_training_queries_value_error()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/training_data/queries')
        mock_response = '{"queries": [{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
        }
        for param in req_param_dict.keys():
            # Compare keys by value ('!=') rather than identity ('is not'):
            # string identity is an interpreter implementation detail.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.list_training_queries(**req_copy)

    def test_list_training_queries_value_error_with_retries(self):
        # Enable retries and run test_list_training_queries_value_error.
        _service.enable_retries()
        self.test_list_training_queries_value_error()

        # Disable retries and run test_list_training_queries_value_error.
        _service.disable_retries()
        self.test_list_training_queries_value_error()
+
+
class TestDeleteTrainingQueries:
    """
    Test Class for delete_training_queries
    """

    @responses.activate
    def test_delete_training_queries_all_params(self):
        """
        delete_training_queries()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/training_data/queries')
        responses.add(
            responses.DELETE,
            url,
            status=204,
        )

        # Set up parameter values
        project_id = 'testString'

        # Invoke method
        response = _service.delete_training_queries(
            project_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 204

    def test_delete_training_queries_all_params_with_retries(self):
        # Enable retries and run test_delete_training_queries_all_params.
        _service.enable_retries()
        self.test_delete_training_queries_all_params()

        # Disable retries and run test_delete_training_queries_all_params.
        _service.disable_retries()
        self.test_delete_training_queries_all_params()

    @responses.activate
    def test_delete_training_queries_value_error(self):
        """
        test_delete_training_queries_value_error()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/training_data/queries')
        responses.add(
            responses.DELETE,
            url,
            status=204,
        )

        # Set up parameter values
        project_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
        }
        for param in req_param_dict.keys():
            # Compare keys by value ('!=') rather than identity ('is not'):
            # string identity is an interpreter implementation detail.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_training_queries(**req_copy)

    def test_delete_training_queries_value_error_with_retries(self):
        # Enable retries and run test_delete_training_queries_value_error.
        _service.enable_retries()
        self.test_delete_training_queries_value_error()

        # Disable retries and run test_delete_training_queries_value_error.
        _service.disable_retries()
        self.test_delete_training_queries_value_error()
+
+
class TestCreateTrainingQuery:
    """
    Test Class for create_training_query
    """

    @responses.activate
    def test_create_training_query_all_params(self):
        """
        create_training_query()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/training_data/queries')
        mock_response = '{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Construct a dict representation of a TrainingExample model
        training_example_model = {}
        training_example_model['document_id'] = 'testString'
        training_example_model['collection_id'] = 'testString'
        training_example_model['relevance'] = 38

        # Set up parameter values
        project_id = 'testString'
        natural_language_query = 'testString'
        examples = [training_example_model]
        filter = 'testString'

        # Invoke method
        response = _service.create_training_query(
            project_id,
            natural_language_query,
            examples,
            filter=filter,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['natural_language_query'] == 'testString'
        assert req_body['examples'] == [training_example_model]
        assert req_body['filter'] == 'testString'

    def test_create_training_query_all_params_with_retries(self):
        # Enable retries and run test_create_training_query_all_params.
        _service.enable_retries()
        self.test_create_training_query_all_params()

        # Disable retries and run test_create_training_query_all_params.
        _service.disable_retries()
        self.test_create_training_query_all_params()

    @responses.activate
    def test_create_training_query_value_error(self):
        """
        test_create_training_query_value_error()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/training_data/queries')
        mock_response = '{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Construct a dict representation of a TrainingExample model
        training_example_model = {}
        training_example_model['document_id'] = 'testString'
        training_example_model['collection_id'] = 'testString'
        training_example_model['relevance'] = 38

        # Set up parameter values
        project_id = 'testString'
        natural_language_query = 'testString'
        examples = [training_example_model]
        filter = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
            "natural_language_query": natural_language_query,
            "examples": examples,
        }
        for param in req_param_dict.keys():
            # Compare keys by value ('!=') rather than identity ('is not'):
            # string identity is an interpreter implementation detail.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.create_training_query(**req_copy)

    def test_create_training_query_value_error_with_retries(self):
        # Enable retries and run test_create_training_query_value_error.
        _service.enable_retries()
        self.test_create_training_query_value_error()

        # Disable retries and run test_create_training_query_value_error.
        _service.disable_retries()
        self.test_create_training_query_value_error()
+
+
class TestGetTrainingQuery:
    """
    Test Class for get_training_query
    """

    @responses.activate
    def test_get_training_query_all_params(self):
        """
        get_training_query()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/training_data/queries/testString')
        mock_response = '{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'
        query_id = 'testString'

        # Invoke method
        response = _service.get_training_query(
            project_id,
            query_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_training_query_all_params_with_retries(self):
        # Enable retries and run test_get_training_query_all_params.
        _service.enable_retries()
        self.test_get_training_query_all_params()

        # Disable retries and run test_get_training_query_all_params.
        _service.disable_retries()
        self.test_get_training_query_all_params()

    @responses.activate
    def test_get_training_query_value_error(self):
        """
        test_get_training_query_value_error()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/training_data/queries/testString')
        mock_response = '{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'
        query_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
            "query_id": query_id,
        }
        for param in req_param_dict.keys():
            # Compare keys by value ('!=') rather than identity ('is not'):
            # string identity is an interpreter implementation detail.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_training_query(**req_copy)

    def test_get_training_query_value_error_with_retries(self):
        # Enable retries and run test_get_training_query_value_error.
        _service.enable_retries()
        self.test_get_training_query_value_error()

        # Disable retries and run test_get_training_query_value_error.
        _service.disable_retries()
        self.test_get_training_query_value_error()
+
+
+class TestUpdateTrainingQuery:
+    """
+    Tests for update_training_query (POST /v2/projects/{project_id}/training_data/queries/{query_id}).
+    """
+
+    @responses.activate
+    def test_update_training_query_all_params(self):
+        """
+        update_training_query() returns 201 and posts natural_language_query, examples and filter.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/training_data/queries/testString')
+        mock_response = '{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Construct a dict representation of a TrainingExample model
+        training_example_model = {}
+        training_example_model['document_id'] = 'testString'
+        training_example_model['collection_id'] = 'testString'
+        training_example_model['relevance'] = 38
+
+        # Set up parameter values
+        project_id = 'testString'
+        query_id = 'testString'
+        natural_language_query = 'testString'
+        examples = [training_example_model]
+        filter = 'testString'
+
+        # Invoke method
+        response = _service.update_training_query(
+            project_id,
+            query_id,
+            natural_language_query,
+            examples,
+            filter=filter,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+        assert req_body['natural_language_query'] == 'testString'
+        assert req_body['examples'] == [training_example_model]
+        assert req_body['filter'] == 'testString'
+
+    def test_update_training_query_all_params_with_retries(self):
+        # Enable retries and run test_update_training_query_all_params.
+        _service.enable_retries()
+        self.test_update_training_query_all_params()
+
+        # Disable retries and run test_update_training_query_all_params.
+        _service.disable_retries()
+        self.test_update_training_query_all_params()
+
+    @responses.activate
+    def test_update_training_query_value_error(self):
+        """
+        update_training_query() raises ValueError when any required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/training_data/queries/testString')
+        mock_response = '{"query_id": "query_id", "natural_language_query": "natural_language_query", "filter": "filter", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "examples": [{"document_id": "document_id", "collection_id": "collection_id", "relevance": 9, "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z"}]}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Construct a dict representation of a TrainingExample model
+        training_example_model = {}
+        training_example_model['document_id'] = 'testString'
+        training_example_model['collection_id'] = 'testString'
+        training_example_model['relevance'] = 38
+
+        # Set up parameter values
+        project_id = 'testString'
+        query_id = 'testString'
+        natural_language_query = 'testString'
+        examples = [training_example_model]
+        filter = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "query_id": query_id,
+            "natural_language_query": natural_language_query,
+            "examples": examples,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.update_training_query(**req_copy)
+
+    def test_update_training_query_value_error_with_retries(self):
+        # Enable retries and run test_update_training_query_value_error.
+        _service.enable_retries()
+        self.test_update_training_query_value_error()
+
+        # Disable retries and run test_update_training_query_value_error.
+        _service.disable_retries()
+        self.test_update_training_query_value_error()
+
+
+class TestDeleteTrainingQuery:
+    """
+    Tests for delete_training_query (DELETE /v2/projects/{project_id}/training_data/queries/{query_id}).
+    """
+
+    @responses.activate
+    def test_delete_training_query_all_params(self):
+        """
+        delete_training_query() succeeds against a mocked 204 (no content) response.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/training_data/queries/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=204,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        query_id = 'testString'
+
+        # Invoke method
+        response = _service.delete_training_query(
+            project_id,
+            query_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 204
+
+    def test_delete_training_query_all_params_with_retries(self):
+        # Enable retries and run test_delete_training_query_all_params.
+        _service.enable_retries()
+        self.test_delete_training_query_all_params()
+
+        # Disable retries and run test_delete_training_query_all_params.
+        _service.disable_retries()
+        self.test_delete_training_query_all_params()
+
+    @responses.activate
+    def test_delete_training_query_value_error(self):
+        """
+        delete_training_query() raises ValueError when a required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/training_data/queries/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=204,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        query_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "query_id": query_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.delete_training_query(**req_copy)
+
+    def test_delete_training_query_value_error_with_retries(self):
+        # Enable retries and run test_delete_training_query_value_error.
+        _service.enable_retries()
+        self.test_delete_training_query_value_error()
+
+        # Disable retries and run test_delete_training_query_value_error.
+        _service.disable_retries()
+        self.test_delete_training_query_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: TrainingData
+##############################################################################
+
+##############################################################################
+# Start of Service: Enrichments
+##############################################################################
+# region
+
+
+class TestListEnrichments:
+    """
+    Tests for list_enrichments (GET /v2/projects/{project_id}/enrichments).
+    """
+
+    @responses.activate
+    def test_list_enrichments_all_params(self):
+        """
+        list_enrichments() succeeds against a mocked 200 response.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/enrichments')
+        mock_response = '{"enrichments": [{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+
+        # Invoke method
+        response = _service.list_enrichments(
+            project_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_list_enrichments_all_params_with_retries(self):
+        # Enable retries and run test_list_enrichments_all_params.
+        _service.enable_retries()
+        self.test_list_enrichments_all_params()
+
+        # Disable retries and run test_list_enrichments_all_params.
+        _service.disable_retries()
+        self.test_list_enrichments_all_params()
+
+    @responses.activate
+    def test_list_enrichments_value_error(self):
+        """
+        list_enrichments() raises ValueError when project_id is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/enrichments')
+        mock_response = '{"enrichments": [{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.list_enrichments(**req_copy)
+
+    def test_list_enrichments_value_error_with_retries(self):
+        # Enable retries and run test_list_enrichments_value_error.
+        _service.enable_retries()
+        self.test_list_enrichments_value_error()
+
+        # Disable retries and run test_list_enrichments_value_error.
+        _service.disable_retries()
+        self.test_list_enrichments_value_error()
+
+
+class TestCreateEnrichment:
+    """
+    Tests for create_enrichment (POST /v2/projects/{project_id}/enrichments).
+    """
+
+    @responses.activate
+    def test_create_enrichment_all_params(self):
+        """
+        create_enrichment() succeeds (201) when both the enrichment model and a file are supplied.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/enrichments')
+        mock_response = '{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Construct a dict representation of a WebhookHeader model
+        webhook_header_model = {}
+        webhook_header_model['name'] = 'testString'
+        webhook_header_model['value'] = 'testString'
+
+        # Construct a dict representation of a EnrichmentOptions model
+        enrichment_options_model = {}
+        enrichment_options_model['languages'] = ['testString']
+        enrichment_options_model['entity_type'] = 'testString'
+        enrichment_options_model['regular_expression'] = 'testString'
+        enrichment_options_model['result_field'] = 'testString'
+        enrichment_options_model['classifier_id'] = 'testString'
+        enrichment_options_model['model_id'] = 'testString'
+        enrichment_options_model['confidence_threshold'] = 0
+        enrichment_options_model['top_k'] = 0
+        enrichment_options_model['url'] = 'testString'
+        enrichment_options_model['version'] = '2023-03-31'
+        enrichment_options_model['secret'] = 'testString'
+        enrichment_options_model['headers'] = webhook_header_model
+        enrichment_options_model['location_encoding'] = '`utf-16`'
+
+        # Construct a dict representation of a CreateEnrichment model
+        create_enrichment_model = {}
+        create_enrichment_model['name'] = 'testString'
+        create_enrichment_model['description'] = 'testString'
+        create_enrichment_model['type'] = 'classifier'
+        create_enrichment_model['options'] = enrichment_options_model
+
+        # Set up parameter values
+        project_id = 'testString'
+        enrichment = create_enrichment_model
+        file = io.BytesIO(b'This is a mock file.').getvalue()
+
+        # Invoke method
+        response = _service.create_enrichment(
+            project_id,
+            enrichment,
+            file=file,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+
+    def test_create_enrichment_all_params_with_retries(self):
+        # Enable retries and run test_create_enrichment_all_params.
+        _service.enable_retries()
+        self.test_create_enrichment_all_params()
+
+        # Disable retries and run test_create_enrichment_all_params.
+        _service.disable_retries()
+        self.test_create_enrichment_all_params()
+
+    @responses.activate
+    def test_create_enrichment_required_params(self):
+        """
+        create_enrichment() succeeds (201) with only the required parameters (no file).
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/enrichments')
+        mock_response = '{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Construct a dict representation of a WebhookHeader model
+        webhook_header_model = {}
+        webhook_header_model['name'] = 'testString'
+        webhook_header_model['value'] = 'testString'
+
+        # Construct a dict representation of a EnrichmentOptions model
+        enrichment_options_model = {}
+        enrichment_options_model['languages'] = ['testString']
+        enrichment_options_model['entity_type'] = 'testString'
+        enrichment_options_model['regular_expression'] = 'testString'
+        enrichment_options_model['result_field'] = 'testString'
+        enrichment_options_model['classifier_id'] = 'testString'
+        enrichment_options_model['model_id'] = 'testString'
+        enrichment_options_model['confidence_threshold'] = 0
+        enrichment_options_model['top_k'] = 0
+        enrichment_options_model['url'] = 'testString'
+        enrichment_options_model['version'] = '2023-03-31'
+        enrichment_options_model['secret'] = 'testString'
+        enrichment_options_model['headers'] = webhook_header_model
+        enrichment_options_model['location_encoding'] = '`utf-16`'
+
+        # Construct a dict representation of a CreateEnrichment model
+        create_enrichment_model = {}
+        create_enrichment_model['name'] = 'testString'
+        create_enrichment_model['description'] = 'testString'
+        create_enrichment_model['type'] = 'classifier'
+        create_enrichment_model['options'] = enrichment_options_model
+
+        # Set up parameter values
+        project_id = 'testString'
+        enrichment = create_enrichment_model
+
+        # Invoke method
+        response = _service.create_enrichment(
+            project_id,
+            enrichment,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+
+    def test_create_enrichment_required_params_with_retries(self):
+        # Enable retries and run test_create_enrichment_required_params.
+        _service.enable_retries()
+        self.test_create_enrichment_required_params()
+
+        # Disable retries and run test_create_enrichment_required_params.
+        _service.disable_retries()
+        self.test_create_enrichment_required_params()
+
+    @responses.activate
+    def test_create_enrichment_value_error(self):
+        """
+        create_enrichment() raises ValueError when any required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/enrichments')
+        mock_response = '{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Construct a dict representation of a WebhookHeader model
+        webhook_header_model = {}
+        webhook_header_model['name'] = 'testString'
+        webhook_header_model['value'] = 'testString'
+
+        # Construct a dict representation of a EnrichmentOptions model
+        enrichment_options_model = {}
+        enrichment_options_model['languages'] = ['testString']
+        enrichment_options_model['entity_type'] = 'testString'
+        enrichment_options_model['regular_expression'] = 'testString'
+        enrichment_options_model['result_field'] = 'testString'
+        enrichment_options_model['classifier_id'] = 'testString'
+        enrichment_options_model['model_id'] = 'testString'
+        enrichment_options_model['confidence_threshold'] = 0
+        enrichment_options_model['top_k'] = 0
+        enrichment_options_model['url'] = 'testString'
+        enrichment_options_model['version'] = '2023-03-31'
+        enrichment_options_model['secret'] = 'testString'
+        enrichment_options_model['headers'] = webhook_header_model
+        enrichment_options_model['location_encoding'] = '`utf-16`'
+
+        # Construct a dict representation of a CreateEnrichment model
+        create_enrichment_model = {}
+        create_enrichment_model['name'] = 'testString'
+        create_enrichment_model['description'] = 'testString'
+        create_enrichment_model['type'] = 'classifier'
+        create_enrichment_model['options'] = enrichment_options_model
+
+        # Set up parameter values
+        project_id = 'testString'
+        enrichment = create_enrichment_model
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "enrichment": enrichment,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.create_enrichment(**req_copy)
+
+    def test_create_enrichment_value_error_with_retries(self):
+        # Enable retries and run test_create_enrichment_value_error.
+        _service.enable_retries()
+        self.test_create_enrichment_value_error()
+
+        # Disable retries and run test_create_enrichment_value_error.
+        _service.disable_retries()
+        self.test_create_enrichment_value_error()
+
+
+class TestGetEnrichment:
+    """
+    Tests for get_enrichment (GET /v2/projects/{project_id}/enrichments/{enrichment_id}).
+    """
+
+    @responses.activate
+    def test_get_enrichment_all_params(self):
+        """
+        get_enrichment() succeeds against a mocked 200 response.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/enrichments/testString')
+        mock_response = '{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        enrichment_id = 'testString'
+
+        # Invoke method
+        response = _service.get_enrichment(
+            project_id,
+            enrichment_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_get_enrichment_all_params_with_retries(self):
+        # Enable retries and run test_get_enrichment_all_params.
+        _service.enable_retries()
+        self.test_get_enrichment_all_params()
+
+        # Disable retries and run test_get_enrichment_all_params.
+        _service.disable_retries()
+        self.test_get_enrichment_all_params()
+
+    @responses.activate
+    def test_get_enrichment_value_error(self):
+        """
+        get_enrichment() raises ValueError when a required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/enrichments/testString')
+        mock_response = '{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        enrichment_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "enrichment_id": enrichment_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.get_enrichment(**req_copy)
+
+    def test_get_enrichment_value_error_with_retries(self):
+        # Enable retries and run test_get_enrichment_value_error.
+        _service.enable_retries()
+        self.test_get_enrichment_value_error()
+
+        # Disable retries and run test_get_enrichment_value_error.
+        _service.disable_retries()
+        self.test_get_enrichment_value_error()
+
+
+class TestUpdateEnrichment:
+    """
+    Tests for update_enrichment (POST /v2/projects/{project_id}/enrichments/{enrichment_id}).
+    """
+
+    @responses.activate
+    def test_update_enrichment_all_params(self):
+        """
+        update_enrichment() returns 200 and posts name and description in the request body.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/enrichments/testString')
+        mock_response = '{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        enrichment_id = 'testString'
+        name = 'testString'
+        description = 'testString'
+
+        # Invoke method
+        response = _service.update_enrichment(
+            project_id,
+            enrichment_id,
+            name,
+            description=description,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+        assert req_body['name'] == 'testString'
+        assert req_body['description'] == 'testString'
+
+    def test_update_enrichment_all_params_with_retries(self):
+        # Enable retries and run test_update_enrichment_all_params.
+        _service.enable_retries()
+        self.test_update_enrichment_all_params()
+
+        # Disable retries and run test_update_enrichment_all_params.
+        _service.disable_retries()
+        self.test_update_enrichment_all_params()
+
+    @responses.activate
+    def test_update_enrichment_value_error(self):
+        """
+        update_enrichment() raises ValueError when any required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/enrichments/testString')
+        mock_response = '{"enrichment_id": "enrichment_id", "name": "name", "description": "description", "type": "part_of_speech", "options": {"languages": ["languages"], "entity_type": "entity_type", "regular_expression": "regular_expression", "result_field": "result_field", "classifier_id": "classifier_id", "model_id": "model_id", "confidence_threshold": 0, "top_k": 0, "url": "url", "version": "2023-03-31", "secret": "secret", "headers": {"name": "name", "value": "value"}, "location_encoding": "`utf-16`"}}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        enrichment_id = 'testString'
+        name = 'testString'
+        description = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "enrichment_id": enrichment_id,
+            "name": name,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.update_enrichment(**req_copy)
+
+    def test_update_enrichment_value_error_with_retries(self):
+        # Enable retries and run test_update_enrichment_value_error.
+        _service.enable_retries()
+        self.test_update_enrichment_value_error()
+
+        # Disable retries and run test_update_enrichment_value_error.
+        _service.disable_retries()
+        self.test_update_enrichment_value_error()
+
+
+class TestDeleteEnrichment:
+    """
+    Tests for delete_enrichment (DELETE /v2/projects/{project_id}/enrichments/{enrichment_id}).
+    """
+
+    @responses.activate
+    def test_delete_enrichment_all_params(self):
+        """
+        delete_enrichment() succeeds against a mocked 204 (no content) response.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/enrichments/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=204,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        enrichment_id = 'testString'
+
+        # Invoke method
+        response = _service.delete_enrichment(
+            project_id,
+            enrichment_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 204
+
+    def test_delete_enrichment_all_params_with_retries(self):
+        # Enable retries and run test_delete_enrichment_all_params.
+        _service.enable_retries()
+        self.test_delete_enrichment_all_params()
+
+        # Disable retries and run test_delete_enrichment_all_params.
+        _service.disable_retries()
+        self.test_delete_enrichment_all_params()
+
+    @responses.activate
+    def test_delete_enrichment_value_error(self):
+        """
+        delete_enrichment() raises ValueError when a required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/enrichments/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=204,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        enrichment_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "enrichment_id": enrichment_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.delete_enrichment(**req_copy)
+
+    def test_delete_enrichment_value_error_with_retries(self):
+        # Enable retries and run test_delete_enrichment_value_error.
+        _service.enable_retries()
+        self.test_delete_enrichment_value_error()
+
+        # Disable retries and run test_delete_enrichment_value_error.
+        _service.disable_retries()
+        self.test_delete_enrichment_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Enrichments
+##############################################################################
+
+##############################################################################
+# Start of Service: Batches
+##############################################################################
+# region
+
+
+class TestListBatches:
+    """
+    Tests for list_batches (GET /v2/projects/{project_id}/collections/{collection_id}/batches).
+    """
+
+    @responses.activate
+    def test_list_batches_all_params(self):
+        """
+        list_batches() succeeds against a mocked 200 response.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/batches')
+        mock_response = '{"batches": [{"batch_id": "batch_id", "created": "2019-01-01T12:00:00.000Z", "enrichment_id": "enrichment_id"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+
+        # Invoke method
+        response = _service.list_batches(
+            project_id,
+            collection_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_list_batches_all_params_with_retries(self):
+        # Enable retries and run test_list_batches_all_params.
+        _service.enable_retries()
+        self.test_list_batches_all_params()
+
+        # Disable retries and run test_list_batches_all_params.
+        _service.disable_retries()
+        self.test_list_batches_all_params()
+
+    @responses.activate
+    def test_list_batches_value_error(self):
+        """
+        list_batches() raises ValueError when a required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/batches')
+        mock_response = '{"batches": [{"batch_id": "batch_id", "created": "2019-01-01T12:00:00.000Z", "enrichment_id": "enrichment_id"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "collection_id": collection_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.list_batches(**req_copy)
+
+    def test_list_batches_value_error_with_retries(self):
+        # Enable retries and run test_list_batches_value_error.
+        _service.enable_retries()
+        self.test_list_batches_value_error()
+
+        # Disable retries and run test_list_batches_value_error.
+        _service.disable_retries()
+        self.test_list_batches_value_error()
+
+
+class TestPullBatches:
+    """
+    Tests for pull_batches (GET /v2/projects/{project_id}/collections/{collection_id}/batches/{batch_id}).
+    """
+
+    @responses.activate
+    def test_pull_batches_all_params(self):
+        """
+        pull_batches() succeeds against a mocked 200 response.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/batches/testString')
+        mock_response = '{"file": "file"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+        batch_id = 'testString'
+
+        # Invoke method
+        response = _service.pull_batches(
+            project_id,
+            collection_id,
+            batch_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_pull_batches_all_params_with_retries(self):
+        # Enable retries and run test_pull_batches_all_params.
+        _service.enable_retries()
+        self.test_pull_batches_all_params()
+
+        # Disable retries and run test_pull_batches_all_params.
+        _service.disable_retries()
+        self.test_pull_batches_all_params()
+
+    @responses.activate
+    def test_pull_batches_value_error(self):
+        """
+        pull_batches() raises ValueError when a required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v2/projects/testString/collections/testString/batches/testString')
+        mock_response = '{"file": "file"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        project_id = 'testString'
+        collection_id = 'testString'
+        batch_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "project_id": project_id,
+            "collection_id": collection_id,
+            "batch_id": batch_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.pull_batches(**req_copy)
+
+    def test_pull_batches_value_error_with_retries(self):
+        # Enable retries and run test_pull_batches_value_error.
+        _service.enable_retries()
+        self.test_pull_batches_value_error()
+
+        # Disable retries and run test_pull_batches_value_error.
+        _service.disable_retries()
+        self.test_pull_batches_value_error()
+
+
class TestPushBatches:
    """
    Test Class for push_batches
    """

    @responses.activate
    def test_push_batches_all_params(self):
        """
        push_batches() with all parameters, including the optional file upload.
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/batches/testString')
        mock_response = 'false'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=202,
        )

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'
        batch_id = 'testString'
        file = io.BytesIO(b'This is a mock file.').getvalue()
        filename = 'testString'

        # Invoke method
        response = _service.push_batches(
            project_id,
            collection_id,
            batch_id,
            file=file,
            filename=filename,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202

    def test_push_batches_all_params_with_retries(self):
        # Enable retries and run test_push_batches_all_params.
        _service.enable_retries()
        self.test_push_batches_all_params()

        # Disable retries and run test_push_batches_all_params.
        _service.disable_retries()
        self.test_push_batches_all_params()

    @responses.activate
    def test_push_batches_required_params(self):
        """
        push_batches() with only the required parameters.
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/batches/testString')
        mock_response = 'false'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=202,
        )

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'
        batch_id = 'testString'

        # Invoke method
        response = _service.push_batches(
            project_id,
            collection_id,
            batch_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 202

    def test_push_batches_required_params_with_retries(self):
        # Enable retries and run test_push_batches_required_params.
        _service.enable_retries()
        self.test_push_batches_required_params()

        # Disable retries and run test_push_batches_required_params.
        _service.disable_retries()
        self.test_push_batches_required_params()

    @responses.activate
    def test_push_batches_value_error(self):
        """
        Verify push_batches() raises ValueError when a required param is None.
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/collections/testString/batches/testString')
        mock_response = 'false'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=202,
        )

        # Set up parameter values
        project_id = 'testString'
        collection_id = 'testString'
        batch_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
            "collection_id": collection_id,
            "batch_id": batch_id,
        }
        for param in req_param_dict:
            # Compare keys by equality, not identity.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.push_batches(**req_copy)

    def test_push_batches_value_error_with_retries(self):
        # Enable retries and run test_push_batches_value_error.
        _service.enable_retries()
        self.test_push_batches_value_error()

        # Disable retries and run test_push_batches_value_error.
        _service.disable_retries()
        self.test_push_batches_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Batches
+##############################################################################
+
+##############################################################################
+# Start of Service: DocumentClassifiers
+##############################################################################
+# region
+
+
class TestListDocumentClassifiers:
    """
    Test Class for list_document_classifiers
    """

    @responses.activate
    def test_list_document_classifiers_all_params(self):
        """
        list_document_classifiers()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/document_classifiers')
        mock_response = '{"classifiers": [{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'

        # Invoke method
        response = _service.list_document_classifiers(
            project_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_list_document_classifiers_all_params_with_retries(self):
        # Enable retries and run test_list_document_classifiers_all_params.
        _service.enable_retries()
        self.test_list_document_classifiers_all_params()

        # Disable retries and run test_list_document_classifiers_all_params.
        _service.disable_retries()
        self.test_list_document_classifiers_all_params()

    @responses.activate
    def test_list_document_classifiers_value_error(self):
        """
        Verify list_document_classifiers() raises ValueError when a required
        param is None.
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/document_classifiers')
        mock_response = '{"classifiers": [{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
        }
        for param in req_param_dict:
            # Compare keys by equality, not identity.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.list_document_classifiers(**req_copy)

    def test_list_document_classifiers_value_error_with_retries(self):
        # Enable retries and run test_list_document_classifiers_value_error.
        _service.enable_retries()
        self.test_list_document_classifiers_value_error()

        # Disable retries and run test_list_document_classifiers_value_error.
        _service.disable_retries()
        self.test_list_document_classifiers_value_error()
+
+
class TestCreateDocumentClassifier:
    """
    Test Class for create_document_classifier
    """

    @staticmethod
    def _build_classifier_model():
        # Construct a dict representation of a CreateDocumentClassifier model,
        # shared by all three tests below.
        document_classifier_enrichment_model = {}
        document_classifier_enrichment_model['enrichment_id'] = 'testString'
        document_classifier_enrichment_model['fields'] = ['testString']

        classifier_federated_model_model = {}
        classifier_federated_model_model['field'] = 'testString'

        create_document_classifier_model = {}
        create_document_classifier_model['name'] = 'testString'
        create_document_classifier_model['description'] = 'testString'
        create_document_classifier_model['language'] = 'en'
        create_document_classifier_model['answer_field'] = 'testString'
        create_document_classifier_model['enrichments'] = [document_classifier_enrichment_model]
        create_document_classifier_model['federated_classification'] = classifier_federated_model_model
        return create_document_classifier_model

    @responses.activate
    def test_create_document_classifier_all_params(self):
        """
        create_document_classifier() with all parameters, including test_data.
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/document_classifiers')
        mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Set up parameter values
        project_id = 'testString'
        training_data = io.BytesIO(b'This is a mock file.').getvalue()
        classifier = self._build_classifier_model()
        test_data = io.BytesIO(b'This is a mock file.').getvalue()

        # Invoke method
        response = _service.create_document_classifier(
            project_id,
            training_data,
            classifier,
            test_data=test_data,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

    def test_create_document_classifier_all_params_with_retries(self):
        # Enable retries and run test_create_document_classifier_all_params.
        _service.enable_retries()
        self.test_create_document_classifier_all_params()

        # Disable retries and run test_create_document_classifier_all_params.
        _service.disable_retries()
        self.test_create_document_classifier_all_params()

    @responses.activate
    def test_create_document_classifier_required_params(self):
        """
        create_document_classifier() with only the required parameters.
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/document_classifiers')
        mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Set up parameter values
        project_id = 'testString'
        training_data = io.BytesIO(b'This is a mock file.').getvalue()
        classifier = self._build_classifier_model()

        # Invoke method
        response = _service.create_document_classifier(
            project_id,
            training_data,
            classifier,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

    def test_create_document_classifier_required_params_with_retries(self):
        # Enable retries and run test_create_document_classifier_required_params.
        _service.enable_retries()
        self.test_create_document_classifier_required_params()

        # Disable retries and run test_create_document_classifier_required_params.
        _service.disable_retries()
        self.test_create_document_classifier_required_params()

    @responses.activate
    def test_create_document_classifier_value_error(self):
        """
        Verify create_document_classifier() raises ValueError when a required
        param is None.
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/document_classifiers')
        mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Set up parameter values
        project_id = 'testString'
        training_data = io.BytesIO(b'This is a mock file.').getvalue()
        classifier = self._build_classifier_model()

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
            "training_data": training_data,
            "classifier": classifier,
        }
        for param in req_param_dict:
            # Compare keys by equality, not identity.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.create_document_classifier(**req_copy)

    def test_create_document_classifier_value_error_with_retries(self):
        # Enable retries and run test_create_document_classifier_value_error.
        _service.enable_retries()
        self.test_create_document_classifier_value_error()

        # Disable retries and run test_create_document_classifier_value_error.
        _service.disable_retries()
        self.test_create_document_classifier_value_error()
+
+
class TestGetDocumentClassifier:
    """
    Test Class for get_document_classifier
    """

    @responses.activate
    def test_get_document_classifier_all_params(self):
        """
        get_document_classifier()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/document_classifiers/testString')
        mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'
        classifier_id = 'testString'

        # Invoke method
        response = _service.get_document_classifier(
            project_id,
            classifier_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_document_classifier_all_params_with_retries(self):
        # Enable retries and run test_get_document_classifier_all_params.
        _service.enable_retries()
        self.test_get_document_classifier_all_params()

        # Disable retries and run test_get_document_classifier_all_params.
        _service.disable_retries()
        self.test_get_document_classifier_all_params()

    @responses.activate
    def test_get_document_classifier_value_error(self):
        """
        Verify get_document_classifier() raises ValueError when a required
        param is None.
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/document_classifiers/testString')
        mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'
        classifier_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
            "classifier_id": classifier_id,
        }
        for param in req_param_dict:
            # Compare keys by equality, not identity.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_document_classifier(**req_copy)

    def test_get_document_classifier_value_error_with_retries(self):
        # Enable retries and run test_get_document_classifier_value_error.
        _service.enable_retries()
        self.test_get_document_classifier_value_error()

        # Disable retries and run test_get_document_classifier_value_error.
        _service.disable_retries()
        self.test_get_document_classifier_value_error()
+
+
class TestUpdateDocumentClassifier:
    """
    Test Class for update_document_classifier
    """

    @responses.activate
    def test_update_document_classifier_all_params(self):
        """
        update_document_classifier() with all parameters, including the
        optional training_data and test_data uploads.
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/document_classifiers/testString')
        mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Construct a dict representation of a UpdateDocumentClassifier model
        update_document_classifier_model = {}
        update_document_classifier_model['name'] = 'testString'
        update_document_classifier_model['description'] = 'testString'

        # Set up parameter values
        project_id = 'testString'
        classifier_id = 'testString'
        classifier = update_document_classifier_model
        training_data = io.BytesIO(b'This is a mock file.').getvalue()
        test_data = io.BytesIO(b'This is a mock file.').getvalue()

        # Invoke method
        response = _service.update_document_classifier(
            project_id,
            classifier_id,
            classifier,
            training_data=training_data,
            test_data=test_data,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

    def test_update_document_classifier_all_params_with_retries(self):
        # Enable retries and run test_update_document_classifier_all_params.
        _service.enable_retries()
        self.test_update_document_classifier_all_params()

        # Disable retries and run test_update_document_classifier_all_params.
        _service.disable_retries()
        self.test_update_document_classifier_all_params()

    @responses.activate
    def test_update_document_classifier_required_params(self):
        """
        update_document_classifier() with only the required parameters.
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/document_classifiers/testString')
        mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Construct a dict representation of a UpdateDocumentClassifier model
        update_document_classifier_model = {}
        update_document_classifier_model['name'] = 'testString'
        update_document_classifier_model['description'] = 'testString'

        # Set up parameter values
        project_id = 'testString'
        classifier_id = 'testString'
        classifier = update_document_classifier_model

        # Invoke method
        response = _service.update_document_classifier(
            project_id,
            classifier_id,
            classifier,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

    def test_update_document_classifier_required_params_with_retries(self):
        # Enable retries and run test_update_document_classifier_required_params.
        _service.enable_retries()
        self.test_update_document_classifier_required_params()

        # Disable retries and run test_update_document_classifier_required_params.
        _service.disable_retries()
        self.test_update_document_classifier_required_params()

    @responses.activate
    def test_update_document_classifier_value_error(self):
        """
        Verify update_document_classifier() raises ValueError when a required
        param is None.
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/document_classifiers/testString')
        mock_response = '{"classifier_id": "classifier_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "language": "en", "enrichments": [{"enrichment_id": "enrichment_id", "fields": ["fields"]}], "recognized_fields": ["recognized_fields"], "answer_field": "answer_field", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "federated_classification": {"field": "field"}}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Construct a dict representation of a UpdateDocumentClassifier model
        update_document_classifier_model = {}
        update_document_classifier_model['name'] = 'testString'
        update_document_classifier_model['description'] = 'testString'

        # Set up parameter values
        project_id = 'testString'
        classifier_id = 'testString'
        classifier = update_document_classifier_model

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
            "classifier_id": classifier_id,
            "classifier": classifier,
        }
        for param in req_param_dict:
            # Compare keys by equality, not identity.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.update_document_classifier(**req_copy)

    def test_update_document_classifier_value_error_with_retries(self):
        # Enable retries and run test_update_document_classifier_value_error.
        _service.enable_retries()
        self.test_update_document_classifier_value_error()

        # Disable retries and run test_update_document_classifier_value_error.
        _service.disable_retries()
        self.test_update_document_classifier_value_error()
+
+
class TestDeleteDocumentClassifier:
    """
    Test Class for delete_document_classifier
    """

    @responses.activate
    def test_delete_document_classifier_all_params(self):
        """
        delete_document_classifier()
        """
        # Set up mock (DELETE returns 204 No Content, so no body is mocked)
        url = preprocess_url('/v2/projects/testString/document_classifiers/testString')
        responses.add(
            responses.DELETE,
            url,
            status=204,
        )

        # Set up parameter values
        project_id = 'testString'
        classifier_id = 'testString'

        # Invoke method
        response = _service.delete_document_classifier(
            project_id,
            classifier_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 204

    def test_delete_document_classifier_all_params_with_retries(self):
        # Enable retries and run test_delete_document_classifier_all_params.
        _service.enable_retries()
        self.test_delete_document_classifier_all_params()

        # Disable retries and run test_delete_document_classifier_all_params.
        _service.disable_retries()
        self.test_delete_document_classifier_all_params()

    @responses.activate
    def test_delete_document_classifier_value_error(self):
        """
        Verify delete_document_classifier() raises ValueError when a required
        param is None.
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/document_classifiers/testString')
        responses.add(
            responses.DELETE,
            url,
            status=204,
        )

        # Set up parameter values
        project_id = 'testString'
        classifier_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
            "classifier_id": classifier_id,
        }
        for param in req_param_dict:
            # Compare keys by equality, not identity.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_document_classifier(**req_copy)

    def test_delete_document_classifier_value_error_with_retries(self):
        # Enable retries and run test_delete_document_classifier_value_error.
        _service.enable_retries()
        self.test_delete_document_classifier_value_error()

        # Disable retries and run test_delete_document_classifier_value_error.
        _service.disable_retries()
        self.test_delete_document_classifier_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: DocumentClassifiers
+##############################################################################
+
+##############################################################################
+# Start of Service: DocumentClassifierModels
+##############################################################################
+# region
+
+
class TestListDocumentClassifierModels:
    """
    Test Class for list_document_classifier_models
    """

    @responses.activate
    def test_list_document_classifier_models_all_params(self):
        """
        list_document_classifier_models()
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models')
        mock_response = '{"models": [{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'
        classifier_id = 'testString'

        # Invoke method
        response = _service.list_document_classifier_models(
            project_id,
            classifier_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_list_document_classifier_models_all_params_with_retries(self):
        # Enable retries and run test_list_document_classifier_models_all_params.
        _service.enable_retries()
        self.test_list_document_classifier_models_all_params()

        # Disable retries and run test_list_document_classifier_models_all_params.
        _service.disable_retries()
        self.test_list_document_classifier_models_all_params()

    @responses.activate
    def test_list_document_classifier_models_value_error(self):
        """
        Verify list_document_classifier_models() raises ValueError when a
        required param is None.
        """
        # Set up mock
        url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models')
        mock_response = '{"models": [{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        project_id = 'testString'
        classifier_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "project_id": project_id,
            "classifier_id": classifier_id,
        }
        for param in req_param_dict:
            # Compare keys by equality, not identity.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.list_document_classifier_models(**req_copy)

    def test_list_document_classifier_models_value_error_with_retries(self):
        # Enable retries and run test_list_document_classifier_models_value_error.
        _service.enable_retries()
        self.test_list_document_classifier_models_value_error()

        # Disable retries and run test_list_document_classifier_models_value_error.
        _service.disable_retries()
        self.test_list_document_classifier_models_value_error()
+
+
+class TestCreateDocumentClassifierModel:
+ """
+ Test Class for create_document_classifier_model
+ """
+
+ @responses.activate
+ def test_create_document_classifier_model_all_params(self):
+ """
+ create_document_classifier_model()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models')
+ mock_response = '{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+ classifier_id = 'testString'
+ name = 'testString'
+ description = 'testString'
+ learning_rate = 0.1
+ l1_regularization_strengths = [1.0E-6]
+ l2_regularization_strengths = [1.0E-6]
+ training_max_steps = 10000000
+ improvement_ratio = 0.000010
+
+ # Invoke method
+ response = _service.create_document_classifier_model(
+ project_id,
+ classifier_id,
+ name,
+ description=description,
+ learning_rate=learning_rate,
+ l1_regularization_strengths=l1_regularization_strengths,
+ l2_regularization_strengths=l2_regularization_strengths,
+ training_max_steps=training_max_steps,
+ improvement_ratio=improvement_ratio,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['name'] == 'testString'
+ assert req_body['description'] == 'testString'
+ assert req_body['learning_rate'] == 0.1
+ assert req_body['l1_regularization_strengths'] == [1.0E-6]
+ assert req_body['l2_regularization_strengths'] == [1.0E-6]
+ assert req_body['training_max_steps'] == 10000000
+ assert req_body['improvement_ratio'] == 0.000010
+
+ def test_create_document_classifier_model_all_params_with_retries(self):
+ # Enable retries and run test_create_document_classifier_model_all_params.
+ _service.enable_retries()
+ self.test_create_document_classifier_model_all_params()
+
+ # Disable retries and run test_create_document_classifier_model_all_params.
+ _service.disable_retries()
+ self.test_create_document_classifier_model_all_params()
+
+ @responses.activate
+ def test_create_document_classifier_model_value_error(self):
+ """
+ test_create_document_classifier_model_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models')
+ mock_response = '{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+ classifier_id = 'testString'
+ name = 'testString'
+ description = 'testString'
+ learning_rate = 0.1
+ l1_regularization_strengths = [1.0E-6]
+ l2_regularization_strengths = [1.0E-6]
+ training_max_steps = 10000000
+ improvement_ratio = 0.000010
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "project_id": project_id,
+ "classifier_id": classifier_id,
+ "name": name,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.create_document_classifier_model(**req_copy)
+
+ def test_create_document_classifier_model_value_error_with_retries(self):
+ # Enable retries and run test_create_document_classifier_model_value_error.
+ _service.enable_retries()
+ self.test_create_document_classifier_model_value_error()
+
+ # Disable retries and run test_create_document_classifier_model_value_error.
+ _service.disable_retries()
+ self.test_create_document_classifier_model_value_error()
+
+
+class TestGetDocumentClassifierModel:
+ """
+ Test Class for get_document_classifier_model
+ """
+
+ @responses.activate
+ def test_get_document_classifier_model_all_params(self):
+ """
+ get_document_classifier_model()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models/testString')
+ mock_response = '{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+ classifier_id = 'testString'
+ model_id = 'testString'
+
+ # Invoke method
+ response = _service.get_document_classifier_model(
+ project_id,
+ classifier_id,
+ model_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_get_document_classifier_model_all_params_with_retries(self):
+ # Enable retries and run test_get_document_classifier_model_all_params.
+ _service.enable_retries()
+ self.test_get_document_classifier_model_all_params()
+
+ # Disable retries and run test_get_document_classifier_model_all_params.
+ _service.disable_retries()
+ self.test_get_document_classifier_model_all_params()
+
+ @responses.activate
+ def test_get_document_classifier_model_value_error(self):
+ """
+ test_get_document_classifier_model_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models/testString')
+ mock_response = '{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+ classifier_id = 'testString'
+ model_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "project_id": project_id,
+ "classifier_id": classifier_id,
+ "model_id": model_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.get_document_classifier_model(**req_copy)
+
+ def test_get_document_classifier_model_value_error_with_retries(self):
+ # Enable retries and run test_get_document_classifier_model_value_error.
+ _service.enable_retries()
+ self.test_get_document_classifier_model_value_error()
+
+ # Disable retries and run test_get_document_classifier_model_value_error.
+ _service.disable_retries()
+ self.test_get_document_classifier_model_value_error()
+
+
+class TestUpdateDocumentClassifierModel:
+ """
+ Test Class for update_document_classifier_model
+ """
+
+ @responses.activate
+ def test_update_document_classifier_model_all_params(self):
+ """
+ update_document_classifier_model()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models/testString')
+ mock_response = '{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+ classifier_id = 'testString'
+ model_id = 'testString'
+ name = 'testString'
+ description = 'testString'
+
+ # Invoke method
+ response = _service.update_document_classifier_model(
+ project_id,
+ classifier_id,
+ model_id,
+ name=name,
+ description=description,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['name'] == 'testString'
+ assert req_body['description'] == 'testString'
+
+ def test_update_document_classifier_model_all_params_with_retries(self):
+ # Enable retries and run test_update_document_classifier_model_all_params.
+ _service.enable_retries()
+ self.test_update_document_classifier_model_all_params()
+
+ # Disable retries and run test_update_document_classifier_model_all_params.
+ _service.disable_retries()
+ self.test_update_document_classifier_model_all_params()
+
+ @responses.activate
+ def test_update_document_classifier_model_value_error(self):
+ """
+ test_update_document_classifier_model_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models/testString')
+ mock_response = '{"model_id": "model_id", "name": "name", "description": "description", "created": "2019-01-01T12:00:00.000Z", "updated": "2019-01-01T12:00:00.000Z", "training_data_file": "training_data_file", "test_data_file": "test_data_file", "status": "training", "evaluation": {"micro_average": {"precision": 0, "recall": 0, "f1": 0}, "macro_average": {"precision": 0, "recall": 0, "f1": 0}, "per_class": [{"name": "name", "precision": 0, "recall": 0, "f1": 0}]}, "enrichment_id": "enrichment_id", "deployed_at": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+ classifier_id = 'testString'
+ model_id = 'testString'
+ name = 'testString'
+ description = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "project_id": project_id,
+ "classifier_id": classifier_id,
+ "model_id": model_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.update_document_classifier_model(**req_copy)
+
+ def test_update_document_classifier_model_value_error_with_retries(self):
+ # Enable retries and run test_update_document_classifier_model_value_error.
+ _service.enable_retries()
+ self.test_update_document_classifier_model_value_error()
+
+ # Disable retries and run test_update_document_classifier_model_value_error.
+ _service.disable_retries()
+ self.test_update_document_classifier_model_value_error()
+
+
+class TestDeleteDocumentClassifierModel:
+ """
+ Test Class for delete_document_classifier_model
+ """
+
+ @responses.activate
+ def test_delete_document_classifier_model_all_params(self):
+ """
+ delete_document_classifier_model()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=204,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+ classifier_id = 'testString'
+ model_id = 'testString'
+
+ # Invoke method
+ response = _service.delete_document_classifier_model(
+ project_id,
+ classifier_id,
+ model_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 204
+
+ def test_delete_document_classifier_model_all_params_with_retries(self):
+ # Enable retries and run test_delete_document_classifier_model_all_params.
+ _service.enable_retries()
+ self.test_delete_document_classifier_model_all_params()
+
+ # Disable retries and run test_delete_document_classifier_model_all_params.
+ _service.disable_retries()
+ self.test_delete_document_classifier_model_all_params()
+
+ @responses.activate
+ def test_delete_document_classifier_model_value_error(self):
+ """
+ test_delete_document_classifier_model_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString/document_classifiers/testString/models/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=204,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+ classifier_id = 'testString'
+ model_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "project_id": project_id,
+ "classifier_id": classifier_id,
+ "model_id": model_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_document_classifier_model(**req_copy)
+
+ def test_delete_document_classifier_model_value_error_with_retries(self):
+ # Enable retries and run test_delete_document_classifier_model_value_error.
+ _service.enable_retries()
+ self.test_delete_document_classifier_model_value_error()
+
+ # Disable retries and run test_delete_document_classifier_model_value_error.
+ _service.disable_retries()
+ self.test_delete_document_classifier_model_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: DocumentClassifierModels
+##############################################################################
+
+##############################################################################
+# Start of Service: Analyze
+##############################################################################
+# region
+
+
+class TestAnalyzeDocument:
+ """
+ Test Class for analyze_document
+ """
+
+ @responses.activate
+ def test_analyze_document_all_params(self):
+ """
+ analyze_document()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString/collections/testString/analyze')
+ mock_response = '{"notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "result": {"metadata": {"anyKey": "anyValue"}}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+ collection_id = 'testString'
+ file = io.BytesIO(b'This is a mock file.').getvalue()
+ filename = 'testString'
+ file_content_type = 'application/json'
+ metadata = 'testString'
+
+ # Invoke method
+ response = _service.analyze_document(
+ project_id,
+ collection_id,
+ file=file,
+ filename=filename,
+ file_content_type=file_content_type,
+ metadata=metadata,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_analyze_document_all_params_with_retries(self):
+ # Enable retries and run test_analyze_document_all_params.
+ _service.enable_retries()
+ self.test_analyze_document_all_params()
+
+ # Disable retries and run test_analyze_document_all_params.
+ _service.disable_retries()
+ self.test_analyze_document_all_params()
+
+ @responses.activate
+ def test_analyze_document_required_params(self):
+ """
+ test_analyze_document_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString/collections/testString/analyze')
+ mock_response = '{"notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "result": {"metadata": {"anyKey": "anyValue"}}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+ collection_id = 'testString'
+
+ # Invoke method
+ response = _service.analyze_document(
+ project_id,
+ collection_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_analyze_document_required_params_with_retries(self):
+ # Enable retries and run test_analyze_document_required_params.
+ _service.enable_retries()
+ self.test_analyze_document_required_params()
+
+ # Disable retries and run test_analyze_document_required_params.
+ _service.disable_retries()
+ self.test_analyze_document_required_params()
+
+ @responses.activate
+ def test_analyze_document_value_error(self):
+ """
+ test_analyze_document_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/projects/testString/collections/testString/analyze')
+ mock_response = '{"notices": [{"notice_id": "notice_id", "created": "2019-01-01T12:00:00.000Z", "document_id": "document_id", "collection_id": "collection_id", "query_id": "query_id", "severity": "warning", "step": "step", "description": "description"}], "result": {"metadata": {"anyKey": "anyValue"}}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ project_id = 'testString'
+ collection_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "project_id": project_id,
+ "collection_id": collection_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.analyze_document(**req_copy)
+
+ def test_analyze_document_value_error_with_retries(self):
+ # Enable retries and run test_analyze_document_value_error.
+ _service.enable_retries()
+ self.test_analyze_document_value_error()
+
+ # Disable retries and run test_analyze_document_value_error.
+ _service.disable_retries()
+ self.test_analyze_document_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Analyze
+##############################################################################
+
+##############################################################################
+# Start of Service: UserData
+##############################################################################
+# region
+
+
+class TestDeleteUserData:
+ """
+ Test Class for delete_user_data
+ """
+
+ @responses.activate
+ def test_delete_user_data_all_params(self):
+ """
+ delete_user_data()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/user_data')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ customer_id = 'testString'
+
+ # Invoke method
+ response = _service.delete_user_data(
+ customer_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'customer_id={}'.format(customer_id) in query_string
+
+ def test_delete_user_data_all_params_with_retries(self):
+ # Enable retries and run test_delete_user_data_all_params.
+ _service.enable_retries()
+ self.test_delete_user_data_all_params()
+
+ # Disable retries and run test_delete_user_data_all_params.
+ _service.disable_retries()
+ self.test_delete_user_data_all_params()
+
+ @responses.activate
+ def test_delete_user_data_value_error(self):
+ """
+ test_delete_user_data_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v2/user_data')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ customer_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "customer_id": customer_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_user_data(**req_copy)
+
+ def test_delete_user_data_value_error_with_retries(self):
+ # Enable retries and run test_delete_user_data_value_error.
+ _service.enable_retries()
+ self.test_delete_user_data_value_error()
+
+ # Disable retries and run test_delete_user_data_value_error.
+ _service.disable_retries()
+ self.test_delete_user_data_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: UserData
+##############################################################################
+
+
+##############################################################################
+# Start of Model Tests
+##############################################################################
+# region
+
+
+class TestModel_AnalyzedDocument:
+ """
+ Test Class for AnalyzedDocument
+ """
+
+ def test_analyzed_document_serialization(self):
+ """
+ Test serialization/deserialization for AnalyzedDocument
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ notice_model = {} # Notice
+
+ analyzed_result_model = {} # AnalyzedResult
+ analyzed_result_model['metadata'] = {'anyKey': 'anyValue'}
+ analyzed_result_model['foo'] = 'testString'
+
+ # Construct a json representation of a AnalyzedDocument model
+ analyzed_document_model_json = {}
+ analyzed_document_model_json['notices'] = [notice_model]
+ analyzed_document_model_json['result'] = analyzed_result_model
+
+ # Construct a model instance of AnalyzedDocument by calling from_dict on the json representation
+ analyzed_document_model = AnalyzedDocument.from_dict(analyzed_document_model_json)
+ assert analyzed_document_model != False
+
+ # Construct a model instance of AnalyzedDocument by calling from_dict on the json representation
+ analyzed_document_model_dict = AnalyzedDocument.from_dict(analyzed_document_model_json).__dict__
+ analyzed_document_model2 = AnalyzedDocument(**analyzed_document_model_dict)
+
+ # Verify the model instances are equivalent
+ assert analyzed_document_model == analyzed_document_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ analyzed_document_model_json2 = analyzed_document_model.to_dict()
+ assert analyzed_document_model_json2 == analyzed_document_model_json
+
+
+class TestModel_AnalyzedResult:
+ """
+ Test Class for AnalyzedResult
+ """
+
+ def test_analyzed_result_serialization(self):
+ """
+ Test serialization/deserialization for AnalyzedResult
+ """
+
+ # Construct a json representation of a AnalyzedResult model
+ analyzed_result_model_json = {}
+ analyzed_result_model_json['metadata'] = {'anyKey': 'anyValue'}
+ analyzed_result_model_json['foo'] = 'testString'
+
+ # Construct a model instance of AnalyzedResult by calling from_dict on the json representation
+ analyzed_result_model = AnalyzedResult.from_dict(analyzed_result_model_json)
+ assert analyzed_result_model != False
+
+ # Construct a model instance of AnalyzedResult by calling from_dict on the json representation
+ analyzed_result_model_dict = AnalyzedResult.from_dict(analyzed_result_model_json).__dict__
+ analyzed_result_model2 = AnalyzedResult(**analyzed_result_model_dict)
+
+ # Verify the model instances are equivalent
+ assert analyzed_result_model == analyzed_result_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ analyzed_result_model_json2 = analyzed_result_model.to_dict()
+ assert analyzed_result_model_json2 == analyzed_result_model_json
+
+ # Test get_properties and set_properties methods.
+ analyzed_result_model.set_properties({})
+ actual_dict = analyzed_result_model.get_properties()
+ assert actual_dict == {}
+
+ expected_dict = {'foo': 'testString'}
+ analyzed_result_model.set_properties(expected_dict)
+ actual_dict = analyzed_result_model.get_properties()
+ assert actual_dict.keys() == expected_dict.keys()
+
+
+class TestModel_BatchDetails:
+ """
+ Test Class for BatchDetails
+ """
+
+ def test_batch_details_serialization(self):
+ """
+ Test serialization/deserialization for BatchDetails
+ """
+
+ # Construct a json representation of a BatchDetails model
+ batch_details_model_json = {}
+ batch_details_model_json['enrichment_id'] = 'testString'
+
+ # Construct a model instance of BatchDetails by calling from_dict on the json representation
+ batch_details_model = BatchDetails.from_dict(batch_details_model_json)
+ assert batch_details_model != False
+
+ # Construct a model instance of BatchDetails by calling from_dict on the json representation
+ batch_details_model_dict = BatchDetails.from_dict(batch_details_model_json).__dict__
+ batch_details_model2 = BatchDetails(**batch_details_model_dict)
+
+ # Verify the model instances are equivalent
+ assert batch_details_model == batch_details_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ batch_details_model_json2 = batch_details_model.to_dict()
+ assert batch_details_model_json2 == batch_details_model_json
+
+
+class TestModel_ClassifierFederatedModel:
+ """
+ Test Class for ClassifierFederatedModel
+ """
+
+ def test_classifier_federated_model_serialization(self):
+ """
+ Test serialization/deserialization for ClassifierFederatedModel
+ """
+
+ # Construct a json representation of a ClassifierFederatedModel model
+ classifier_federated_model_model_json = {}
+ classifier_federated_model_model_json['field'] = 'testString'
+
+ # Construct a model instance of ClassifierFederatedModel by calling from_dict on the json representation
+ classifier_federated_model_model = ClassifierFederatedModel.from_dict(classifier_federated_model_model_json)
+ assert classifier_federated_model_model != False
+
+ # Construct a model instance of ClassifierFederatedModel by calling from_dict on the json representation
+ classifier_federated_model_model_dict = ClassifierFederatedModel.from_dict(classifier_federated_model_model_json).__dict__
+ classifier_federated_model_model2 = ClassifierFederatedModel(**classifier_federated_model_model_dict)
+
+ # Verify the model instances are equivalent
+ assert classifier_federated_model_model == classifier_federated_model_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ classifier_federated_model_model_json2 = classifier_federated_model_model.to_dict()
+ assert classifier_federated_model_model_json2 == classifier_federated_model_model_json
+
+
+class TestModel_ClassifierModelEvaluation:
+ """
+ Test Class for ClassifierModelEvaluation
+ """
+
+ def test_classifier_model_evaluation_serialization(self):
+ """
+ Test serialization/deserialization for ClassifierModelEvaluation
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ model_evaluation_micro_average_model = {} # ModelEvaluationMicroAverage
+ model_evaluation_micro_average_model['precision'] = 0
+ model_evaluation_micro_average_model['recall'] = 0
+ model_evaluation_micro_average_model['f1'] = 0
+
+ model_evaluation_macro_average_model = {} # ModelEvaluationMacroAverage
+ model_evaluation_macro_average_model['precision'] = 0
+ model_evaluation_macro_average_model['recall'] = 0
+ model_evaluation_macro_average_model['f1'] = 0
+
+ per_class_model_evaluation_model = {} # PerClassModelEvaluation
+ per_class_model_evaluation_model['name'] = 'testString'
+ per_class_model_evaluation_model['precision'] = 0
+ per_class_model_evaluation_model['recall'] = 0
+ per_class_model_evaluation_model['f1'] = 0
+
+ # Construct a json representation of a ClassifierModelEvaluation model
+ classifier_model_evaluation_model_json = {}
+ classifier_model_evaluation_model_json['micro_average'] = model_evaluation_micro_average_model
+ classifier_model_evaluation_model_json['macro_average'] = model_evaluation_macro_average_model
+ classifier_model_evaluation_model_json['per_class'] = [per_class_model_evaluation_model]
+
+ # Construct a model instance of ClassifierModelEvaluation by calling from_dict on the json representation
+ classifier_model_evaluation_model = ClassifierModelEvaluation.from_dict(classifier_model_evaluation_model_json)
+ assert classifier_model_evaluation_model != False
+
+ # Construct a model instance of ClassifierModelEvaluation by calling from_dict on the json representation
+ classifier_model_evaluation_model_dict = ClassifierModelEvaluation.from_dict(classifier_model_evaluation_model_json).__dict__
+ classifier_model_evaluation_model2 = ClassifierModelEvaluation(**classifier_model_evaluation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert classifier_model_evaluation_model == classifier_model_evaluation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ classifier_model_evaluation_model_json2 = classifier_model_evaluation_model.to_dict()
+ assert classifier_model_evaluation_model_json2 == classifier_model_evaluation_model_json
+
+
+class TestModel_Collection:
+ """
+ Test Class for Collection
+ """
+
+ def test_collection_serialization(self):
+ """
+ Test serialization/deserialization for Collection
+ """
+
+ # Construct a json representation of a Collection model
+ collection_model_json = {}
+ collection_model_json['name'] = 'testString'
+
+ # Construct a model instance of Collection by calling from_dict on the json representation
+ collection_model = Collection.from_dict(collection_model_json)
+ assert collection_model != False
+
+ # Construct a model instance of Collection by calling from_dict on the json representation
+ collection_model_dict = Collection.from_dict(collection_model_json).__dict__
+ collection_model2 = Collection(**collection_model_dict)
+
+ # Verify the model instances are equivalent
+ assert collection_model == collection_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ collection_model_json2 = collection_model.to_dict()
+ assert collection_model_json2 == collection_model_json
+
+
+class TestModel_CollectionDetails:
+ """
+ Test Class for CollectionDetails
+ """
+
+ def test_collection_details_serialization(self):
+ """
+ Test serialization/deserialization for CollectionDetails
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ collection_enrichment_model = {} # CollectionEnrichment
+ collection_enrichment_model['enrichment_id'] = 'testString'
+ collection_enrichment_model['fields'] = ['testString']
+
+ # Construct a json representation of a CollectionDetails model
+ collection_details_model_json = {}
+ collection_details_model_json['name'] = 'testString'
+ collection_details_model_json['description'] = 'testString'
+ collection_details_model_json['language'] = 'en'
+ collection_details_model_json['ocr_enabled'] = False
+ collection_details_model_json['enrichments'] = [collection_enrichment_model]
+
+ # Construct a model instance of CollectionDetails by calling from_dict on the json representation
+ collection_details_model = CollectionDetails.from_dict(collection_details_model_json)
+ assert collection_details_model != False
+
+ # Construct a model instance of CollectionDetails by calling from_dict on the json representation
+ collection_details_model_dict = CollectionDetails.from_dict(collection_details_model_json).__dict__
+ collection_details_model2 = CollectionDetails(**collection_details_model_dict)
+
+ # Verify the model instances are equivalent
+ assert collection_details_model == collection_details_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ collection_details_model_json2 = collection_details_model.to_dict()
+ assert collection_details_model_json2 == collection_details_model_json
+
+
+class TestModel_CollectionDetailsSmartDocumentUnderstanding:
+ """
+ Test Class for CollectionDetailsSmartDocumentUnderstanding
+ """
+
+ def test_collection_details_smart_document_understanding_serialization(self):
+ """
+ Test serialization/deserialization for CollectionDetailsSmartDocumentUnderstanding
+ """
+
+ # Construct a json representation of a CollectionDetailsSmartDocumentUnderstanding model
+ collection_details_smart_document_understanding_model_json = {}
+ collection_details_smart_document_understanding_model_json['enabled'] = True
+ collection_details_smart_document_understanding_model_json['model'] = 'custom'
+
+ # Construct a model instance of CollectionDetailsSmartDocumentUnderstanding by calling from_dict on the json representation
+ collection_details_smart_document_understanding_model = CollectionDetailsSmartDocumentUnderstanding.from_dict(collection_details_smart_document_understanding_model_json)
+ assert collection_details_smart_document_understanding_model != False
+
+ # Construct a second model instance of CollectionDetailsSmartDocumentUnderstanding from the attribute dict of the first instance
+ collection_details_smart_document_understanding_model_dict = CollectionDetailsSmartDocumentUnderstanding.from_dict(collection_details_smart_document_understanding_model_json).__dict__
+ collection_details_smart_document_understanding_model2 = CollectionDetailsSmartDocumentUnderstanding(**collection_details_smart_document_understanding_model_dict)
+
+ # Verify the model instances are equivalent
+ assert collection_details_smart_document_understanding_model == collection_details_smart_document_understanding_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ collection_details_smart_document_understanding_model_json2 = collection_details_smart_document_understanding_model.to_dict()
+ assert collection_details_smart_document_understanding_model_json2 == collection_details_smart_document_understanding_model_json
+
+
+class TestModel_CollectionEnrichment:
+ """
+ Test Class for CollectionEnrichment
+ """
+
+ def test_collection_enrichment_serialization(self):
+ """
+ Test serialization/deserialization for CollectionEnrichment
+ """
+
+ # Construct a json representation of a CollectionEnrichment model
+ collection_enrichment_model_json = {}
+ collection_enrichment_model_json['enrichment_id'] = 'testString'
+ collection_enrichment_model_json['fields'] = ['testString']
+
+ # Construct a model instance of CollectionEnrichment by calling from_dict on the json representation
+ collection_enrichment_model = CollectionEnrichment.from_dict(collection_enrichment_model_json)
+ assert collection_enrichment_model != False
+
+ # Construct a second model instance of CollectionEnrichment from the attribute dict of the first instance
+ collection_enrichment_model_dict = CollectionEnrichment.from_dict(collection_enrichment_model_json).__dict__
+ collection_enrichment_model2 = CollectionEnrichment(**collection_enrichment_model_dict)
+
+ # Verify the model instances are equivalent
+ assert collection_enrichment_model == collection_enrichment_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ collection_enrichment_model_json2 = collection_enrichment_model.to_dict()
+ assert collection_enrichment_model_json2 == collection_enrichment_model_json
+
+
+class TestModel_Completions:
+ """
+ Test Class for Completions
+ """
+
+ def test_completions_serialization(self):
+ """
+ Test serialization/deserialization for Completions
+ """
+
+ # Construct a json representation of a Completions model
+ completions_model_json = {}
+ completions_model_json['completions'] = ['testString']
+
+ # Construct a model instance of Completions by calling from_dict on the json representation
+ completions_model = Completions.from_dict(completions_model_json)
+ assert completions_model != False
+
+ # Construct a second model instance of Completions from the attribute dict of the first instance
+ completions_model_dict = Completions.from_dict(completions_model_json).__dict__
+ completions_model2 = Completions(**completions_model_dict)
+
+ # Verify the model instances are equivalent
+ assert completions_model == completions_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ completions_model_json2 = completions_model.to_dict()
+ assert completions_model_json2 == completions_model_json
+
+
+class TestModel_ComponentSettingsAggregation:
+ """
+ Test Class for ComponentSettingsAggregation
+ """
+
+ def test_component_settings_aggregation_serialization(self):
+ """
+ Test serialization/deserialization for ComponentSettingsAggregation
+ """
+
+ # Construct a json representation of a ComponentSettingsAggregation model
+ component_settings_aggregation_model_json = {}
+ component_settings_aggregation_model_json['name'] = 'testString'
+ component_settings_aggregation_model_json['label'] = 'testString'
+ component_settings_aggregation_model_json['multiple_selections_allowed'] = True
+ component_settings_aggregation_model_json['visualization_type'] = 'auto'
+
+ # Construct a model instance of ComponentSettingsAggregation by calling from_dict on the json representation
+ component_settings_aggregation_model = ComponentSettingsAggregation.from_dict(component_settings_aggregation_model_json)
+ assert component_settings_aggregation_model != False
+
+ # Construct a second model instance of ComponentSettingsAggregation from the attribute dict of the first instance
+ component_settings_aggregation_model_dict = ComponentSettingsAggregation.from_dict(component_settings_aggregation_model_json).__dict__
+ component_settings_aggregation_model2 = ComponentSettingsAggregation(**component_settings_aggregation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert component_settings_aggregation_model == component_settings_aggregation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ component_settings_aggregation_model_json2 = component_settings_aggregation_model.to_dict()
+ assert component_settings_aggregation_model_json2 == component_settings_aggregation_model_json
+
+
+class TestModel_ComponentSettingsFieldsShown:
+ """
+ Test Class for ComponentSettingsFieldsShown
+ """
+
+ def test_component_settings_fields_shown_serialization(self):
+ """
+ Test serialization/deserialization for ComponentSettingsFieldsShown
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ component_settings_fields_shown_body_model = {} # ComponentSettingsFieldsShownBody
+ component_settings_fields_shown_body_model['use_passage'] = True
+ component_settings_fields_shown_body_model['field'] = 'testString'
+
+ component_settings_fields_shown_title_model = {} # ComponentSettingsFieldsShownTitle
+ component_settings_fields_shown_title_model['field'] = 'testString'
+
+ # Construct a json representation of a ComponentSettingsFieldsShown model
+ component_settings_fields_shown_model_json = {}
+ component_settings_fields_shown_model_json['body'] = component_settings_fields_shown_body_model
+ component_settings_fields_shown_model_json['title'] = component_settings_fields_shown_title_model
+
+ # Construct a model instance of ComponentSettingsFieldsShown by calling from_dict on the json representation
+ component_settings_fields_shown_model = ComponentSettingsFieldsShown.from_dict(component_settings_fields_shown_model_json)
+ assert component_settings_fields_shown_model != False
+
+ # Construct a second model instance of ComponentSettingsFieldsShown from the attribute dict of the first instance
+ component_settings_fields_shown_model_dict = ComponentSettingsFieldsShown.from_dict(component_settings_fields_shown_model_json).__dict__
+ component_settings_fields_shown_model2 = ComponentSettingsFieldsShown(**component_settings_fields_shown_model_dict)
+
+ # Verify the model instances are equivalent
+ assert component_settings_fields_shown_model == component_settings_fields_shown_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ component_settings_fields_shown_model_json2 = component_settings_fields_shown_model.to_dict()
+ assert component_settings_fields_shown_model_json2 == component_settings_fields_shown_model_json
+
+
+class TestModel_ComponentSettingsFieldsShownBody:
+ """
+ Test Class for ComponentSettingsFieldsShownBody
+ """
+
+ def test_component_settings_fields_shown_body_serialization(self):
+ """
+ Test serialization/deserialization for ComponentSettingsFieldsShownBody
+ """
+
+ # Construct a json representation of a ComponentSettingsFieldsShownBody model
+ component_settings_fields_shown_body_model_json = {}
+ component_settings_fields_shown_body_model_json['use_passage'] = True
+ component_settings_fields_shown_body_model_json['field'] = 'testString'
+
+ # Construct a model instance of ComponentSettingsFieldsShownBody by calling from_dict on the json representation
+ component_settings_fields_shown_body_model = ComponentSettingsFieldsShownBody.from_dict(component_settings_fields_shown_body_model_json)
+ assert component_settings_fields_shown_body_model != False
+
+ # Construct a second model instance of ComponentSettingsFieldsShownBody from the attribute dict of the first instance
+ component_settings_fields_shown_body_model_dict = ComponentSettingsFieldsShownBody.from_dict(component_settings_fields_shown_body_model_json).__dict__
+ component_settings_fields_shown_body_model2 = ComponentSettingsFieldsShownBody(**component_settings_fields_shown_body_model_dict)
+
+ # Verify the model instances are equivalent
+ assert component_settings_fields_shown_body_model == component_settings_fields_shown_body_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ component_settings_fields_shown_body_model_json2 = component_settings_fields_shown_body_model.to_dict()
+ assert component_settings_fields_shown_body_model_json2 == component_settings_fields_shown_body_model_json
+
+
+class TestModel_ComponentSettingsFieldsShownTitle:
+ """
+ Test Class for ComponentSettingsFieldsShownTitle
+ """
+
+ def test_component_settings_fields_shown_title_serialization(self):
+ """
+ Test serialization/deserialization for ComponentSettingsFieldsShownTitle
+ """
+
+ # Construct a json representation of a ComponentSettingsFieldsShownTitle model
+ component_settings_fields_shown_title_model_json = {}
+ component_settings_fields_shown_title_model_json['field'] = 'testString'
+
+ # Construct a model instance of ComponentSettingsFieldsShownTitle by calling from_dict on the json representation
+ component_settings_fields_shown_title_model = ComponentSettingsFieldsShownTitle.from_dict(component_settings_fields_shown_title_model_json)
+ assert component_settings_fields_shown_title_model != False
+
+ # Construct a second model instance of ComponentSettingsFieldsShownTitle from the attribute dict of the first instance
+ component_settings_fields_shown_title_model_dict = ComponentSettingsFieldsShownTitle.from_dict(component_settings_fields_shown_title_model_json).__dict__
+ component_settings_fields_shown_title_model2 = ComponentSettingsFieldsShownTitle(**component_settings_fields_shown_title_model_dict)
+
+ # Verify the model instances are equivalent
+ assert component_settings_fields_shown_title_model == component_settings_fields_shown_title_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ component_settings_fields_shown_title_model_json2 = component_settings_fields_shown_title_model.to_dict()
+ assert component_settings_fields_shown_title_model_json2 == component_settings_fields_shown_title_model_json
+
+
+class TestModel_ComponentSettingsResponse:
+ """
+ Test Class for ComponentSettingsResponse
+ """
+
+ def test_component_settings_response_serialization(self):
+ """
+ Test serialization/deserialization for ComponentSettingsResponse
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ component_settings_fields_shown_body_model = {} # ComponentSettingsFieldsShownBody
+ component_settings_fields_shown_body_model['use_passage'] = True
+ component_settings_fields_shown_body_model['field'] = 'testString'
+
+ component_settings_fields_shown_title_model = {} # ComponentSettingsFieldsShownTitle
+ component_settings_fields_shown_title_model['field'] = 'testString'
+
+ component_settings_fields_shown_model = {} # ComponentSettingsFieldsShown
+ component_settings_fields_shown_model['body'] = component_settings_fields_shown_body_model
+ component_settings_fields_shown_model['title'] = component_settings_fields_shown_title_model
+
+ component_settings_aggregation_model = {} # ComponentSettingsAggregation
+ component_settings_aggregation_model['name'] = 'testString'
+ component_settings_aggregation_model['label'] = 'testString'
+ component_settings_aggregation_model['multiple_selections_allowed'] = True
+ component_settings_aggregation_model['visualization_type'] = 'auto'
+
+ # Construct a json representation of a ComponentSettingsResponse model
+ component_settings_response_model_json = {}
+ component_settings_response_model_json['fields_shown'] = component_settings_fields_shown_model
+ component_settings_response_model_json['autocomplete'] = True
+ component_settings_response_model_json['structured_search'] = True
+ component_settings_response_model_json['results_per_page'] = 38
+ component_settings_response_model_json['aggregations'] = [component_settings_aggregation_model]
+
+ # Construct a model instance of ComponentSettingsResponse by calling from_dict on the json representation
+ component_settings_response_model = ComponentSettingsResponse.from_dict(component_settings_response_model_json)
+ assert component_settings_response_model != False
+
+ # Construct a second model instance of ComponentSettingsResponse from the attribute dict of the first instance
+ component_settings_response_model_dict = ComponentSettingsResponse.from_dict(component_settings_response_model_json).__dict__
+ component_settings_response_model2 = ComponentSettingsResponse(**component_settings_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert component_settings_response_model == component_settings_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ component_settings_response_model_json2 = component_settings_response_model.to_dict()
+ assert component_settings_response_model_json2 == component_settings_response_model_json
+
+
+class TestModel_CreateDocumentClassifier:
+ """
+ Test Class for CreateDocumentClassifier
+ """
+
+ def test_create_document_classifier_serialization(self):
+ """
+ Test serialization/deserialization for CreateDocumentClassifier
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ document_classifier_enrichment_model = {} # DocumentClassifierEnrichment
+ document_classifier_enrichment_model['enrichment_id'] = 'testString'
+ document_classifier_enrichment_model['fields'] = ['testString']
+
+ classifier_federated_model_model = {} # ClassifierFederatedModel
+ classifier_federated_model_model['field'] = 'testString'
+
+ # Construct a json representation of a CreateDocumentClassifier model
+ create_document_classifier_model_json = {}
+ create_document_classifier_model_json['name'] = 'testString'
+ create_document_classifier_model_json['description'] = 'testString'
+ create_document_classifier_model_json['language'] = 'en'
+ create_document_classifier_model_json['answer_field'] = 'testString'
+ create_document_classifier_model_json['enrichments'] = [document_classifier_enrichment_model]
+ create_document_classifier_model_json['federated_classification'] = classifier_federated_model_model
+
+ # Construct a model instance of CreateDocumentClassifier by calling from_dict on the json representation
+ create_document_classifier_model = CreateDocumentClassifier.from_dict(create_document_classifier_model_json)
+ assert create_document_classifier_model != False
+
+ # Construct a second model instance of CreateDocumentClassifier from the attribute dict of the first instance
+ create_document_classifier_model_dict = CreateDocumentClassifier.from_dict(create_document_classifier_model_json).__dict__
+ create_document_classifier_model2 = CreateDocumentClassifier(**create_document_classifier_model_dict)
+
+ # Verify the model instances are equivalent
+ assert create_document_classifier_model == create_document_classifier_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ create_document_classifier_model_json2 = create_document_classifier_model.to_dict()
+ assert create_document_classifier_model_json2 == create_document_classifier_model_json
+
+
+class TestModel_CreateEnrichment:
+ """
+ Test Class for CreateEnrichment
+ """
+
+ def test_create_enrichment_serialization(self):
+ """
+ Test serialization/deserialization for CreateEnrichment
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ webhook_header_model = {} # WebhookHeader
+ webhook_header_model['name'] = 'testString'
+ webhook_header_model['value'] = 'testString'
+
+ enrichment_options_model = {} # EnrichmentOptions
+ enrichment_options_model['languages'] = ['testString']
+ enrichment_options_model['entity_type'] = 'testString'
+ enrichment_options_model['regular_expression'] = 'testString'
+ enrichment_options_model['result_field'] = 'testString'
+ enrichment_options_model['classifier_id'] = 'testString'
+ enrichment_options_model['model_id'] = 'testString'
+ enrichment_options_model['confidence_threshold'] = 0
+ enrichment_options_model['top_k'] = 0
+ enrichment_options_model['url'] = 'testString'
+ enrichment_options_model['version'] = '2023-03-31'
+ enrichment_options_model['secret'] = 'testString'
+ enrichment_options_model['headers'] = webhook_header_model
+ enrichment_options_model['location_encoding'] = '`utf-16`'
+
+ # Construct a json representation of a CreateEnrichment model
+ create_enrichment_model_json = {}
+ create_enrichment_model_json['name'] = 'testString'
+ create_enrichment_model_json['description'] = 'testString'
+ create_enrichment_model_json['type'] = 'classifier'
+ create_enrichment_model_json['options'] = enrichment_options_model
+
+ # Construct a model instance of CreateEnrichment by calling from_dict on the json representation
+ create_enrichment_model = CreateEnrichment.from_dict(create_enrichment_model_json)
+ assert create_enrichment_model != False
+
+ # Construct a second model instance of CreateEnrichment from the attribute dict of the first instance
+ create_enrichment_model_dict = CreateEnrichment.from_dict(create_enrichment_model_json).__dict__
+ create_enrichment_model2 = CreateEnrichment(**create_enrichment_model_dict)
+
+ # Verify the model instances are equivalent
+ assert create_enrichment_model == create_enrichment_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ create_enrichment_model_json2 = create_enrichment_model.to_dict()
+ assert create_enrichment_model_json2 == create_enrichment_model_json
+
+
+class TestModel_DefaultQueryParams:
+ """
+ Test Class for DefaultQueryParams
+ """
+
+ def test_default_query_params_serialization(self):
+ """
+ Test serialization/deserialization for DefaultQueryParams
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ default_query_params_passages_model = {} # DefaultQueryParamsPassages
+ default_query_params_passages_model['enabled'] = True
+ default_query_params_passages_model['count'] = 38
+ default_query_params_passages_model['fields'] = ['testString']
+ default_query_params_passages_model['characters'] = 38
+ default_query_params_passages_model['per_document'] = True
+ default_query_params_passages_model['max_per_document'] = 38
+
+ default_query_params_table_results_model = {} # DefaultQueryParamsTableResults
+ default_query_params_table_results_model['enabled'] = True
+ default_query_params_table_results_model['count'] = 38
+ default_query_params_table_results_model['per_document'] = 0
+
+ default_query_params_suggested_refinements_model = {} # DefaultQueryParamsSuggestedRefinements
+ default_query_params_suggested_refinements_model['enabled'] = True
+ default_query_params_suggested_refinements_model['count'] = 38
+
+ # Construct a json representation of a DefaultQueryParams model
+ default_query_params_model_json = {}
+ default_query_params_model_json['collection_ids'] = ['testString']
+ default_query_params_model_json['passages'] = default_query_params_passages_model
+ default_query_params_model_json['table_results'] = default_query_params_table_results_model
+ default_query_params_model_json['aggregation'] = 'testString'
+ default_query_params_model_json['suggested_refinements'] = default_query_params_suggested_refinements_model
+ default_query_params_model_json['spelling_suggestions'] = True
+ default_query_params_model_json['highlight'] = True
+ default_query_params_model_json['count'] = 38
+ default_query_params_model_json['sort'] = 'testString'
+ default_query_params_model_json['return'] = ['testString']
+
+ # Construct a model instance of DefaultQueryParams by calling from_dict on the json representation
+ default_query_params_model = DefaultQueryParams.from_dict(default_query_params_model_json)
+ assert default_query_params_model != False
+
+ # Construct a second model instance of DefaultQueryParams from the attribute dict of the first instance
+ default_query_params_model_dict = DefaultQueryParams.from_dict(default_query_params_model_json).__dict__
+ default_query_params_model2 = DefaultQueryParams(**default_query_params_model_dict)
+
+ # Verify the model instances are equivalent
+ assert default_query_params_model == default_query_params_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ default_query_params_model_json2 = default_query_params_model.to_dict()
+ assert default_query_params_model_json2 == default_query_params_model_json
+
+
+class TestModel_DefaultQueryParamsPassages:
+ """
+ Test Class for DefaultQueryParamsPassages
+ """
+
+ def test_default_query_params_passages_serialization(self):
+ """
+ Test serialization/deserialization for DefaultQueryParamsPassages
+ """
+
+ # Construct a json representation of a DefaultQueryParamsPassages model
+ default_query_params_passages_model_json = {}
+ default_query_params_passages_model_json['enabled'] = True
+ default_query_params_passages_model_json['count'] = 38
+ default_query_params_passages_model_json['fields'] = ['testString']
+ default_query_params_passages_model_json['characters'] = 38
+ default_query_params_passages_model_json['per_document'] = True
+ default_query_params_passages_model_json['max_per_document'] = 38
+
+ # Construct a model instance of DefaultQueryParamsPassages by calling from_dict on the json representation
+ default_query_params_passages_model = DefaultQueryParamsPassages.from_dict(default_query_params_passages_model_json)
+ assert default_query_params_passages_model != False
+
+ # Construct a second model instance of DefaultQueryParamsPassages from the attribute dict of the first instance
+ default_query_params_passages_model_dict = DefaultQueryParamsPassages.from_dict(default_query_params_passages_model_json).__dict__
+ default_query_params_passages_model2 = DefaultQueryParamsPassages(**default_query_params_passages_model_dict)
+
+ # Verify the model instances are equivalent
+ assert default_query_params_passages_model == default_query_params_passages_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ default_query_params_passages_model_json2 = default_query_params_passages_model.to_dict()
+ assert default_query_params_passages_model_json2 == default_query_params_passages_model_json
+
+
+class TestModel_DefaultQueryParamsSuggestedRefinements:
+ """
+ Test Class for DefaultQueryParamsSuggestedRefinements
+ """
+
+ def test_default_query_params_suggested_refinements_serialization(self):
+ """
+ Test serialization/deserialization for DefaultQueryParamsSuggestedRefinements
+ """
+
+ # Construct a json representation of a DefaultQueryParamsSuggestedRefinements model
+ default_query_params_suggested_refinements_model_json = {}
+ default_query_params_suggested_refinements_model_json['enabled'] = True
+ default_query_params_suggested_refinements_model_json['count'] = 38
+
+ # Construct a model instance of DefaultQueryParamsSuggestedRefinements by calling from_dict on the json representation
+ default_query_params_suggested_refinements_model = DefaultQueryParamsSuggestedRefinements.from_dict(default_query_params_suggested_refinements_model_json)
+ assert default_query_params_suggested_refinements_model != False
+
+ # Construct a second model instance of DefaultQueryParamsSuggestedRefinements from the attribute dict of the first instance
+ default_query_params_suggested_refinements_model_dict = DefaultQueryParamsSuggestedRefinements.from_dict(default_query_params_suggested_refinements_model_json).__dict__
+ default_query_params_suggested_refinements_model2 = DefaultQueryParamsSuggestedRefinements(**default_query_params_suggested_refinements_model_dict)
+
+ # Verify the model instances are equivalent
+ assert default_query_params_suggested_refinements_model == default_query_params_suggested_refinements_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ default_query_params_suggested_refinements_model_json2 = default_query_params_suggested_refinements_model.to_dict()
+ assert default_query_params_suggested_refinements_model_json2 == default_query_params_suggested_refinements_model_json
+
+
+class TestModel_DefaultQueryParamsTableResults:
+ """
+ Test Class for DefaultQueryParamsTableResults
+ """
+
+ def test_default_query_params_table_results_serialization(self):
+ """
+ Test serialization/deserialization for DefaultQueryParamsTableResults
+ """
+
+ # Construct a json representation of a DefaultQueryParamsTableResults model
+ default_query_params_table_results_model_json = {}
+ default_query_params_table_results_model_json['enabled'] = True
+ default_query_params_table_results_model_json['count'] = 38
+ default_query_params_table_results_model_json['per_document'] = 0
+
+ # Construct a model instance of DefaultQueryParamsTableResults by calling from_dict on the json representation
+ default_query_params_table_results_model = DefaultQueryParamsTableResults.from_dict(default_query_params_table_results_model_json)
+ assert default_query_params_table_results_model != False
+
+ # Construct a second model instance of DefaultQueryParamsTableResults from the attribute dict of the first instance
+ default_query_params_table_results_model_dict = DefaultQueryParamsTableResults.from_dict(default_query_params_table_results_model_json).__dict__
+ default_query_params_table_results_model2 = DefaultQueryParamsTableResults(**default_query_params_table_results_model_dict)
+
+ # Verify the model instances are equivalent
+ assert default_query_params_table_results_model == default_query_params_table_results_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ default_query_params_table_results_model_json2 = default_query_params_table_results_model.to_dict()
+ assert default_query_params_table_results_model_json2 == default_query_params_table_results_model_json
+
+
+class TestModel_DeleteDocumentResponse:
+ """
+ Test Class for DeleteDocumentResponse
+ """
+
+ def test_delete_document_response_serialization(self):
+ """
+ Test serialization/deserialization for DeleteDocumentResponse
+ """
+
+ # Construct a json representation of a DeleteDocumentResponse model
+ delete_document_response_model_json = {}
+ delete_document_response_model_json['document_id'] = 'testString'
+ delete_document_response_model_json['status'] = 'deleted'
+
+ # Construct a model instance of DeleteDocumentResponse by calling from_dict on the json representation
+ delete_document_response_model = DeleteDocumentResponse.from_dict(delete_document_response_model_json)
+ assert delete_document_response_model != False
+
+ # Construct a second model instance of DeleteDocumentResponse from the attribute dict of the first instance
+ delete_document_response_model_dict = DeleteDocumentResponse.from_dict(delete_document_response_model_json).__dict__
+ delete_document_response_model2 = DeleteDocumentResponse(**delete_document_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert delete_document_response_model == delete_document_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ delete_document_response_model_json2 = delete_document_response_model.to_dict()
+ assert delete_document_response_model_json2 == delete_document_response_model_json
+
+
+class TestModel_DocumentAccepted:
+ """
+ Test Class for DocumentAccepted
+ """
+
+ def test_document_accepted_serialization(self):
+ """
+ Test serialization/deserialization for DocumentAccepted
+ """
+
+ # Construct a json representation of a DocumentAccepted model
+ document_accepted_model_json = {}
+ document_accepted_model_json['document_id'] = 'testString'
+ document_accepted_model_json['status'] = 'processing'
+
+ # Construct a model instance of DocumentAccepted by calling from_dict on the json representation
+ document_accepted_model = DocumentAccepted.from_dict(document_accepted_model_json)
+ assert document_accepted_model != False
+
+ # Construct a second model instance of DocumentAccepted from the attribute dict of the first instance
+ document_accepted_model_dict = DocumentAccepted.from_dict(document_accepted_model_json).__dict__
+ document_accepted_model2 = DocumentAccepted(**document_accepted_model_dict)
+
+ # Verify the model instances are equivalent
+ assert document_accepted_model == document_accepted_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ document_accepted_model_json2 = document_accepted_model.to_dict()
+ assert document_accepted_model_json2 == document_accepted_model_json
+
+
+class TestModel_DocumentAttribute:
+ """
+ Test Class for DocumentAttribute
+ """
+
+ def test_document_attribute_serialization(self):
+ """
+ Test serialization/deserialization for DocumentAttribute
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ table_element_location_model = {} # TableElementLocation
+ table_element_location_model['begin'] = 26
+ table_element_location_model['end'] = 26
+
+ # Construct a json representation of a DocumentAttribute model
+ document_attribute_model_json = {}
+ document_attribute_model_json['type'] = 'testString'
+ document_attribute_model_json['text'] = 'testString'
+ document_attribute_model_json['location'] = table_element_location_model
+
+ # Construct a model instance of DocumentAttribute by calling from_dict on the json representation
+ document_attribute_model = DocumentAttribute.from_dict(document_attribute_model_json)
+ assert document_attribute_model != False
+
+ # Construct a second model instance of DocumentAttribute from the attribute dict of the first instance
+ document_attribute_model_dict = DocumentAttribute.from_dict(document_attribute_model_json).__dict__
+ document_attribute_model2 = DocumentAttribute(**document_attribute_model_dict)
+
+ # Verify the model instances are equivalent
+ assert document_attribute_model == document_attribute_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ document_attribute_model_json2 = document_attribute_model.to_dict()
+ assert document_attribute_model_json2 == document_attribute_model_json
+
+
+class TestModel_DocumentClassifier:
+ """
+ Test Class for DocumentClassifier
+ """
+
+ def test_document_classifier_serialization(self):
+ """
+ Test serialization/deserialization for DocumentClassifier
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ document_classifier_enrichment_model = {} # DocumentClassifierEnrichment
+ document_classifier_enrichment_model['enrichment_id'] = 'testString'
+ document_classifier_enrichment_model['fields'] = ['testString']
+
+ classifier_federated_model_model = {} # ClassifierFederatedModel
+ classifier_federated_model_model['field'] = 'testString'
+
+ # Construct a json representation of a DocumentClassifier model
+ document_classifier_model_json = {}
+ document_classifier_model_json['name'] = 'testString'
+ document_classifier_model_json['description'] = 'testString'
+ document_classifier_model_json['language'] = 'en'
+ document_classifier_model_json['enrichments'] = [document_classifier_enrichment_model]
+ document_classifier_model_json['recognized_fields'] = ['testString']
+ document_classifier_model_json['answer_field'] = 'testString'
+ document_classifier_model_json['training_data_file'] = 'testString'
+ document_classifier_model_json['test_data_file'] = 'testString'
+ document_classifier_model_json['federated_classification'] = classifier_federated_model_model
+
+ # Construct a model instance of DocumentClassifier by calling from_dict on the json representation
+ document_classifier_model = DocumentClassifier.from_dict(document_classifier_model_json)
+ assert document_classifier_model != False
+
+ # Construct a model instance of DocumentClassifier by calling from_dict on the json representation
+ document_classifier_model_dict = DocumentClassifier.from_dict(document_classifier_model_json).__dict__
+ document_classifier_model2 = DocumentClassifier(**document_classifier_model_dict)
+
+ # Verify the model instances are equivalent
+ assert document_classifier_model == document_classifier_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ document_classifier_model_json2 = document_classifier_model.to_dict()
+ assert document_classifier_model_json2 == document_classifier_model_json
+
+
+class TestModel_DocumentClassifierEnrichment:
+ """
+ Test Class for DocumentClassifierEnrichment
+ """
+
+ def test_document_classifier_enrichment_serialization(self):
+ """
+ Test serialization/deserialization for DocumentClassifierEnrichment
+ """
+
+ # Construct a json representation of a DocumentClassifierEnrichment model
+ document_classifier_enrichment_model_json = {}
+ document_classifier_enrichment_model_json['enrichment_id'] = 'testString'
+ document_classifier_enrichment_model_json['fields'] = ['testString']
+
+ # Construct a model instance of DocumentClassifierEnrichment by calling from_dict on the json representation
+ document_classifier_enrichment_model = DocumentClassifierEnrichment.from_dict(document_classifier_enrichment_model_json)
+ assert document_classifier_enrichment_model != False
+
+ # Construct a model instance of DocumentClassifierEnrichment by calling from_dict on the json representation
+ document_classifier_enrichment_model_dict = DocumentClassifierEnrichment.from_dict(document_classifier_enrichment_model_json).__dict__
+ document_classifier_enrichment_model2 = DocumentClassifierEnrichment(**document_classifier_enrichment_model_dict)
+
+ # Verify the model instances are equivalent
+ assert document_classifier_enrichment_model == document_classifier_enrichment_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ document_classifier_enrichment_model_json2 = document_classifier_enrichment_model.to_dict()
+ assert document_classifier_enrichment_model_json2 == document_classifier_enrichment_model_json
+
+
+class TestModel_DocumentClassifierModel:
+ """
+ Test Class for DocumentClassifierModel
+ """
+
+ def test_document_classifier_model_serialization(self):
+ """
+ Test serialization/deserialization for DocumentClassifierModel
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ model_evaluation_micro_average_model = {} # ModelEvaluationMicroAverage
+ model_evaluation_micro_average_model['precision'] = 0
+ model_evaluation_micro_average_model['recall'] = 0
+ model_evaluation_micro_average_model['f1'] = 0
+
+ model_evaluation_macro_average_model = {} # ModelEvaluationMacroAverage
+ model_evaluation_macro_average_model['precision'] = 0
+ model_evaluation_macro_average_model['recall'] = 0
+ model_evaluation_macro_average_model['f1'] = 0
+
+ per_class_model_evaluation_model = {} # PerClassModelEvaluation
+ per_class_model_evaluation_model['name'] = 'testString'
+ per_class_model_evaluation_model['precision'] = 0
+ per_class_model_evaluation_model['recall'] = 0
+ per_class_model_evaluation_model['f1'] = 0
+
+ classifier_model_evaluation_model = {} # ClassifierModelEvaluation
+ classifier_model_evaluation_model['micro_average'] = model_evaluation_micro_average_model
+ classifier_model_evaluation_model['macro_average'] = model_evaluation_macro_average_model
+ classifier_model_evaluation_model['per_class'] = [per_class_model_evaluation_model]
+
+ # Construct a json representation of a DocumentClassifierModel model
+ document_classifier_model_model_json = {}
+ document_classifier_model_model_json['name'] = 'testString'
+ document_classifier_model_model_json['description'] = 'testString'
+ document_classifier_model_model_json['training_data_file'] = 'testString'
+ document_classifier_model_model_json['test_data_file'] = 'testString'
+ document_classifier_model_model_json['status'] = 'training'
+ document_classifier_model_model_json['evaluation'] = classifier_model_evaluation_model
+ document_classifier_model_model_json['enrichment_id'] = 'testString'
+
+ # Construct a model instance of DocumentClassifierModel by calling from_dict on the json representation
+ document_classifier_model_model = DocumentClassifierModel.from_dict(document_classifier_model_model_json)
+ assert document_classifier_model_model != False
+
+ # Construct a model instance of DocumentClassifierModel by calling from_dict on the json representation
+ document_classifier_model_model_dict = DocumentClassifierModel.from_dict(document_classifier_model_model_json).__dict__
+ document_classifier_model_model2 = DocumentClassifierModel(**document_classifier_model_model_dict)
+
+ # Verify the model instances are equivalent
+ assert document_classifier_model_model == document_classifier_model_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ document_classifier_model_model_json2 = document_classifier_model_model.to_dict()
+ assert document_classifier_model_model_json2 == document_classifier_model_model_json
+
+
+class TestModel_DocumentClassifierModels:
+ """
+ Test Class for DocumentClassifierModels
+ """
+
+ def test_document_classifier_models_serialization(self):
+ """
+ Test serialization/deserialization for DocumentClassifierModels
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ model_evaluation_micro_average_model = {} # ModelEvaluationMicroAverage
+ model_evaluation_micro_average_model['precision'] = 0
+ model_evaluation_micro_average_model['recall'] = 0
+ model_evaluation_micro_average_model['f1'] = 0
+
+ model_evaluation_macro_average_model = {} # ModelEvaluationMacroAverage
+ model_evaluation_macro_average_model['precision'] = 0
+ model_evaluation_macro_average_model['recall'] = 0
+ model_evaluation_macro_average_model['f1'] = 0
+
+ per_class_model_evaluation_model = {} # PerClassModelEvaluation
+ per_class_model_evaluation_model['name'] = 'testString'
+ per_class_model_evaluation_model['precision'] = 0
+ per_class_model_evaluation_model['recall'] = 0
+ per_class_model_evaluation_model['f1'] = 0
+
+ classifier_model_evaluation_model = {} # ClassifierModelEvaluation
+ classifier_model_evaluation_model['micro_average'] = model_evaluation_micro_average_model
+ classifier_model_evaluation_model['macro_average'] = model_evaluation_macro_average_model
+ classifier_model_evaluation_model['per_class'] = [per_class_model_evaluation_model]
+
+ document_classifier_model_model = {} # DocumentClassifierModel
+ document_classifier_model_model['name'] = 'testString'
+ document_classifier_model_model['description'] = 'testString'
+ document_classifier_model_model['training_data_file'] = 'testString'
+ document_classifier_model_model['test_data_file'] = 'testString'
+ document_classifier_model_model['status'] = 'training'
+ document_classifier_model_model['evaluation'] = classifier_model_evaluation_model
+ document_classifier_model_model['enrichment_id'] = 'testString'
+
+ # Construct a json representation of a DocumentClassifierModels model
+ document_classifier_models_model_json = {}
+ document_classifier_models_model_json['models'] = [document_classifier_model_model]
+
+ # Construct a model instance of DocumentClassifierModels by calling from_dict on the json representation
+ document_classifier_models_model = DocumentClassifierModels.from_dict(document_classifier_models_model_json)
+ assert document_classifier_models_model != False
+
+ # Construct a model instance of DocumentClassifierModels by calling from_dict on the json representation
+ document_classifier_models_model_dict = DocumentClassifierModels.from_dict(document_classifier_models_model_json).__dict__
+ document_classifier_models_model2 = DocumentClassifierModels(**document_classifier_models_model_dict)
+
+ # Verify the model instances are equivalent
+ assert document_classifier_models_model == document_classifier_models_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ document_classifier_models_model_json2 = document_classifier_models_model.to_dict()
+ assert document_classifier_models_model_json2 == document_classifier_models_model_json
+
+
+class TestModel_DocumentClassifiers:
+ """
+ Test Class for DocumentClassifiers
+ """
+
+ def test_document_classifiers_serialization(self):
+ """
+ Test serialization/deserialization for DocumentClassifiers
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ document_classifier_enrichment_model = {} # DocumentClassifierEnrichment
+ document_classifier_enrichment_model['enrichment_id'] = 'testString'
+ document_classifier_enrichment_model['fields'] = ['testString']
+
+ classifier_federated_model_model = {} # ClassifierFederatedModel
+ classifier_federated_model_model['field'] = 'testString'
+
+ document_classifier_model = {} # DocumentClassifier
+ document_classifier_model['name'] = 'testString'
+ document_classifier_model['description'] = 'testString'
+ document_classifier_model['language'] = 'en'
+ document_classifier_model['enrichments'] = [document_classifier_enrichment_model]
+ document_classifier_model['recognized_fields'] = ['testString']
+ document_classifier_model['answer_field'] = 'testString'
+ document_classifier_model['training_data_file'] = 'testString'
+ document_classifier_model['test_data_file'] = 'testString'
+ document_classifier_model['federated_classification'] = classifier_federated_model_model
+
+ # Construct a json representation of a DocumentClassifiers model
+ document_classifiers_model_json = {}
+ document_classifiers_model_json['classifiers'] = [document_classifier_model]
+
+ # Construct a model instance of DocumentClassifiers by calling from_dict on the json representation
+ document_classifiers_model = DocumentClassifiers.from_dict(document_classifiers_model_json)
+ assert document_classifiers_model != False
+
+ # Construct a model instance of DocumentClassifiers by calling from_dict on the json representation
+ document_classifiers_model_dict = DocumentClassifiers.from_dict(document_classifiers_model_json).__dict__
+ document_classifiers_model2 = DocumentClassifiers(**document_classifiers_model_dict)
+
+ # Verify the model instances are equivalent
+ assert document_classifiers_model == document_classifiers_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ document_classifiers_model_json2 = document_classifiers_model.to_dict()
+ assert document_classifiers_model_json2 == document_classifiers_model_json
+
+
+class TestModel_DocumentDetails:
+ """
+ Test Class for DocumentDetails
+ """
+
+ def test_document_details_serialization(self):
+ """
+ Test serialization/deserialization for DocumentDetails
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ notice_model = {} # Notice
+
+ document_details_children_model = {} # DocumentDetailsChildren
+ document_details_children_model['have_notices'] = True
+ document_details_children_model['count'] = 38
+
+ # Construct a json representation of a DocumentDetails model
+ document_details_model_json = {}
+ document_details_model_json['status'] = 'available'
+ document_details_model_json['notices'] = [notice_model]
+ document_details_model_json['children'] = document_details_children_model
+ document_details_model_json['filename'] = 'testString'
+ document_details_model_json['file_type'] = 'testString'
+ document_details_model_json['sha256'] = 'testString'
+
+ # Construct a model instance of DocumentDetails by calling from_dict on the json representation
+ document_details_model = DocumentDetails.from_dict(document_details_model_json)
+ assert document_details_model != False
+
+ # Construct a model instance of DocumentDetails by calling from_dict on the json representation
+ document_details_model_dict = DocumentDetails.from_dict(document_details_model_json).__dict__
+ document_details_model2 = DocumentDetails(**document_details_model_dict)
+
+ # Verify the model instances are equivalent
+ assert document_details_model == document_details_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ document_details_model_json2 = document_details_model.to_dict()
+ assert document_details_model_json2 == document_details_model_json
+
+
+class TestModel_DocumentDetailsChildren:
+ """
+ Test Class for DocumentDetailsChildren
+ """
+
+ def test_document_details_children_serialization(self):
+ """
+ Test serialization/deserialization for DocumentDetailsChildren
+ """
+
+ # Construct a json representation of a DocumentDetailsChildren model
+ document_details_children_model_json = {}
+ document_details_children_model_json['have_notices'] = True
+ document_details_children_model_json['count'] = 38
+
+ # Construct a model instance of DocumentDetailsChildren by calling from_dict on the json representation
+ document_details_children_model = DocumentDetailsChildren.from_dict(document_details_children_model_json)
+ assert document_details_children_model != False
+
+ # Construct a model instance of DocumentDetailsChildren by calling from_dict on the json representation
+ document_details_children_model_dict = DocumentDetailsChildren.from_dict(document_details_children_model_json).__dict__
+ document_details_children_model2 = DocumentDetailsChildren(**document_details_children_model_dict)
+
+ # Verify the model instances are equivalent
+ assert document_details_children_model == document_details_children_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ document_details_children_model_json2 = document_details_children_model.to_dict()
+ assert document_details_children_model_json2 == document_details_children_model_json
+
+
+class TestModel_Enrichment:
+ """
+ Test Class for Enrichment
+ """
+
+ def test_enrichment_serialization(self):
+ """
+ Test serialization/deserialization for Enrichment
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ webhook_header_model = {} # WebhookHeader
+ webhook_header_model['name'] = 'testString'
+ webhook_header_model['value'] = 'testString'
+
+ enrichment_options_model = {} # EnrichmentOptions
+ enrichment_options_model['languages'] = ['testString']
+ enrichment_options_model['entity_type'] = 'testString'
+ enrichment_options_model['regular_expression'] = 'testString'
+ enrichment_options_model['result_field'] = 'testString'
+ enrichment_options_model['classifier_id'] = 'testString'
+ enrichment_options_model['model_id'] = 'testString'
+ enrichment_options_model['confidence_threshold'] = 0
+ enrichment_options_model['top_k'] = 0
+ enrichment_options_model['url'] = 'testString'
+ enrichment_options_model['version'] = '2023-03-31'
+ enrichment_options_model['secret'] = 'testString'
+ enrichment_options_model['headers'] = webhook_header_model
+ enrichment_options_model['location_encoding'] = '`utf-16`'
+
+ # Construct a json representation of a Enrichment model
+ enrichment_model_json = {}
+ enrichment_model_json['name'] = 'testString'
+ enrichment_model_json['description'] = 'testString'
+ enrichment_model_json['type'] = 'part_of_speech'
+ enrichment_model_json['options'] = enrichment_options_model
+
+ # Construct a model instance of Enrichment by calling from_dict on the json representation
+ enrichment_model = Enrichment.from_dict(enrichment_model_json)
+ assert enrichment_model != False
+
+ # Construct a model instance of Enrichment by calling from_dict on the json representation
+ enrichment_model_dict = Enrichment.from_dict(enrichment_model_json).__dict__
+ enrichment_model2 = Enrichment(**enrichment_model_dict)
+
+ # Verify the model instances are equivalent
+ assert enrichment_model == enrichment_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ enrichment_model_json2 = enrichment_model.to_dict()
+ assert enrichment_model_json2 == enrichment_model_json
+
+
+class TestModel_EnrichmentOptions:
+ """
+ Test Class for EnrichmentOptions
+ """
+
+ def test_enrichment_options_serialization(self):
+ """
+ Test serialization/deserialization for EnrichmentOptions
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ webhook_header_model = {} # WebhookHeader
+ webhook_header_model['name'] = 'testString'
+ webhook_header_model['value'] = 'testString'
+
+ # Construct a json representation of a EnrichmentOptions model
+ enrichment_options_model_json = {}
+ enrichment_options_model_json['languages'] = ['testString']
+ enrichment_options_model_json['entity_type'] = 'testString'
+ enrichment_options_model_json['regular_expression'] = 'testString'
+ enrichment_options_model_json['result_field'] = 'testString'
+ enrichment_options_model_json['classifier_id'] = 'testString'
+ enrichment_options_model_json['model_id'] = 'testString'
+ enrichment_options_model_json['confidence_threshold'] = 0
+ enrichment_options_model_json['top_k'] = 0
+ enrichment_options_model_json['url'] = 'testString'
+ enrichment_options_model_json['version'] = '2023-03-31'
+ enrichment_options_model_json['secret'] = 'testString'
+ enrichment_options_model_json['headers'] = webhook_header_model
+ enrichment_options_model_json['location_encoding'] = '`utf-16`'
+
+ # Construct a model instance of EnrichmentOptions by calling from_dict on the json representation
+ enrichment_options_model = EnrichmentOptions.from_dict(enrichment_options_model_json)
+ assert enrichment_options_model != False
+
+ # Construct a model instance of EnrichmentOptions by calling from_dict on the json representation
+ enrichment_options_model_dict = EnrichmentOptions.from_dict(enrichment_options_model_json).__dict__
+ enrichment_options_model2 = EnrichmentOptions(**enrichment_options_model_dict)
+
+ # Verify the model instances are equivalent
+ assert enrichment_options_model == enrichment_options_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ enrichment_options_model_json2 = enrichment_options_model.to_dict()
+ assert enrichment_options_model_json2 == enrichment_options_model_json
+
+
+class TestModel_Enrichments:
+ """
+ Test Class for Enrichments
+ """
+
+ def test_enrichments_serialization(self):
+ """
+ Test serialization/deserialization for Enrichments
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ webhook_header_model = {} # WebhookHeader
+ webhook_header_model['name'] = 'testString'
+ webhook_header_model['value'] = 'testString'
+
+ enrichment_options_model = {} # EnrichmentOptions
+ enrichment_options_model['languages'] = ['testString']
+ enrichment_options_model['entity_type'] = 'testString'
+ enrichment_options_model['regular_expression'] = 'testString'
+ enrichment_options_model['result_field'] = 'testString'
+ enrichment_options_model['classifier_id'] = 'testString'
+ enrichment_options_model['model_id'] = 'testString'
+ enrichment_options_model['confidence_threshold'] = 0
+ enrichment_options_model['top_k'] = 0
+ enrichment_options_model['url'] = 'testString'
+ enrichment_options_model['version'] = '2023-03-31'
+ enrichment_options_model['secret'] = 'testString'
+ enrichment_options_model['headers'] = webhook_header_model
+ enrichment_options_model['location_encoding'] = '`utf-16`'
+
+ enrichment_model = {} # Enrichment
+ enrichment_model['name'] = 'testString'
+ enrichment_model['description'] = 'testString'
+ enrichment_model['type'] = 'part_of_speech'
+ enrichment_model['options'] = enrichment_options_model
+
+ # Construct a json representation of a Enrichments model
+ enrichments_model_json = {}
+ enrichments_model_json['enrichments'] = [enrichment_model]
+
+ # Construct a model instance of Enrichments by calling from_dict on the json representation
+ enrichments_model = Enrichments.from_dict(enrichments_model_json)
+ assert enrichments_model != False
+
+ # Construct a model instance of Enrichments by calling from_dict on the json representation
+ enrichments_model_dict = Enrichments.from_dict(enrichments_model_json).__dict__
+ enrichments_model2 = Enrichments(**enrichments_model_dict)
+
+ # Verify the model instances are equivalent
+ assert enrichments_model == enrichments_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ enrichments_model_json2 = enrichments_model.to_dict()
+ assert enrichments_model_json2 == enrichments_model_json
+
+
+class TestModel_Expansion:
+ """
+ Test Class for Expansion
+ """
+
+ def test_expansion_serialization(self):
+ """
+ Test serialization/deserialization for Expansion
+ """
+
+ # Construct a json representation of a Expansion model
+ expansion_model_json = {}
+ expansion_model_json['input_terms'] = ['testString']
+ expansion_model_json['expanded_terms'] = ['testString']
+
+ # Construct a model instance of Expansion by calling from_dict on the json representation
+ expansion_model = Expansion.from_dict(expansion_model_json)
+ assert expansion_model != False
+
+ # Construct a model instance of Expansion by calling from_dict on the json representation
+ expansion_model_dict = Expansion.from_dict(expansion_model_json).__dict__
+ expansion_model2 = Expansion(**expansion_model_dict)
+
+ # Verify the model instances are equivalent
+ assert expansion_model == expansion_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ expansion_model_json2 = expansion_model.to_dict()
+ assert expansion_model_json2 == expansion_model_json
+
+
+class TestModel_Expansions:
+ """
+ Test Class for Expansions
+ """
+
+ def test_expansions_serialization(self):
+ """
+ Test serialization/deserialization for Expansions
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ expansion_model = {} # Expansion
+ expansion_model['input_terms'] = ['testString']
+ expansion_model['expanded_terms'] = ['testString']
+
+ # Construct a json representation of a Expansions model
+ expansions_model_json = {}
+ expansions_model_json['expansions'] = [expansion_model]
+
+ # Construct a model instance of Expansions by calling from_dict on the json representation
+ expansions_model = Expansions.from_dict(expansions_model_json)
+ assert expansions_model != False
+
+ # Construct a model instance of Expansions by calling from_dict on the json representation
+ expansions_model_dict = Expansions.from_dict(expansions_model_json).__dict__
+ expansions_model2 = Expansions(**expansions_model_dict)
+
+ # Verify the model instances are equivalent
+ assert expansions_model == expansions_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ expansions_model_json2 = expansions_model.to_dict()
+ assert expansions_model_json2 == expansions_model_json
+
+
+class TestModel_Field:
+ """
+ Test Class for Field
+ """
+
+ def test_field_serialization(self):
+ """
+ Test serialization/deserialization for Field
+ """
+
+ # Construct a json representation of a Field model
+ field_model_json = {}
+
+ # Construct a model instance of Field by calling from_dict on the json representation
+ field_model = Field.from_dict(field_model_json)
+ assert field_model != False
+
+ # Construct a model instance of Field by calling from_dict on the json representation
+ field_model_dict = Field.from_dict(field_model_json).__dict__
+ field_model2 = Field(**field_model_dict)
+
+ # Verify the model instances are equivalent
+ assert field_model == field_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ field_model_json2 = field_model.to_dict()
+ assert field_model_json2 == field_model_json
+
+
+class TestModel_ListBatchesResponse:
+ """
+ Test Class for ListBatchesResponse
+ """
+
+ def test_list_batches_response_serialization(self):
+ """
+ Test serialization/deserialization for ListBatchesResponse
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ batch_details_model = {} # BatchDetails
+ batch_details_model['enrichment_id'] = 'fd290d8b-53e2-dba1-0000-018a8d150b85'
+
+ # Construct a json representation of a ListBatchesResponse model
+ list_batches_response_model_json = {}
+ list_batches_response_model_json['batches'] = [batch_details_model]
+
+ # Construct a model instance of ListBatchesResponse by calling from_dict on the json representation
+ list_batches_response_model = ListBatchesResponse.from_dict(list_batches_response_model_json)
+ assert list_batches_response_model != False
+
+ # Construct a model instance of ListBatchesResponse by calling from_dict on the json representation
+ list_batches_response_model_dict = ListBatchesResponse.from_dict(list_batches_response_model_json).__dict__
+ list_batches_response_model2 = ListBatchesResponse(**list_batches_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert list_batches_response_model == list_batches_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ list_batches_response_model_json2 = list_batches_response_model.to_dict()
+ assert list_batches_response_model_json2 == list_batches_response_model_json
+
+
+class TestModel_ListCollectionsResponse:
+ """
+ Test Class for ListCollectionsResponse
+ """
+
+ def test_list_collections_response_serialization(self):
+ """
+ Test serialization/deserialization for ListCollectionsResponse
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ collection_model = {} # Collection
+ collection_model['name'] = 'example'
+
+ # Construct a json representation of a ListCollectionsResponse model
+ list_collections_response_model_json = {}
+ list_collections_response_model_json['collections'] = [collection_model]
+
+ # Construct a model instance of ListCollectionsResponse by calling from_dict on the json representation
+ list_collections_response_model = ListCollectionsResponse.from_dict(list_collections_response_model_json)
+ assert list_collections_response_model != False
+
+ # Construct a model instance of ListCollectionsResponse by calling from_dict on the json representation
+ list_collections_response_model_dict = ListCollectionsResponse.from_dict(list_collections_response_model_json).__dict__
+ list_collections_response_model2 = ListCollectionsResponse(**list_collections_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert list_collections_response_model == list_collections_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ list_collections_response_model_json2 = list_collections_response_model.to_dict()
+ assert list_collections_response_model_json2 == list_collections_response_model_json
+
+
+class TestModel_ListDocumentsResponse:
+ """
+ Test Class for ListDocumentsResponse
+ """
+
+ def test_list_documents_response_serialization(self):
+ """
+ Test serialization/deserialization for ListDocumentsResponse
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ notice_model = {} # Notice
+
+ document_details_children_model = {} # DocumentDetailsChildren
+ document_details_children_model['have_notices'] = True
+ document_details_children_model['count'] = 38
+
+ document_details_model = {} # DocumentDetails
+ document_details_model['status'] = 'available'
+ document_details_model['notices'] = [notice_model]
+ document_details_model['children'] = document_details_children_model
+ document_details_model['filename'] = 'testString'
+ document_details_model['file_type'] = 'testString'
+ document_details_model['sha256'] = 'testString'
+
+ # Construct a json representation of a ListDocumentsResponse model
+ list_documents_response_model_json = {}
+ list_documents_response_model_json['matching_results'] = 38
+ list_documents_response_model_json['documents'] = [document_details_model]
+
+ # Construct a model instance of ListDocumentsResponse by calling from_dict on the json representation
+ list_documents_response_model = ListDocumentsResponse.from_dict(list_documents_response_model_json)
+ assert list_documents_response_model != False
+
+ # Construct a model instance of ListDocumentsResponse by calling from_dict on the json representation
+ list_documents_response_model_dict = ListDocumentsResponse.from_dict(list_documents_response_model_json).__dict__
+ list_documents_response_model2 = ListDocumentsResponse(**list_documents_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert list_documents_response_model == list_documents_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ list_documents_response_model_json2 = list_documents_response_model.to_dict()
+ assert list_documents_response_model_json2 == list_documents_response_model_json
+
+
+class TestModel_ListFieldsResponse:
+ """
+ Test Class for ListFieldsResponse
+ """
+
+ def test_list_fields_response_serialization(self):
+ """
+ Test serialization/deserialization for ListFieldsResponse
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ field_model = {} # Field
+
+ # Construct a json representation of a ListFieldsResponse model
+ list_fields_response_model_json = {}
+ list_fields_response_model_json['fields'] = [field_model]
+
+ # Construct a model instance of ListFieldsResponse by calling from_dict on the json representation
+ list_fields_response_model = ListFieldsResponse.from_dict(list_fields_response_model_json)
+ assert list_fields_response_model != False
+
+ # Construct a model instance of ListFieldsResponse by calling from_dict on the json representation
+ list_fields_response_model_dict = ListFieldsResponse.from_dict(list_fields_response_model_json).__dict__
+ list_fields_response_model2 = ListFieldsResponse(**list_fields_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert list_fields_response_model == list_fields_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ list_fields_response_model_json2 = list_fields_response_model.to_dict()
+ assert list_fields_response_model_json2 == list_fields_response_model_json
+
+
+class TestModel_ListProjectsResponse:
+    """
+    Test Class for ListProjectsResponse
+    """
+
+    def test_list_projects_response_serialization(self):
+        """
+        Test serialization/deserialization for ListProjectsResponse.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        project_list_details_model = {} # ProjectListDetails
+        project_list_details_model['name'] = 'testString'
+        project_list_details_model['type'] = 'intelligent_document_processing'
+
+        # Construct a json representation of a ListProjectsResponse model
+        list_projects_response_model_json = {}
+        list_projects_response_model_json['projects'] = [project_list_details_model]
+
+        # Construct a model instance of ListProjectsResponse by calling from_dict on the json representation
+        list_projects_response_model = ListProjectsResponse.from_dict(list_projects_response_model_json)
+        assert list_projects_response_model != False
+
+        # Construct a model instance of ListProjectsResponse by calling from_dict on the json representation
+        list_projects_response_model_dict = ListProjectsResponse.from_dict(list_projects_response_model_json).__dict__
+        list_projects_response_model2 = ListProjectsResponse(**list_projects_response_model_dict)
+
+        # Verify the model instances are equivalent
+        assert list_projects_response_model == list_projects_response_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        list_projects_response_model_json2 = list_projects_response_model.to_dict()
+        assert list_projects_response_model_json2 == list_projects_response_model_json
+
+
+class TestModel_ModelEvaluationMacroAverage:
+    """
+    Test Class for ModelEvaluationMacroAverage
+    """
+
+    def test_model_evaluation_macro_average_serialization(self):
+        """
+        Test serialization/deserialization for ModelEvaluationMacroAverage.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct a json representation of a ModelEvaluationMacroAverage model
+        model_evaluation_macro_average_model_json = {}
+        model_evaluation_macro_average_model_json['precision'] = 0
+        model_evaluation_macro_average_model_json['recall'] = 0
+        model_evaluation_macro_average_model_json['f1'] = 0
+
+        # Construct a model instance of ModelEvaluationMacroAverage by calling from_dict on the json representation
+        model_evaluation_macro_average_model = ModelEvaluationMacroAverage.from_dict(model_evaluation_macro_average_model_json)
+        assert model_evaluation_macro_average_model != False
+
+        # Construct a model instance of ModelEvaluationMacroAverage by calling from_dict on the json representation
+        model_evaluation_macro_average_model_dict = ModelEvaluationMacroAverage.from_dict(model_evaluation_macro_average_model_json).__dict__
+        model_evaluation_macro_average_model2 = ModelEvaluationMacroAverage(**model_evaluation_macro_average_model_dict)
+
+        # Verify the model instances are equivalent
+        assert model_evaluation_macro_average_model == model_evaluation_macro_average_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        model_evaluation_macro_average_model_json2 = model_evaluation_macro_average_model.to_dict()
+        assert model_evaluation_macro_average_model_json2 == model_evaluation_macro_average_model_json
+
+
+class TestModel_ModelEvaluationMicroAverage:
+    """
+    Test Class for ModelEvaluationMicroAverage
+    """
+
+    def test_model_evaluation_micro_average_serialization(self):
+        """
+        Test serialization/deserialization for ModelEvaluationMicroAverage.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct a json representation of a ModelEvaluationMicroAverage model
+        model_evaluation_micro_average_model_json = {}
+        model_evaluation_micro_average_model_json['precision'] = 0
+        model_evaluation_micro_average_model_json['recall'] = 0
+        model_evaluation_micro_average_model_json['f1'] = 0
+
+        # Construct a model instance of ModelEvaluationMicroAverage by calling from_dict on the json representation
+        model_evaluation_micro_average_model = ModelEvaluationMicroAverage.from_dict(model_evaluation_micro_average_model_json)
+        assert model_evaluation_micro_average_model != False
+
+        # Construct a model instance of ModelEvaluationMicroAverage by calling from_dict on the json representation
+        model_evaluation_micro_average_model_dict = ModelEvaluationMicroAverage.from_dict(model_evaluation_micro_average_model_json).__dict__
+        model_evaluation_micro_average_model2 = ModelEvaluationMicroAverage(**model_evaluation_micro_average_model_dict)
+
+        # Verify the model instances are equivalent
+        assert model_evaluation_micro_average_model == model_evaluation_micro_average_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        model_evaluation_micro_average_model_json2 = model_evaluation_micro_average_model.to_dict()
+        assert model_evaluation_micro_average_model_json2 == model_evaluation_micro_average_model_json
+
+
+class TestModel_Notice:
+    """
+    Test Class for Notice
+    """
+
+    def test_notice_serialization(self):
+        """
+        Test serialization/deserialization for Notice.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct a json representation of a Notice model
+        # NOTE(review): intentionally empty — presumably all Notice
+        # properties are server-generated/read-only; confirm against the model.
+        notice_model_json = {}
+
+        # Construct a model instance of Notice by calling from_dict on the json representation
+        notice_model = Notice.from_dict(notice_model_json)
+        assert notice_model != False
+
+        # Construct a model instance of Notice by calling from_dict on the json representation
+        notice_model_dict = Notice.from_dict(notice_model_json).__dict__
+        notice_model2 = Notice(**notice_model_dict)
+
+        # Verify the model instances are equivalent
+        assert notice_model == notice_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        notice_model_json2 = notice_model.to_dict()
+        assert notice_model_json2 == notice_model_json
+
+
+class TestModel_PerClassModelEvaluation:
+    """
+    Test Class for PerClassModelEvaluation
+    """
+
+    def test_per_class_model_evaluation_serialization(self):
+        """
+        Test serialization/deserialization for PerClassModelEvaluation.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct a json representation of a PerClassModelEvaluation model
+        per_class_model_evaluation_model_json = {}
+        per_class_model_evaluation_model_json['name'] = 'testString'
+        per_class_model_evaluation_model_json['precision'] = 0
+        per_class_model_evaluation_model_json['recall'] = 0
+        per_class_model_evaluation_model_json['f1'] = 0
+
+        # Construct a model instance of PerClassModelEvaluation by calling from_dict on the json representation
+        per_class_model_evaluation_model = PerClassModelEvaluation.from_dict(per_class_model_evaluation_model_json)
+        assert per_class_model_evaluation_model != False
+
+        # Construct a model instance of PerClassModelEvaluation by calling from_dict on the json representation
+        per_class_model_evaluation_model_dict = PerClassModelEvaluation.from_dict(per_class_model_evaluation_model_json).__dict__
+        per_class_model_evaluation_model2 = PerClassModelEvaluation(**per_class_model_evaluation_model_dict)
+
+        # Verify the model instances are equivalent
+        assert per_class_model_evaluation_model == per_class_model_evaluation_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        per_class_model_evaluation_model_json2 = per_class_model_evaluation_model.to_dict()
+        assert per_class_model_evaluation_model_json2 == per_class_model_evaluation_model_json
+
+
+class TestModel_ProjectDetails:
+    """
+    Test Class for ProjectDetails
+    """
+
+    def test_project_details_serialization(self):
+        """
+        Test serialization/deserialization for ProjectDetails.
+
+        Builds the nested DefaultQueryParams sub-models first, then
+        round-trips ProjectDetails through from_dict()/to_dict() and
+        verifies that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        default_query_params_passages_model = {} # DefaultQueryParamsPassages
+        default_query_params_passages_model['enabled'] = True
+        default_query_params_passages_model['count'] = 38
+        default_query_params_passages_model['fields'] = ['testString']
+        default_query_params_passages_model['characters'] = 38
+        default_query_params_passages_model['per_document'] = True
+        default_query_params_passages_model['max_per_document'] = 38
+
+        default_query_params_table_results_model = {} # DefaultQueryParamsTableResults
+        default_query_params_table_results_model['enabled'] = True
+        default_query_params_table_results_model['count'] = 38
+        default_query_params_table_results_model['per_document'] = 0
+
+        default_query_params_suggested_refinements_model = {} # DefaultQueryParamsSuggestedRefinements
+        default_query_params_suggested_refinements_model['enabled'] = True
+        default_query_params_suggested_refinements_model['count'] = 38
+
+        default_query_params_model = {} # DefaultQueryParams
+        default_query_params_model['collection_ids'] = ['testString']
+        default_query_params_model['passages'] = default_query_params_passages_model
+        default_query_params_model['table_results'] = default_query_params_table_results_model
+        default_query_params_model['aggregation'] = 'testString'
+        default_query_params_model['suggested_refinements'] = default_query_params_suggested_refinements_model
+        default_query_params_model['spelling_suggestions'] = True
+        default_query_params_model['highlight'] = True
+        default_query_params_model['count'] = 38
+        default_query_params_model['sort'] = 'testString'
+        default_query_params_model['return'] = ['testString']
+
+        # Construct a json representation of a ProjectDetails model
+        project_details_model_json = {}
+        project_details_model_json['name'] = 'testString'
+        project_details_model_json['type'] = 'intelligent_document_processing'
+        project_details_model_json['default_query_parameters'] = default_query_params_model
+
+        # Construct a model instance of ProjectDetails by calling from_dict on the json representation
+        project_details_model = ProjectDetails.from_dict(project_details_model_json)
+        assert project_details_model != False
+
+        # Construct a model instance of ProjectDetails by calling from_dict on the json representation
+        project_details_model_dict = ProjectDetails.from_dict(project_details_model_json).__dict__
+        project_details_model2 = ProjectDetails(**project_details_model_dict)
+
+        # Verify the model instances are equivalent
+        assert project_details_model == project_details_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        project_details_model_json2 = project_details_model.to_dict()
+        assert project_details_model_json2 == project_details_model_json
+
+
+class TestModel_ProjectListDetails:
+    """
+    Test Class for ProjectListDetails
+    """
+
+    def test_project_list_details_serialization(self):
+        """
+        Test serialization/deserialization for ProjectListDetails.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct a json representation of a ProjectListDetails model
+        project_list_details_model_json = {}
+        project_list_details_model_json['name'] = 'testString'
+        project_list_details_model_json['type'] = 'intelligent_document_processing'
+
+        # Construct a model instance of ProjectListDetails by calling from_dict on the json representation
+        project_list_details_model = ProjectListDetails.from_dict(project_list_details_model_json)
+        assert project_list_details_model != False
+
+        # Construct a model instance of ProjectListDetails by calling from_dict on the json representation
+        project_list_details_model_dict = ProjectListDetails.from_dict(project_list_details_model_json).__dict__
+        project_list_details_model2 = ProjectListDetails(**project_list_details_model_dict)
+
+        # Verify the model instances are equivalent
+        assert project_list_details_model == project_list_details_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        project_list_details_model_json2 = project_list_details_model.to_dict()
+        assert project_list_details_model_json2 == project_list_details_model_json
+
+
+class TestModel_ProjectListDetailsRelevancyTrainingStatus:
+    """
+    Test Class for ProjectListDetailsRelevancyTrainingStatus
+    """
+
+    def test_project_list_details_relevancy_training_status_serialization(self):
+        """
+        Test serialization/deserialization for ProjectListDetailsRelevancyTrainingStatus.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct a json representation of a ProjectListDetailsRelevancyTrainingStatus model
+        project_list_details_relevancy_training_status_model_json = {}
+        project_list_details_relevancy_training_status_model_json['data_updated'] = 'testString'
+        project_list_details_relevancy_training_status_model_json['total_examples'] = 38
+        project_list_details_relevancy_training_status_model_json['sufficient_label_diversity'] = True
+        project_list_details_relevancy_training_status_model_json['processing'] = True
+        project_list_details_relevancy_training_status_model_json['minimum_examples_added'] = True
+        project_list_details_relevancy_training_status_model_json['successfully_trained'] = 'testString'
+        project_list_details_relevancy_training_status_model_json['available'] = True
+        project_list_details_relevancy_training_status_model_json['notices'] = 38
+        project_list_details_relevancy_training_status_model_json['minimum_queries_added'] = True
+
+        # Construct a model instance of ProjectListDetailsRelevancyTrainingStatus by calling from_dict on the json representation
+        project_list_details_relevancy_training_status_model = ProjectListDetailsRelevancyTrainingStatus.from_dict(project_list_details_relevancy_training_status_model_json)
+        assert project_list_details_relevancy_training_status_model != False
+
+        # Construct a model instance of ProjectListDetailsRelevancyTrainingStatus by calling from_dict on the json representation
+        project_list_details_relevancy_training_status_model_dict = ProjectListDetailsRelevancyTrainingStatus.from_dict(project_list_details_relevancy_training_status_model_json).__dict__
+        project_list_details_relevancy_training_status_model2 = ProjectListDetailsRelevancyTrainingStatus(**project_list_details_relevancy_training_status_model_dict)
+
+        # Verify the model instances are equivalent
+        assert project_list_details_relevancy_training_status_model == project_list_details_relevancy_training_status_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        project_list_details_relevancy_training_status_model_json2 = project_list_details_relevancy_training_status_model.to_dict()
+        assert project_list_details_relevancy_training_status_model_json2 == project_list_details_relevancy_training_status_model_json
+
+
+class TestModel_QueryGroupByAggregationResult:
+    """
+    Test Class for QueryGroupByAggregationResult
+    """
+
+    def test_query_group_by_aggregation_result_serialization(self):
+        """
+        Test serialization/deserialization for QueryGroupByAggregationResult.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct a json representation of a QueryGroupByAggregationResult model
+        query_group_by_aggregation_result_model_json = {}
+        query_group_by_aggregation_result_model_json['key'] = 'testString'
+        query_group_by_aggregation_result_model_json['matching_results'] = 38
+        query_group_by_aggregation_result_model_json['relevancy'] = 72.5
+        query_group_by_aggregation_result_model_json['total_matching_documents'] = 38
+        query_group_by_aggregation_result_model_json['estimated_matching_results'] = 72.5
+        query_group_by_aggregation_result_model_json['aggregations'] = [{'anyKey': 'anyValue'}]
+
+        # Construct a model instance of QueryGroupByAggregationResult by calling from_dict on the json representation
+        query_group_by_aggregation_result_model = QueryGroupByAggregationResult.from_dict(query_group_by_aggregation_result_model_json)
+        assert query_group_by_aggregation_result_model != False
+
+        # Construct a model instance of QueryGroupByAggregationResult by calling from_dict on the json representation
+        query_group_by_aggregation_result_model_dict = QueryGroupByAggregationResult.from_dict(query_group_by_aggregation_result_model_json).__dict__
+        query_group_by_aggregation_result_model2 = QueryGroupByAggregationResult(**query_group_by_aggregation_result_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_group_by_aggregation_result_model == query_group_by_aggregation_result_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_group_by_aggregation_result_model_json2 = query_group_by_aggregation_result_model.to_dict()
+        assert query_group_by_aggregation_result_model_json2 == query_group_by_aggregation_result_model_json
+
+
+class TestModel_QueryHistogramAggregationResult:
+    """
+    Test Class for QueryHistogramAggregationResult
+    """
+
+    def test_query_histogram_aggregation_result_serialization(self):
+        """
+        Test serialization/deserialization for QueryHistogramAggregationResult.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct a json representation of a QueryHistogramAggregationResult model
+        query_histogram_aggregation_result_model_json = {}
+        query_histogram_aggregation_result_model_json['key'] = 26
+        query_histogram_aggregation_result_model_json['matching_results'] = 38
+        query_histogram_aggregation_result_model_json['aggregations'] = [{'anyKey': 'anyValue'}]
+
+        # Construct a model instance of QueryHistogramAggregationResult by calling from_dict on the json representation
+        query_histogram_aggregation_result_model = QueryHistogramAggregationResult.from_dict(query_histogram_aggregation_result_model_json)
+        assert query_histogram_aggregation_result_model != False
+
+        # Construct a model instance of QueryHistogramAggregationResult by calling from_dict on the json representation
+        query_histogram_aggregation_result_model_dict = QueryHistogramAggregationResult.from_dict(query_histogram_aggregation_result_model_json).__dict__
+        query_histogram_aggregation_result_model2 = QueryHistogramAggregationResult(**query_histogram_aggregation_result_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_histogram_aggregation_result_model == query_histogram_aggregation_result_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_histogram_aggregation_result_model_json2 = query_histogram_aggregation_result_model.to_dict()
+        assert query_histogram_aggregation_result_model_json2 == query_histogram_aggregation_result_model_json
+
+
+class TestModel_QueryLargePassages:
+    """
+    Test Class for QueryLargePassages
+    """
+
+    def test_query_large_passages_serialization(self):
+        """
+        Test serialization/deserialization for QueryLargePassages.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct a json representation of a QueryLargePassages model
+        query_large_passages_model_json = {}
+        query_large_passages_model_json['enabled'] = True
+        query_large_passages_model_json['per_document'] = True
+        query_large_passages_model_json['max_per_document'] = 38
+        query_large_passages_model_json['fields'] = ['testString']
+        query_large_passages_model_json['count'] = 400
+        query_large_passages_model_json['characters'] = 50
+        query_large_passages_model_json['find_answers'] = False
+        query_large_passages_model_json['max_answers_per_passage'] = 1
+
+        # Construct a model instance of QueryLargePassages by calling from_dict on the json representation
+        query_large_passages_model = QueryLargePassages.from_dict(query_large_passages_model_json)
+        assert query_large_passages_model != False
+
+        # Construct a model instance of QueryLargePassages by calling from_dict on the json representation
+        query_large_passages_model_dict = QueryLargePassages.from_dict(query_large_passages_model_json).__dict__
+        query_large_passages_model2 = QueryLargePassages(**query_large_passages_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_large_passages_model == query_large_passages_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_large_passages_model_json2 = query_large_passages_model.to_dict()
+        assert query_large_passages_model_json2 == query_large_passages_model_json
+
+
+class TestModel_QueryLargeSimilar:
+    """
+    Test Class for QueryLargeSimilar
+    """
+
+    def test_query_large_similar_serialization(self):
+        """
+        Test serialization/deserialization for QueryLargeSimilar.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct a json representation of a QueryLargeSimilar model
+        query_large_similar_model_json = {}
+        query_large_similar_model_json['enabled'] = False
+        query_large_similar_model_json['document_ids'] = ['testString']
+        query_large_similar_model_json['fields'] = ['testString']
+
+        # Construct a model instance of QueryLargeSimilar by calling from_dict on the json representation
+        query_large_similar_model = QueryLargeSimilar.from_dict(query_large_similar_model_json)
+        assert query_large_similar_model != False
+
+        # Construct a model instance of QueryLargeSimilar by calling from_dict on the json representation
+        query_large_similar_model_dict = QueryLargeSimilar.from_dict(query_large_similar_model_json).__dict__
+        query_large_similar_model2 = QueryLargeSimilar(**query_large_similar_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_large_similar_model == query_large_similar_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_large_similar_model_json2 = query_large_similar_model.to_dict()
+        assert query_large_similar_model_json2 == query_large_similar_model_json
+
+
+class TestModel_QueryLargeSuggestedRefinements:
+    """
+    Test Class for QueryLargeSuggestedRefinements
+    """
+
+    def test_query_large_suggested_refinements_serialization(self):
+        """
+        Test serialization/deserialization for QueryLargeSuggestedRefinements.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct a json representation of a QueryLargeSuggestedRefinements model
+        query_large_suggested_refinements_model_json = {}
+        query_large_suggested_refinements_model_json['enabled'] = True
+        query_large_suggested_refinements_model_json['count'] = 1
+
+        # Construct a model instance of QueryLargeSuggestedRefinements by calling from_dict on the json representation
+        query_large_suggested_refinements_model = QueryLargeSuggestedRefinements.from_dict(query_large_suggested_refinements_model_json)
+        assert query_large_suggested_refinements_model != False
+
+        # Construct a model instance of QueryLargeSuggestedRefinements by calling from_dict on the json representation
+        query_large_suggested_refinements_model_dict = QueryLargeSuggestedRefinements.from_dict(query_large_suggested_refinements_model_json).__dict__
+        query_large_suggested_refinements_model2 = QueryLargeSuggestedRefinements(**query_large_suggested_refinements_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_large_suggested_refinements_model == query_large_suggested_refinements_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_large_suggested_refinements_model_json2 = query_large_suggested_refinements_model.to_dict()
+        assert query_large_suggested_refinements_model_json2 == query_large_suggested_refinements_model_json
+
+
+class TestModel_QueryLargeTableResults:
+    """
+    Test Class for QueryLargeTableResults
+    """
+
+    def test_query_large_table_results_serialization(self):
+        """
+        Test serialization/deserialization for QueryLargeTableResults.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct a json representation of a QueryLargeTableResults model
+        query_large_table_results_model_json = {}
+        query_large_table_results_model_json['enabled'] = True
+        query_large_table_results_model_json['count'] = 38
+
+        # Construct a model instance of QueryLargeTableResults by calling from_dict on the json representation
+        query_large_table_results_model = QueryLargeTableResults.from_dict(query_large_table_results_model_json)
+        assert query_large_table_results_model != False
+
+        # Construct a model instance of QueryLargeTableResults by calling from_dict on the json representation
+        query_large_table_results_model_dict = QueryLargeTableResults.from_dict(query_large_table_results_model_json).__dict__
+        query_large_table_results_model2 = QueryLargeTableResults(**query_large_table_results_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_large_table_results_model == query_large_table_results_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_large_table_results_model_json2 = query_large_table_results_model.to_dict()
+        assert query_large_table_results_model_json2 == query_large_table_results_model_json
+
+
+class TestModel_QueryNoticesResponse:
+    """
+    Test Class for QueryNoticesResponse
+    """
+
+    def test_query_notices_response_serialization(self):
+        """
+        Test serialization/deserialization for QueryNoticesResponse.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        # NOTE(review): Notice dict is intentionally empty — presumably its
+        # properties are read-only; confirm against the Notice model.
+        notice_model = {} # Notice
+
+        # Construct a json representation of a QueryNoticesResponse model
+        query_notices_response_model_json = {}
+        query_notices_response_model_json['matching_results'] = 38
+        query_notices_response_model_json['notices'] = [notice_model]
+
+        # Construct a model instance of QueryNoticesResponse by calling from_dict on the json representation
+        query_notices_response_model = QueryNoticesResponse.from_dict(query_notices_response_model_json)
+        assert query_notices_response_model != False
+
+        # Construct a model instance of QueryNoticesResponse by calling from_dict on the json representation
+        query_notices_response_model_dict = QueryNoticesResponse.from_dict(query_notices_response_model_json).__dict__
+        query_notices_response_model2 = QueryNoticesResponse(**query_notices_response_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_notices_response_model == query_notices_response_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_notices_response_model_json2 = query_notices_response_model.to_dict()
+        assert query_notices_response_model_json2 == query_notices_response_model_json
+
+
+class TestModel_QueryPairAggregationResult:
+    """
+    Test Class for QueryPairAggregationResult
+    """
+
+    def test_query_pair_aggregation_result_serialization(self):
+        """
+        Test serialization/deserialization for QueryPairAggregationResult.
+
+        Round-trips the model through from_dict()/to_dict() and verifies
+        that equivalent instances compare equal and no data is lost.
+        """
+
+        # Construct a json representation of a QueryPairAggregationResult model
+        query_pair_aggregation_result_model_json = {}
+        query_pair_aggregation_result_model_json['aggregations'] = [{'anyKey': 'anyValue'}]
+
+        # Construct a model instance of QueryPairAggregationResult by calling from_dict on the json representation
+        query_pair_aggregation_result_model = QueryPairAggregationResult.from_dict(query_pair_aggregation_result_model_json)
+        assert query_pair_aggregation_result_model != False
+
+        # Construct a model instance of QueryPairAggregationResult by calling from_dict on the json representation
+        query_pair_aggregation_result_model_dict = QueryPairAggregationResult.from_dict(query_pair_aggregation_result_model_json).__dict__
+        query_pair_aggregation_result_model2 = QueryPairAggregationResult(**query_pair_aggregation_result_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_pair_aggregation_result_model == query_pair_aggregation_result_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_pair_aggregation_result_model_json2 = query_pair_aggregation_result_model.to_dict()
+        assert query_pair_aggregation_result_model_json2 == query_pair_aggregation_result_model_json
+
+
+class TestModel_QueryResponse:
+ """
+ Test Class for QueryResponse
+ """
+
+ def test_query_response_serialization(self):
+ """
+ Test serialization/deserialization for QueryResponse
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ query_result_metadata_model = {} # QueryResultMetadata
+ query_result_metadata_model['document_retrieval_source'] = 'search'
+ query_result_metadata_model['collection_id'] = 'testString'
+ query_result_metadata_model['confidence'] = 0
+
+ result_passage_answer_model = {} # ResultPassageAnswer
+ result_passage_answer_model['answer_text'] = 'testString'
+ result_passage_answer_model['start_offset'] = 38
+ result_passage_answer_model['end_offset'] = 38
+ result_passage_answer_model['confidence'] = 0
+
+ query_result_passage_model = {} # QueryResultPassage
+ query_result_passage_model['passage_text'] = 'testString'
+ query_result_passage_model['start_offset'] = 38
+ query_result_passage_model['end_offset'] = 38
+ query_result_passage_model['field'] = 'testString'
+ query_result_passage_model['answers'] = [result_passage_answer_model]
+
+ query_result_model = {} # QueryResult
+ query_result_model['document_id'] = 'testString'
+ query_result_model['metadata'] = {'anyKey': 'anyValue'}
+ query_result_model['result_metadata'] = query_result_metadata_model
+ query_result_model['document_passages'] = [query_result_passage_model]
+ query_result_model['id'] = 'watson-generated ID'
+
+ query_term_aggregation_result_model = {} # QueryTermAggregationResult
+ query_term_aggregation_result_model['key'] = 'active'
+ query_term_aggregation_result_model['matching_results'] = 34
+ query_term_aggregation_result_model['relevancy'] = 72.5
+ query_term_aggregation_result_model['total_matching_documents'] = 38
+ query_term_aggregation_result_model['estimated_matching_results'] = 72.5
+ query_term_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}]
+
+ query_aggregation_model = {} # QueryAggregationQueryTermAggregation
+ query_aggregation_model['type'] = 'term'
+ query_aggregation_model['field'] = 'field'
+ query_aggregation_model['count'] = 1
+ query_aggregation_model['name'] = 'testString'
+ query_aggregation_model['results'] = [query_term_aggregation_result_model]
+
+ retrieval_details_model = {} # RetrievalDetails
+ retrieval_details_model['document_retrieval_strategy'] = 'untrained'
+
+ query_suggested_refinement_model = {} # QuerySuggestedRefinement
+ query_suggested_refinement_model['text'] = 'testString'
+
+ table_element_location_model = {} # TableElementLocation
+ table_element_location_model['begin'] = 26
+ table_element_location_model['end'] = 26
+
+ table_text_location_model = {} # TableTextLocation
+ table_text_location_model['text'] = 'testString'
+ table_text_location_model['location'] = table_element_location_model
+
+ table_headers_model = {} # TableHeaders
+ table_headers_model['cell_id'] = 'testString'
+ table_headers_model['location'] = table_element_location_model
+ table_headers_model['text'] = 'testString'
+ table_headers_model['row_index_begin'] = 26
+ table_headers_model['row_index_end'] = 26
+ table_headers_model['column_index_begin'] = 26
+ table_headers_model['column_index_end'] = 26
+
+ table_row_headers_model = {} # TableRowHeaders
+ table_row_headers_model['cell_id'] = 'testString'
+ table_row_headers_model['location'] = table_element_location_model
+ table_row_headers_model['text'] = 'testString'
+ table_row_headers_model['text_normalized'] = 'testString'
+ table_row_headers_model['row_index_begin'] = 26
+ table_row_headers_model['row_index_end'] = 26
+ table_row_headers_model['column_index_begin'] = 26
+ table_row_headers_model['column_index_end'] = 26
+
+ table_column_headers_model = {} # TableColumnHeaders
+ table_column_headers_model['cell_id'] = 'testString'
+ table_column_headers_model['location'] = table_element_location_model
+ table_column_headers_model['text'] = 'testString'
+ table_column_headers_model['text_normalized'] = 'testString'
+ table_column_headers_model['row_index_begin'] = 26
+ table_column_headers_model['row_index_end'] = 26
+ table_column_headers_model['column_index_begin'] = 26
+ table_column_headers_model['column_index_end'] = 26
+
+ table_cell_key_model = {} # TableCellKey
+ table_cell_key_model['cell_id'] = 'testString'
+ table_cell_key_model['location'] = table_element_location_model
+ table_cell_key_model['text'] = 'testString'
+
+ table_cell_values_model = {} # TableCellValues
+ table_cell_values_model['cell_id'] = 'testString'
+ table_cell_values_model['location'] = table_element_location_model
+ table_cell_values_model['text'] = 'testString'
+
+ table_key_value_pairs_model = {} # TableKeyValuePairs
+ table_key_value_pairs_model['key'] = table_cell_key_model
+ table_key_value_pairs_model['value'] = [table_cell_values_model]
+
+ document_attribute_model = {} # DocumentAttribute
+ document_attribute_model['type'] = 'testString'
+ document_attribute_model['text'] = 'testString'
+ document_attribute_model['location'] = table_element_location_model
+
+ table_body_cells_model = {} # TableBodyCells
+ table_body_cells_model['cell_id'] = 'testString'
+ table_body_cells_model['location'] = table_element_location_model
+ table_body_cells_model['text'] = 'testString'
+ table_body_cells_model['row_index_begin'] = 26
+ table_body_cells_model['row_index_end'] = 26
+ table_body_cells_model['column_index_begin'] = 26
+ table_body_cells_model['column_index_end'] = 26
+ table_body_cells_model['row_header_ids'] = ['testString']
+ table_body_cells_model['row_header_texts'] = ['testString']
+ table_body_cells_model['row_header_texts_normalized'] = ['testString']
+ table_body_cells_model['column_header_ids'] = ['testString']
+ table_body_cells_model['column_header_texts'] = ['testString']
+ table_body_cells_model['column_header_texts_normalized'] = ['testString']
+ table_body_cells_model['attributes'] = [document_attribute_model]
+
+ table_result_table_model = {} # TableResultTable
+ table_result_table_model['location'] = table_element_location_model
+ table_result_table_model['text'] = 'testString'
+ table_result_table_model['section_title'] = table_text_location_model
+ table_result_table_model['title'] = table_text_location_model
+ table_result_table_model['table_headers'] = [table_headers_model]
+ table_result_table_model['row_headers'] = [table_row_headers_model]
+ table_result_table_model['column_headers'] = [table_column_headers_model]
+ table_result_table_model['key_value_pairs'] = [table_key_value_pairs_model]
+ table_result_table_model['body_cells'] = [table_body_cells_model]
+ table_result_table_model['contexts'] = [table_text_location_model]
+
+ query_table_result_model = {} # QueryTableResult
+ query_table_result_model['table_id'] = 'testString'
+ query_table_result_model['source_document_id'] = 'testString'
+ query_table_result_model['collection_id'] = 'testString'
+ query_table_result_model['table_html'] = 'testString'
+ query_table_result_model['table_html_offset'] = 38
+ query_table_result_model['table'] = table_result_table_model
+
+ query_response_passage_model = {} # QueryResponsePassage
+ query_response_passage_model['passage_text'] = 'testString'
+ query_response_passage_model['passage_score'] = 72.5
+ query_response_passage_model['document_id'] = 'testString'
+ query_response_passage_model['collection_id'] = 'testString'
+ query_response_passage_model['start_offset'] = 38
+ query_response_passage_model['end_offset'] = 38
+ query_response_passage_model['field'] = 'testString'
+ query_response_passage_model['answers'] = [result_passage_answer_model]
+
+ # Construct a json representation of a QueryResponse model
+ query_response_model_json = {}
+ query_response_model_json['matching_results'] = 38
+ query_response_model_json['results'] = [query_result_model]
+ query_response_model_json['aggregations'] = [query_aggregation_model]
+ query_response_model_json['retrieval_details'] = retrieval_details_model
+ query_response_model_json['suggested_query'] = 'testString'
+ query_response_model_json['suggested_refinements'] = [query_suggested_refinement_model]
+ query_response_model_json['table_results'] = [query_table_result_model]
+ query_response_model_json['passages'] = [query_response_passage_model]
+
+ # Construct a model instance of QueryResponse by calling from_dict on the json representation
+ query_response_model = QueryResponse.from_dict(query_response_model_json)
+ assert query_response_model != False
+
+ # Construct a model instance of QueryResponse by calling from_dict on the json representation
+ query_response_model_dict = QueryResponse.from_dict(query_response_model_json).__dict__
+ query_response_model2 = QueryResponse(**query_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert query_response_model == query_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ query_response_model_json2 = query_response_model.to_dict()
+ assert query_response_model_json2 == query_response_model_json
+
+
+class TestModel_QueryResponsePassage:
+    """
+    Test Class for QueryResponsePassage
+    """
+
+    def test_query_response_passage_serialization(self):
+        """
+        Test serialization/deserialization for QueryResponsePassage
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        result_passage_answer_model = {} # ResultPassageAnswer
+        result_passage_answer_model['answer_text'] = 'testString'
+        result_passage_answer_model['start_offset'] = 38
+        result_passage_answer_model['end_offset'] = 38
+        result_passage_answer_model['confidence'] = 0
+
+        # Construct a json representation of a QueryResponsePassage model
+        query_response_passage_model_json = {}
+        query_response_passage_model_json['passage_text'] = 'testString'
+        query_response_passage_model_json['passage_score'] = 72.5
+        query_response_passage_model_json['document_id'] = 'testString'
+        query_response_passage_model_json['collection_id'] = 'testString'
+        query_response_passage_model_json['start_offset'] = 38
+        query_response_passage_model_json['end_offset'] = 38
+        query_response_passage_model_json['field'] = 'testString'
+        query_response_passage_model_json['answers'] = [result_passage_answer_model]
+
+        # Construct a model instance of QueryResponsePassage by calling from_dict on the json representation
+        query_response_passage_model = QueryResponsePassage.from_dict(query_response_passage_model_json)
+        assert query_response_passage_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        query_response_passage_model_dict = QueryResponsePassage.from_dict(query_response_passage_model_json).__dict__
+        query_response_passage_model2 = QueryResponsePassage(**query_response_passage_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_response_passage_model == query_response_passage_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_response_passage_model_json2 = query_response_passage_model.to_dict()
+        assert query_response_passage_model_json2 == query_response_passage_model_json
+
+
+class TestModel_QueryResult:
+    """
+    Test Class for QueryResult
+    """
+
+    def test_query_result_serialization(self):
+        """
+        Test serialization/deserialization for QueryResult
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        query_result_metadata_model = {} # QueryResultMetadata
+        query_result_metadata_model['document_retrieval_source'] = 'search'
+        query_result_metadata_model['collection_id'] = 'testString'
+        query_result_metadata_model['confidence'] = 0
+
+        result_passage_answer_model = {} # ResultPassageAnswer
+        result_passage_answer_model['answer_text'] = 'testString'
+        result_passage_answer_model['start_offset'] = 38
+        result_passage_answer_model['end_offset'] = 38
+        result_passage_answer_model['confidence'] = 0
+
+        query_result_passage_model = {} # QueryResultPassage
+        query_result_passage_model['passage_text'] = 'testString'
+        query_result_passage_model['start_offset'] = 38
+        query_result_passage_model['end_offset'] = 38
+        query_result_passage_model['field'] = 'testString'
+        query_result_passage_model['answers'] = [result_passage_answer_model]
+
+        # Construct a json representation of a QueryResult model
+        query_result_model_json = {}
+        query_result_model_json['document_id'] = 'testString'
+        query_result_model_json['metadata'] = {'anyKey': 'anyValue'}
+        query_result_model_json['result_metadata'] = query_result_metadata_model
+        query_result_model_json['document_passages'] = [query_result_passage_model]
+        # 'foo' is not a declared property; it exercises QueryResult's support
+        # for arbitrary additional properties (see get_properties/set_properties below)
+        query_result_model_json['foo'] = 'testString'
+
+        # Construct a model instance of QueryResult by calling from_dict on the json representation
+        query_result_model = QueryResult.from_dict(query_result_model_json)
+        assert query_result_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        query_result_model_dict = QueryResult.from_dict(query_result_model_json).__dict__
+        query_result_model2 = QueryResult(**query_result_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_result_model == query_result_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_result_model_json2 = query_result_model.to_dict()
+        assert query_result_model_json2 == query_result_model_json
+
+        # Test get_properties and set_properties methods.
+        query_result_model.set_properties({})
+        actual_dict = query_result_model.get_properties()
+        assert actual_dict == {}
+
+        expected_dict = {'foo': 'testString'}
+        query_result_model.set_properties(expected_dict)
+        actual_dict = query_result_model.get_properties()
+        assert actual_dict.keys() == expected_dict.keys()
+
+
+class TestModel_QueryResultMetadata:
+    """
+    Test Class for QueryResultMetadata
+    """
+
+    def test_query_result_metadata_serialization(self):
+        """
+        Test serialization/deserialization for QueryResultMetadata
+        """
+
+        # Construct a json representation of a QueryResultMetadata model
+        query_result_metadata_model_json = {}
+        query_result_metadata_model_json['document_retrieval_source'] = 'search'
+        query_result_metadata_model_json['collection_id'] = 'testString'
+        query_result_metadata_model_json['confidence'] = 0
+
+        # Construct a model instance of QueryResultMetadata by calling from_dict on the json representation
+        query_result_metadata_model = QueryResultMetadata.from_dict(query_result_metadata_model_json)
+        assert query_result_metadata_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        query_result_metadata_model_dict = QueryResultMetadata.from_dict(query_result_metadata_model_json).__dict__
+        query_result_metadata_model2 = QueryResultMetadata(**query_result_metadata_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_result_metadata_model == query_result_metadata_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_result_metadata_model_json2 = query_result_metadata_model.to_dict()
+        assert query_result_metadata_model_json2 == query_result_metadata_model_json
+
+
+class TestModel_QueryResultPassage:
+    """
+    Test Class for QueryResultPassage
+    """
+
+    def test_query_result_passage_serialization(self):
+        """
+        Test serialization/deserialization for QueryResultPassage
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        result_passage_answer_model = {} # ResultPassageAnswer
+        result_passage_answer_model['answer_text'] = 'testString'
+        result_passage_answer_model['start_offset'] = 38
+        result_passage_answer_model['end_offset'] = 38
+        result_passage_answer_model['confidence'] = 0
+
+        # Construct a json representation of a QueryResultPassage model
+        query_result_passage_model_json = {}
+        query_result_passage_model_json['passage_text'] = 'testString'
+        query_result_passage_model_json['start_offset'] = 38
+        query_result_passage_model_json['end_offset'] = 38
+        query_result_passage_model_json['field'] = 'testString'
+        query_result_passage_model_json['answers'] = [result_passage_answer_model]
+
+        # Construct a model instance of QueryResultPassage by calling from_dict on the json representation
+        query_result_passage_model = QueryResultPassage.from_dict(query_result_passage_model_json)
+        assert query_result_passage_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        query_result_passage_model_dict = QueryResultPassage.from_dict(query_result_passage_model_json).__dict__
+        query_result_passage_model2 = QueryResultPassage(**query_result_passage_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_result_passage_model == query_result_passage_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_result_passage_model_json2 = query_result_passage_model.to_dict()
+        assert query_result_passage_model_json2 == query_result_passage_model_json
+
+
+class TestModel_QuerySuggestedRefinement:
+    """
+    Test Class for QuerySuggestedRefinement
+    """
+
+    def test_query_suggested_refinement_serialization(self):
+        """
+        Test serialization/deserialization for QuerySuggestedRefinement
+        """
+
+        # Construct a json representation of a QuerySuggestedRefinement model
+        query_suggested_refinement_model_json = {}
+        query_suggested_refinement_model_json['text'] = 'testString'
+
+        # Construct a model instance of QuerySuggestedRefinement by calling from_dict on the json representation
+        query_suggested_refinement_model = QuerySuggestedRefinement.from_dict(query_suggested_refinement_model_json)
+        assert query_suggested_refinement_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        query_suggested_refinement_model_dict = QuerySuggestedRefinement.from_dict(query_suggested_refinement_model_json).__dict__
+        query_suggested_refinement_model2 = QuerySuggestedRefinement(**query_suggested_refinement_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_suggested_refinement_model == query_suggested_refinement_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_suggested_refinement_model_json2 = query_suggested_refinement_model.to_dict()
+        assert query_suggested_refinement_model_json2 == query_suggested_refinement_model_json
+
+
+class TestModel_QueryTableResult:
+    """
+    Test Class for QueryTableResult
+    """
+
+    def test_query_table_result_serialization(self):
+        """
+        Test serialization/deserialization for QueryTableResult
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        table_element_location_model = {} # TableElementLocation
+        table_element_location_model['begin'] = 26
+        table_element_location_model['end'] = 26
+
+        table_text_location_model = {} # TableTextLocation
+        table_text_location_model['text'] = 'testString'
+        table_text_location_model['location'] = table_element_location_model
+
+        table_headers_model = {} # TableHeaders
+        table_headers_model['cell_id'] = 'testString'
+        table_headers_model['location'] = table_element_location_model
+        table_headers_model['text'] = 'testString'
+        table_headers_model['row_index_begin'] = 26
+        table_headers_model['row_index_end'] = 26
+        table_headers_model['column_index_begin'] = 26
+        table_headers_model['column_index_end'] = 26
+
+        table_row_headers_model = {} # TableRowHeaders
+        table_row_headers_model['cell_id'] = 'testString'
+        table_row_headers_model['location'] = table_element_location_model
+        table_row_headers_model['text'] = 'testString'
+        table_row_headers_model['text_normalized'] = 'testString'
+        table_row_headers_model['row_index_begin'] = 26
+        table_row_headers_model['row_index_end'] = 26
+        table_row_headers_model['column_index_begin'] = 26
+        table_row_headers_model['column_index_end'] = 26
+
+        table_column_headers_model = {} # TableColumnHeaders
+        table_column_headers_model['cell_id'] = 'testString'
+        table_column_headers_model['location'] = table_element_location_model
+        table_column_headers_model['text'] = 'testString'
+        table_column_headers_model['text_normalized'] = 'testString'
+        table_column_headers_model['row_index_begin'] = 26
+        table_column_headers_model['row_index_end'] = 26
+        table_column_headers_model['column_index_begin'] = 26
+        table_column_headers_model['column_index_end'] = 26
+
+        table_cell_key_model = {} # TableCellKey
+        table_cell_key_model['cell_id'] = 'testString'
+        table_cell_key_model['location'] = table_element_location_model
+        table_cell_key_model['text'] = 'testString'
+
+        table_cell_values_model = {} # TableCellValues
+        table_cell_values_model['cell_id'] = 'testString'
+        table_cell_values_model['location'] = table_element_location_model
+        table_cell_values_model['text'] = 'testString'
+
+        table_key_value_pairs_model = {} # TableKeyValuePairs
+        table_key_value_pairs_model['key'] = table_cell_key_model
+        table_key_value_pairs_model['value'] = [table_cell_values_model]
+
+        document_attribute_model = {} # DocumentAttribute
+        document_attribute_model['type'] = 'testString'
+        document_attribute_model['text'] = 'testString'
+        document_attribute_model['location'] = table_element_location_model
+
+        table_body_cells_model = {} # TableBodyCells
+        table_body_cells_model['cell_id'] = 'testString'
+        table_body_cells_model['location'] = table_element_location_model
+        table_body_cells_model['text'] = 'testString'
+        table_body_cells_model['row_index_begin'] = 26
+        table_body_cells_model['row_index_end'] = 26
+        table_body_cells_model['column_index_begin'] = 26
+        table_body_cells_model['column_index_end'] = 26
+        table_body_cells_model['row_header_ids'] = ['testString']
+        table_body_cells_model['row_header_texts'] = ['testString']
+        table_body_cells_model['row_header_texts_normalized'] = ['testString']
+        table_body_cells_model['column_header_ids'] = ['testString']
+        table_body_cells_model['column_header_texts'] = ['testString']
+        table_body_cells_model['column_header_texts_normalized'] = ['testString']
+        table_body_cells_model['attributes'] = [document_attribute_model]
+
+        table_result_table_model = {} # TableResultTable
+        table_result_table_model['location'] = table_element_location_model
+        table_result_table_model['text'] = 'testString'
+        table_result_table_model['section_title'] = table_text_location_model
+        table_result_table_model['title'] = table_text_location_model
+        table_result_table_model['table_headers'] = [table_headers_model]
+        table_result_table_model['row_headers'] = [table_row_headers_model]
+        table_result_table_model['column_headers'] = [table_column_headers_model]
+        table_result_table_model['key_value_pairs'] = [table_key_value_pairs_model]
+        table_result_table_model['body_cells'] = [table_body_cells_model]
+        table_result_table_model['contexts'] = [table_text_location_model]
+
+        # Construct a json representation of a QueryTableResult model
+        query_table_result_model_json = {}
+        query_table_result_model_json['table_id'] = 'testString'
+        query_table_result_model_json['source_document_id'] = 'testString'
+        query_table_result_model_json['collection_id'] = 'testString'
+        query_table_result_model_json['table_html'] = 'testString'
+        query_table_result_model_json['table_html_offset'] = 38
+        query_table_result_model_json['table'] = table_result_table_model
+
+        # Construct a model instance of QueryTableResult by calling from_dict on the json representation
+        query_table_result_model = QueryTableResult.from_dict(query_table_result_model_json)
+        assert query_table_result_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        query_table_result_model_dict = QueryTableResult.from_dict(query_table_result_model_json).__dict__
+        query_table_result_model2 = QueryTableResult(**query_table_result_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_table_result_model == query_table_result_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_table_result_model_json2 = query_table_result_model.to_dict()
+        assert query_table_result_model_json2 == query_table_result_model_json
+
+
+class TestModel_QueryTermAggregationResult:
+    """
+    Test Class for QueryTermAggregationResult
+    """
+
+    def test_query_term_aggregation_result_serialization(self):
+        """
+        Test serialization/deserialization for QueryTermAggregationResult
+        """
+
+        # Construct a json representation of a QueryTermAggregationResult model
+        query_term_aggregation_result_model_json = {}
+        query_term_aggregation_result_model_json['key'] = 'testString'
+        query_term_aggregation_result_model_json['matching_results'] = 38
+        query_term_aggregation_result_model_json['relevancy'] = 72.5
+        query_term_aggregation_result_model_json['total_matching_documents'] = 38
+        query_term_aggregation_result_model_json['estimated_matching_results'] = 72.5
+        query_term_aggregation_result_model_json['aggregations'] = [{'anyKey': 'anyValue'}]
+
+        # Construct a model instance of QueryTermAggregationResult by calling from_dict on the json representation
+        query_term_aggregation_result_model = QueryTermAggregationResult.from_dict(query_term_aggregation_result_model_json)
+        assert query_term_aggregation_result_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        query_term_aggregation_result_model_dict = QueryTermAggregationResult.from_dict(query_term_aggregation_result_model_json).__dict__
+        query_term_aggregation_result_model2 = QueryTermAggregationResult(**query_term_aggregation_result_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_term_aggregation_result_model == query_term_aggregation_result_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_term_aggregation_result_model_json2 = query_term_aggregation_result_model.to_dict()
+        assert query_term_aggregation_result_model_json2 == query_term_aggregation_result_model_json
+
+
+class TestModel_QueryTimesliceAggregationResult:
+    """
+    Test Class for QueryTimesliceAggregationResult
+    """
+
+    def test_query_timeslice_aggregation_result_serialization(self):
+        """
+        Test serialization/deserialization for QueryTimesliceAggregationResult
+        """
+
+        # Construct a json representation of a QueryTimesliceAggregationResult model
+        query_timeslice_aggregation_result_model_json = {}
+        query_timeslice_aggregation_result_model_json['key_as_string'] = 'testString'
+        query_timeslice_aggregation_result_model_json['key'] = 26
+        query_timeslice_aggregation_result_model_json['matching_results'] = 26
+        query_timeslice_aggregation_result_model_json['aggregations'] = [{'anyKey': 'anyValue'}]
+
+        # Construct a model instance of QueryTimesliceAggregationResult by calling from_dict on the json representation
+        query_timeslice_aggregation_result_model = QueryTimesliceAggregationResult.from_dict(query_timeslice_aggregation_result_model_json)
+        assert query_timeslice_aggregation_result_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        query_timeslice_aggregation_result_model_dict = QueryTimesliceAggregationResult.from_dict(query_timeslice_aggregation_result_model_json).__dict__
+        query_timeslice_aggregation_result_model2 = QueryTimesliceAggregationResult(**query_timeslice_aggregation_result_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_timeslice_aggregation_result_model == query_timeslice_aggregation_result_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_timeslice_aggregation_result_model_json2 = query_timeslice_aggregation_result_model.to_dict()
+        assert query_timeslice_aggregation_result_model_json2 == query_timeslice_aggregation_result_model_json
+
+
+class TestModel_QueryTopHitsAggregationResult:
+    """
+    Test Class for QueryTopHitsAggregationResult
+    """
+
+    def test_query_top_hits_aggregation_result_serialization(self):
+        """
+        Test serialization/deserialization for QueryTopHitsAggregationResult
+        """
+
+        # Construct a json representation of a QueryTopHitsAggregationResult model
+        query_top_hits_aggregation_result_model_json = {}
+        query_top_hits_aggregation_result_model_json['matching_results'] = 38
+        query_top_hits_aggregation_result_model_json['hits'] = [{'anyKey': 'anyValue'}]
+
+        # Construct a model instance of QueryTopHitsAggregationResult by calling from_dict on the json representation
+        query_top_hits_aggregation_result_model = QueryTopHitsAggregationResult.from_dict(query_top_hits_aggregation_result_model_json)
+        assert query_top_hits_aggregation_result_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        query_top_hits_aggregation_result_model_dict = QueryTopHitsAggregationResult.from_dict(query_top_hits_aggregation_result_model_json).__dict__
+        query_top_hits_aggregation_result_model2 = QueryTopHitsAggregationResult(**query_top_hits_aggregation_result_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_top_hits_aggregation_result_model == query_top_hits_aggregation_result_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_top_hits_aggregation_result_model_json2 = query_top_hits_aggregation_result_model.to_dict()
+        assert query_top_hits_aggregation_result_model_json2 == query_top_hits_aggregation_result_model_json
+
+
+class TestModel_QueryTopicAggregationResult:
+    """
+    Test Class for QueryTopicAggregationResult
+    """
+
+    def test_query_topic_aggregation_result_serialization(self):
+        """
+        Test serialization/deserialization for QueryTopicAggregationResult
+        """
+
+        # Construct a json representation of a QueryTopicAggregationResult model
+        query_topic_aggregation_result_model_json = {}
+        query_topic_aggregation_result_model_json['aggregations'] = [{'anyKey': 'anyValue'}]
+
+        # Construct a model instance of QueryTopicAggregationResult by calling from_dict on the json representation
+        query_topic_aggregation_result_model = QueryTopicAggregationResult.from_dict(query_topic_aggregation_result_model_json)
+        assert query_topic_aggregation_result_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        query_topic_aggregation_result_model_dict = QueryTopicAggregationResult.from_dict(query_topic_aggregation_result_model_json).__dict__
+        query_topic_aggregation_result_model2 = QueryTopicAggregationResult(**query_topic_aggregation_result_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_topic_aggregation_result_model == query_topic_aggregation_result_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_topic_aggregation_result_model_json2 = query_topic_aggregation_result_model.to_dict()
+        assert query_topic_aggregation_result_model_json2 == query_topic_aggregation_result_model_json
+
+
+class TestModel_QueryTrendAggregationResult:
+    """
+    Test Class for QueryTrendAggregationResult
+    """
+
+    def test_query_trend_aggregation_result_serialization(self):
+        """
+        Test serialization/deserialization for QueryTrendAggregationResult
+        """
+
+        # Construct a json representation of a QueryTrendAggregationResult model
+        query_trend_aggregation_result_model_json = {}
+        query_trend_aggregation_result_model_json['aggregations'] = [{'anyKey': 'anyValue'}]
+
+        # Construct a model instance of QueryTrendAggregationResult by calling from_dict on the json representation
+        query_trend_aggregation_result_model = QueryTrendAggregationResult.from_dict(query_trend_aggregation_result_model_json)
+        assert query_trend_aggregation_result_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        query_trend_aggregation_result_model_dict = QueryTrendAggregationResult.from_dict(query_trend_aggregation_result_model_json).__dict__
+        query_trend_aggregation_result_model2 = QueryTrendAggregationResult(**query_trend_aggregation_result_model_dict)
+
+        # Verify the model instances are equivalent
+        assert query_trend_aggregation_result_model == query_trend_aggregation_result_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        query_trend_aggregation_result_model_json2 = query_trend_aggregation_result_model.to_dict()
+        assert query_trend_aggregation_result_model_json2 == query_trend_aggregation_result_model_json
+
+
+class TestModel_ResultPassageAnswer:
+    """
+    Test Class for ResultPassageAnswer
+    """
+
+    def test_result_passage_answer_serialization(self):
+        """
+        Test serialization/deserialization for ResultPassageAnswer
+        """
+
+        # Construct a json representation of a ResultPassageAnswer model
+        result_passage_answer_model_json = {}
+        result_passage_answer_model_json['answer_text'] = 'testString'
+        result_passage_answer_model_json['start_offset'] = 38
+        result_passage_answer_model_json['end_offset'] = 38
+        result_passage_answer_model_json['confidence'] = 0
+
+        # Construct a model instance of ResultPassageAnswer by calling from_dict on the json representation
+        result_passage_answer_model = ResultPassageAnswer.from_dict(result_passage_answer_model_json)
+        assert result_passage_answer_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        result_passage_answer_model_dict = ResultPassageAnswer.from_dict(result_passage_answer_model_json).__dict__
+        result_passage_answer_model2 = ResultPassageAnswer(**result_passage_answer_model_dict)
+
+        # Verify the model instances are equivalent
+        assert result_passage_answer_model == result_passage_answer_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        result_passage_answer_model_json2 = result_passage_answer_model.to_dict()
+        assert result_passage_answer_model_json2 == result_passage_answer_model_json
+
+
+class TestModel_RetrievalDetails:
+    """
+    Test Class for RetrievalDetails
+    """
+
+    def test_retrieval_details_serialization(self):
+        """
+        Test serialization/deserialization for RetrievalDetails
+        """
+
+        # Construct a json representation of a RetrievalDetails model
+        retrieval_details_model_json = {}
+        retrieval_details_model_json['document_retrieval_strategy'] = 'untrained'
+
+        # Construct a model instance of RetrievalDetails by calling from_dict on the json representation
+        retrieval_details_model = RetrievalDetails.from_dict(retrieval_details_model_json)
+        assert retrieval_details_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        retrieval_details_model_dict = RetrievalDetails.from_dict(retrieval_details_model_json).__dict__
+        retrieval_details_model2 = RetrievalDetails(**retrieval_details_model_dict)
+
+        # Verify the model instances are equivalent
+        assert retrieval_details_model == retrieval_details_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        retrieval_details_model_json2 = retrieval_details_model.to_dict()
+        assert retrieval_details_model_json2 == retrieval_details_model_json
+
+
+class TestModel_StopWordList:
+    """
+    Test Class for StopWordList
+    """
+
+    def test_stop_word_list_serialization(self):
+        """
+        Test serialization/deserialization for StopWordList
+        """
+
+        # Construct a json representation of a StopWordList model
+        stop_word_list_model_json = {}
+        stop_word_list_model_json['stopwords'] = ['testString']
+
+        # Construct a model instance of StopWordList by calling from_dict on the json representation
+        stop_word_list_model = StopWordList.from_dict(stop_word_list_model_json)
+        assert stop_word_list_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        stop_word_list_model_dict = StopWordList.from_dict(stop_word_list_model_json).__dict__
+        stop_word_list_model2 = StopWordList(**stop_word_list_model_dict)
+
+        # Verify the model instances are equivalent
+        assert stop_word_list_model == stop_word_list_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        stop_word_list_model_json2 = stop_word_list_model.to_dict()
+        assert stop_word_list_model_json2 == stop_word_list_model_json
+
+
+class TestModel_TableBodyCells:
+    """
+    Test Class for TableBodyCells
+    """
+
+    def test_table_body_cells_serialization(self):
+        """
+        Test serialization/deserialization for TableBodyCells
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        table_element_location_model = {} # TableElementLocation
+        table_element_location_model['begin'] = 26
+        table_element_location_model['end'] = 26
+
+        document_attribute_model = {} # DocumentAttribute
+        document_attribute_model['type'] = 'testString'
+        document_attribute_model['text'] = 'testString'
+        document_attribute_model['location'] = table_element_location_model
+
+        # Construct a json representation of a TableBodyCells model
+        table_body_cells_model_json = {}
+        table_body_cells_model_json['cell_id'] = 'testString'
+        table_body_cells_model_json['location'] = table_element_location_model
+        table_body_cells_model_json['text'] = 'testString'
+        table_body_cells_model_json['row_index_begin'] = 26
+        table_body_cells_model_json['row_index_end'] = 26
+        table_body_cells_model_json['column_index_begin'] = 26
+        table_body_cells_model_json['column_index_end'] = 26
+        table_body_cells_model_json['row_header_ids'] = ['testString']
+        table_body_cells_model_json['row_header_texts'] = ['testString']
+        table_body_cells_model_json['row_header_texts_normalized'] = ['testString']
+        table_body_cells_model_json['column_header_ids'] = ['testString']
+        table_body_cells_model_json['column_header_texts'] = ['testString']
+        table_body_cells_model_json['column_header_texts_normalized'] = ['testString']
+        table_body_cells_model_json['attributes'] = [document_attribute_model]
+
+        # Construct a model instance of TableBodyCells by calling from_dict on the json representation
+        table_body_cells_model = TableBodyCells.from_dict(table_body_cells_model_json)
+        assert table_body_cells_model != False
+
+        # Construct a second instance by passing the first instance's attribute dict to the constructor
+        table_body_cells_model_dict = TableBodyCells.from_dict(table_body_cells_model_json).__dict__
+        table_body_cells_model2 = TableBodyCells(**table_body_cells_model_dict)
+
+        # Verify the model instances are equivalent
+        assert table_body_cells_model == table_body_cells_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        table_body_cells_model_json2 = table_body_cells_model.to_dict()
+        assert table_body_cells_model_json2 == table_body_cells_model_json
+
+
+class TestModel_TableCellKey:
+ """
+ Test Class for TableCellKey
+ """
+
+ def test_table_cell_key_serialization(self):
+ """
+ Test serialization/deserialization for TableCellKey
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ table_element_location_model = {} # TableElementLocation
+ table_element_location_model['begin'] = 26
+ table_element_location_model['end'] = 26
+
+ # Construct a json representation of a TableCellKey model
+ table_cell_key_model_json = {}
+ table_cell_key_model_json['cell_id'] = 'testString'
+ table_cell_key_model_json['location'] = table_element_location_model
+ table_cell_key_model_json['text'] = 'testString'
+
+ # Construct a model instance of TableCellKey by calling from_dict on the json representation
+ table_cell_key_model = TableCellKey.from_dict(table_cell_key_model_json)
+ assert table_cell_key_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ table_cell_key_model_dict = TableCellKey.from_dict(table_cell_key_model_json).__dict__
+ table_cell_key_model2 = TableCellKey(**table_cell_key_model_dict)
+
+ # Verify the model instances are equivalent
+ assert table_cell_key_model == table_cell_key_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ table_cell_key_model_json2 = table_cell_key_model.to_dict()
+ assert table_cell_key_model_json2 == table_cell_key_model_json
+
+
+class TestModel_TableCellValues:
+ """
+ Test Class for TableCellValues
+ """
+
+ def test_table_cell_values_serialization(self):
+ """
+ Test serialization/deserialization for TableCellValues
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ table_element_location_model = {} # TableElementLocation
+ table_element_location_model['begin'] = 26
+ table_element_location_model['end'] = 26
+
+ # Construct a json representation of a TableCellValues model
+ table_cell_values_model_json = {}
+ table_cell_values_model_json['cell_id'] = 'testString'
+ table_cell_values_model_json['location'] = table_element_location_model
+ table_cell_values_model_json['text'] = 'testString'
+
+ # Construct a model instance of TableCellValues by calling from_dict on the json representation
+ table_cell_values_model = TableCellValues.from_dict(table_cell_values_model_json)
+ assert table_cell_values_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ table_cell_values_model_dict = TableCellValues.from_dict(table_cell_values_model_json).__dict__
+ table_cell_values_model2 = TableCellValues(**table_cell_values_model_dict)
+
+ # Verify the model instances are equivalent
+ assert table_cell_values_model == table_cell_values_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ table_cell_values_model_json2 = table_cell_values_model.to_dict()
+ assert table_cell_values_model_json2 == table_cell_values_model_json
+
+
+class TestModel_TableColumnHeaders:
+ """
+ Test Class for TableColumnHeaders
+ """
+
+ def test_table_column_headers_serialization(self):
+ """
+ Test serialization/deserialization for TableColumnHeaders
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ table_element_location_model = {} # TableElementLocation
+ table_element_location_model['begin'] = 26
+ table_element_location_model['end'] = 26
+
+ # Construct a json representation of a TableColumnHeaders model
+ table_column_headers_model_json = {}
+ table_column_headers_model_json['cell_id'] = 'testString'
+ table_column_headers_model_json['location'] = table_element_location_model
+ table_column_headers_model_json['text'] = 'testString'
+ table_column_headers_model_json['text_normalized'] = 'testString'
+ table_column_headers_model_json['row_index_begin'] = 26
+ table_column_headers_model_json['row_index_end'] = 26
+ table_column_headers_model_json['column_index_begin'] = 26
+ table_column_headers_model_json['column_index_end'] = 26
+
+ # Construct a model instance of TableColumnHeaders by calling from_dict on the json representation
+ table_column_headers_model = TableColumnHeaders.from_dict(table_column_headers_model_json)
+ assert table_column_headers_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ table_column_headers_model_dict = TableColumnHeaders.from_dict(table_column_headers_model_json).__dict__
+ table_column_headers_model2 = TableColumnHeaders(**table_column_headers_model_dict)
+
+ # Verify the model instances are equivalent
+ assert table_column_headers_model == table_column_headers_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ table_column_headers_model_json2 = table_column_headers_model.to_dict()
+ assert table_column_headers_model_json2 == table_column_headers_model_json
+
+
+class TestModel_TableElementLocation:
+ """
+ Test Class for TableElementLocation
+ """
+
+ def test_table_element_location_serialization(self):
+ """
+ Test serialization/deserialization for TableElementLocation
+ """
+
+ # Construct a json representation of a TableElementLocation model
+ table_element_location_model_json = {}
+ table_element_location_model_json['begin'] = 26
+ table_element_location_model_json['end'] = 26
+
+ # Construct a model instance of TableElementLocation by calling from_dict on the json representation
+ table_element_location_model = TableElementLocation.from_dict(table_element_location_model_json)
+ assert table_element_location_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ table_element_location_model_dict = TableElementLocation.from_dict(table_element_location_model_json).__dict__
+ table_element_location_model2 = TableElementLocation(**table_element_location_model_dict)
+
+ # Verify the model instances are equivalent
+ assert table_element_location_model == table_element_location_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ table_element_location_model_json2 = table_element_location_model.to_dict()
+ assert table_element_location_model_json2 == table_element_location_model_json
+
+
+class TestModel_TableHeaders:
+ """
+ Test Class for TableHeaders
+ """
+
+ def test_table_headers_serialization(self):
+ """
+ Test serialization/deserialization for TableHeaders
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ table_element_location_model = {} # TableElementLocation
+ table_element_location_model['begin'] = 26
+ table_element_location_model['end'] = 26
+
+ # Construct a json representation of a TableHeaders model
+ table_headers_model_json = {}
+ table_headers_model_json['cell_id'] = 'testString'
+ table_headers_model_json['location'] = table_element_location_model
+ table_headers_model_json['text'] = 'testString'
+ table_headers_model_json['row_index_begin'] = 26
+ table_headers_model_json['row_index_end'] = 26
+ table_headers_model_json['column_index_begin'] = 26
+ table_headers_model_json['column_index_end'] = 26
+
+ # Construct a model instance of TableHeaders by calling from_dict on the json representation
+ table_headers_model = TableHeaders.from_dict(table_headers_model_json)
+ assert table_headers_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ table_headers_model_dict = TableHeaders.from_dict(table_headers_model_json).__dict__
+ table_headers_model2 = TableHeaders(**table_headers_model_dict)
+
+ # Verify the model instances are equivalent
+ assert table_headers_model == table_headers_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ table_headers_model_json2 = table_headers_model.to_dict()
+ assert table_headers_model_json2 == table_headers_model_json
+
+
+class TestModel_TableKeyValuePairs:
+ """
+ Test Class for TableKeyValuePairs
+ """
+
+ def test_table_key_value_pairs_serialization(self):
+ """
+ Test serialization/deserialization for TableKeyValuePairs
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ table_element_location_model = {} # TableElementLocation
+ table_element_location_model['begin'] = 26
+ table_element_location_model['end'] = 26
+
+ table_cell_key_model = {} # TableCellKey
+ table_cell_key_model['cell_id'] = 'testString'
+ table_cell_key_model['location'] = table_element_location_model
+ table_cell_key_model['text'] = 'testString'
+
+ table_cell_values_model = {} # TableCellValues
+ table_cell_values_model['cell_id'] = 'testString'
+ table_cell_values_model['location'] = table_element_location_model
+ table_cell_values_model['text'] = 'testString'
+
+ # Construct a json representation of a TableKeyValuePairs model
+ table_key_value_pairs_model_json = {}
+ table_key_value_pairs_model_json['key'] = table_cell_key_model
+ table_key_value_pairs_model_json['value'] = [table_cell_values_model]
+
+ # Construct a model instance of TableKeyValuePairs by calling from_dict on the json representation
+ table_key_value_pairs_model = TableKeyValuePairs.from_dict(table_key_value_pairs_model_json)
+ assert table_key_value_pairs_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ table_key_value_pairs_model_dict = TableKeyValuePairs.from_dict(table_key_value_pairs_model_json).__dict__
+ table_key_value_pairs_model2 = TableKeyValuePairs(**table_key_value_pairs_model_dict)
+
+ # Verify the model instances are equivalent
+ assert table_key_value_pairs_model == table_key_value_pairs_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ table_key_value_pairs_model_json2 = table_key_value_pairs_model.to_dict()
+ assert table_key_value_pairs_model_json2 == table_key_value_pairs_model_json
+
+
+class TestModel_TableResultTable:
+ """
+ Test Class for TableResultTable
+ """
+
+ def test_table_result_table_serialization(self):
+ """
+ Test serialization/deserialization for TableResultTable
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ table_element_location_model = {} # TableElementLocation
+ table_element_location_model['begin'] = 26
+ table_element_location_model['end'] = 26
+
+ table_text_location_model = {} # TableTextLocation
+ table_text_location_model['text'] = 'testString'
+ table_text_location_model['location'] = table_element_location_model
+
+ table_headers_model = {} # TableHeaders
+ table_headers_model['cell_id'] = 'testString'
+ table_headers_model['location'] = table_element_location_model
+ table_headers_model['text'] = 'testString'
+ table_headers_model['row_index_begin'] = 26
+ table_headers_model['row_index_end'] = 26
+ table_headers_model['column_index_begin'] = 26
+ table_headers_model['column_index_end'] = 26
+
+ table_row_headers_model = {} # TableRowHeaders
+ table_row_headers_model['cell_id'] = 'testString'
+ table_row_headers_model['location'] = table_element_location_model
+ table_row_headers_model['text'] = 'testString'
+ table_row_headers_model['text_normalized'] = 'testString'
+ table_row_headers_model['row_index_begin'] = 26
+ table_row_headers_model['row_index_end'] = 26
+ table_row_headers_model['column_index_begin'] = 26
+ table_row_headers_model['column_index_end'] = 26
+
+ table_column_headers_model = {} # TableColumnHeaders
+ table_column_headers_model['cell_id'] = 'testString'
+ table_column_headers_model['location'] = table_element_location_model
+ table_column_headers_model['text'] = 'testString'
+ table_column_headers_model['text_normalized'] = 'testString'
+ table_column_headers_model['row_index_begin'] = 26
+ table_column_headers_model['row_index_end'] = 26
+ table_column_headers_model['column_index_begin'] = 26
+ table_column_headers_model['column_index_end'] = 26
+
+ table_cell_key_model = {} # TableCellKey
+ table_cell_key_model['cell_id'] = 'testString'
+ table_cell_key_model['location'] = table_element_location_model
+ table_cell_key_model['text'] = 'testString'
+
+ table_cell_values_model = {} # TableCellValues
+ table_cell_values_model['cell_id'] = 'testString'
+ table_cell_values_model['location'] = table_element_location_model
+ table_cell_values_model['text'] = 'testString'
+
+ table_key_value_pairs_model = {} # TableKeyValuePairs
+ table_key_value_pairs_model['key'] = table_cell_key_model
+ table_key_value_pairs_model['value'] = [table_cell_values_model]
+
+ document_attribute_model = {} # DocumentAttribute
+ document_attribute_model['type'] = 'testString'
+ document_attribute_model['text'] = 'testString'
+ document_attribute_model['location'] = table_element_location_model
+
+ table_body_cells_model = {} # TableBodyCells
+ table_body_cells_model['cell_id'] = 'testString'
+ table_body_cells_model['location'] = table_element_location_model
+ table_body_cells_model['text'] = 'testString'
+ table_body_cells_model['row_index_begin'] = 26
+ table_body_cells_model['row_index_end'] = 26
+ table_body_cells_model['column_index_begin'] = 26
+ table_body_cells_model['column_index_end'] = 26
+ table_body_cells_model['row_header_ids'] = ['testString']
+ table_body_cells_model['row_header_texts'] = ['testString']
+ table_body_cells_model['row_header_texts_normalized'] = ['testString']
+ table_body_cells_model['column_header_ids'] = ['testString']
+ table_body_cells_model['column_header_texts'] = ['testString']
+ table_body_cells_model['column_header_texts_normalized'] = ['testString']
+ table_body_cells_model['attributes'] = [document_attribute_model]
+
+ # Construct a json representation of a TableResultTable model
+ table_result_table_model_json = {}
+ table_result_table_model_json['location'] = table_element_location_model
+ table_result_table_model_json['text'] = 'testString'
+ table_result_table_model_json['section_title'] = table_text_location_model
+ table_result_table_model_json['title'] = table_text_location_model
+ table_result_table_model_json['table_headers'] = [table_headers_model]
+ table_result_table_model_json['row_headers'] = [table_row_headers_model]
+ table_result_table_model_json['column_headers'] = [table_column_headers_model]
+ table_result_table_model_json['key_value_pairs'] = [table_key_value_pairs_model]
+ table_result_table_model_json['body_cells'] = [table_body_cells_model]
+ table_result_table_model_json['contexts'] = [table_text_location_model]
+
+ # Construct a model instance of TableResultTable by calling from_dict on the json representation
+ table_result_table_model = TableResultTable.from_dict(table_result_table_model_json)
+ assert table_result_table_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ table_result_table_model_dict = TableResultTable.from_dict(table_result_table_model_json).__dict__
+ table_result_table_model2 = TableResultTable(**table_result_table_model_dict)
+
+ # Verify the model instances are equivalent
+ assert table_result_table_model == table_result_table_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ table_result_table_model_json2 = table_result_table_model.to_dict()
+ assert table_result_table_model_json2 == table_result_table_model_json
+
+
+class TestModel_TableRowHeaders:
+ """
+ Test Class for TableRowHeaders
+ """
+
+ def test_table_row_headers_serialization(self):
+ """
+ Test serialization/deserialization for TableRowHeaders
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ table_element_location_model = {} # TableElementLocation
+ table_element_location_model['begin'] = 26
+ table_element_location_model['end'] = 26
+
+ # Construct a json representation of a TableRowHeaders model
+ table_row_headers_model_json = {}
+ table_row_headers_model_json['cell_id'] = 'testString'
+ table_row_headers_model_json['location'] = table_element_location_model
+ table_row_headers_model_json['text'] = 'testString'
+ table_row_headers_model_json['text_normalized'] = 'testString'
+ table_row_headers_model_json['row_index_begin'] = 26
+ table_row_headers_model_json['row_index_end'] = 26
+ table_row_headers_model_json['column_index_begin'] = 26
+ table_row_headers_model_json['column_index_end'] = 26
+
+ # Construct a model instance of TableRowHeaders by calling from_dict on the json representation
+ table_row_headers_model = TableRowHeaders.from_dict(table_row_headers_model_json)
+ assert table_row_headers_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ table_row_headers_model_dict = TableRowHeaders.from_dict(table_row_headers_model_json).__dict__
+ table_row_headers_model2 = TableRowHeaders(**table_row_headers_model_dict)
+
+ # Verify the model instances are equivalent
+ assert table_row_headers_model == table_row_headers_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ table_row_headers_model_json2 = table_row_headers_model.to_dict()
+ assert table_row_headers_model_json2 == table_row_headers_model_json
+
+
+class TestModel_TableTextLocation:
+ """
+ Test Class for TableTextLocation
+ """
+
+ def test_table_text_location_serialization(self):
+ """
+ Test serialization/deserialization for TableTextLocation
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ table_element_location_model = {} # TableElementLocation
+ table_element_location_model['begin'] = 26
+ table_element_location_model['end'] = 26
+
+ # Construct a json representation of a TableTextLocation model
+ table_text_location_model_json = {}
+ table_text_location_model_json['text'] = 'testString'
+ table_text_location_model_json['location'] = table_element_location_model
+
+ # Construct a model instance of TableTextLocation by calling from_dict on the json representation
+ table_text_location_model = TableTextLocation.from_dict(table_text_location_model_json)
+ assert table_text_location_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ table_text_location_model_dict = TableTextLocation.from_dict(table_text_location_model_json).__dict__
+ table_text_location_model2 = TableTextLocation(**table_text_location_model_dict)
+
+ # Verify the model instances are equivalent
+ assert table_text_location_model == table_text_location_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ table_text_location_model_json2 = table_text_location_model.to_dict()
+ assert table_text_location_model_json2 == table_text_location_model_json
+
+
+class TestModel_TrainingExample:
+ """
+ Test Class for TrainingExample
+ """
+
+ def test_training_example_serialization(self):
+ """
+ Test serialization/deserialization for TrainingExample
+ """
+
+ # Construct a json representation of a TrainingExample model
+ training_example_model_json = {}
+ training_example_model_json['document_id'] = 'testString'
+ training_example_model_json['collection_id'] = 'testString'
+ training_example_model_json['relevance'] = 38
+
+ # Construct a model instance of TrainingExample by calling from_dict on the json representation
+ training_example_model = TrainingExample.from_dict(training_example_model_json)
+ assert training_example_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ training_example_model_dict = TrainingExample.from_dict(training_example_model_json).__dict__
+ training_example_model2 = TrainingExample(**training_example_model_dict)
+
+ # Verify the model instances are equivalent
+ assert training_example_model == training_example_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ training_example_model_json2 = training_example_model.to_dict()
+ assert training_example_model_json2 == training_example_model_json
+
+
+class TestModel_TrainingQuery:
+ """
+ Test Class for TrainingQuery
+ """
+
+ def test_training_query_serialization(self):
+ """
+ Test serialization/deserialization for TrainingQuery
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ training_example_model = {} # TrainingExample
+ training_example_model['document_id'] = 'testString'
+ training_example_model['collection_id'] = 'testString'
+ training_example_model['relevance'] = 38
+
+ # Construct a json representation of a TrainingQuery model
+ training_query_model_json = {}
+ training_query_model_json['natural_language_query'] = 'testString'
+ training_query_model_json['filter'] = 'testString'
+ training_query_model_json['examples'] = [training_example_model]
+
+ # Construct a model instance of TrainingQuery by calling from_dict on the json representation
+ training_query_model = TrainingQuery.from_dict(training_query_model_json)
+ assert training_query_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ training_query_model_dict = TrainingQuery.from_dict(training_query_model_json).__dict__
+ training_query_model2 = TrainingQuery(**training_query_model_dict)
+
+ # Verify the model instances are equivalent
+ assert training_query_model == training_query_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ training_query_model_json2 = training_query_model.to_dict()
+ assert training_query_model_json2 == training_query_model_json
+
+
+class TestModel_TrainingQuerySet:
+ """
+ Test Class for TrainingQuerySet
+ """
+
+ def test_training_query_set_serialization(self):
+ """
+ Test serialization/deserialization for TrainingQuerySet
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ training_example_model = {} # TrainingExample
+ training_example_model['document_id'] = 'testString'
+ training_example_model['collection_id'] = 'testString'
+ training_example_model['relevance'] = 38
+
+ training_query_model = {} # TrainingQuery
+ training_query_model['natural_language_query'] = 'testString'
+ training_query_model['filter'] = 'testString'
+ training_query_model['examples'] = [training_example_model]
+
+ # Construct a json representation of a TrainingQuerySet model
+ training_query_set_model_json = {}
+ training_query_set_model_json['queries'] = [training_query_model]
+
+ # Construct a model instance of TrainingQuerySet by calling from_dict on the json representation
+ training_query_set_model = TrainingQuerySet.from_dict(training_query_set_model_json)
+ assert training_query_set_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ training_query_set_model_dict = TrainingQuerySet.from_dict(training_query_set_model_json).__dict__
+ training_query_set_model2 = TrainingQuerySet(**training_query_set_model_dict)
+
+ # Verify the model instances are equivalent
+ assert training_query_set_model == training_query_set_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ training_query_set_model_json2 = training_query_set_model.to_dict()
+ assert training_query_set_model_json2 == training_query_set_model_json
+
+
+class TestModel_UpdateDocumentClassifier:
+ """
+ Test Class for UpdateDocumentClassifier
+ """
+
+ def test_update_document_classifier_serialization(self):
+ """
+ Test serialization/deserialization for UpdateDocumentClassifier
+ """
+
+ # Construct a json representation of a UpdateDocumentClassifier model
+ update_document_classifier_model_json = {}
+ update_document_classifier_model_json['name'] = 'testString'
+ update_document_classifier_model_json['description'] = 'testString'
+
+ # Construct a model instance of UpdateDocumentClassifier by calling from_dict on the json representation
+ update_document_classifier_model = UpdateDocumentClassifier.from_dict(update_document_classifier_model_json)
+ assert update_document_classifier_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ update_document_classifier_model_dict = UpdateDocumentClassifier.from_dict(update_document_classifier_model_json).__dict__
+ update_document_classifier_model2 = UpdateDocumentClassifier(**update_document_classifier_model_dict)
+
+ # Verify the model instances are equivalent
+ assert update_document_classifier_model == update_document_classifier_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ update_document_classifier_model_json2 = update_document_classifier_model.to_dict()
+ assert update_document_classifier_model_json2 == update_document_classifier_model_json
+
+
+class TestModel_WebhookHeader:
+ """
+ Test Class for WebhookHeader
+ """
+
+ def test_webhook_header_serialization(self):
+ """
+ Test serialization/deserialization for WebhookHeader
+ """
+
+ # Construct a json representation of a WebhookHeader model
+ webhook_header_model_json = {}
+ webhook_header_model_json['name'] = 'testString'
+ webhook_header_model_json['value'] = 'testString'
+
+ # Construct a model instance of WebhookHeader by calling from_dict on the json representation
+ webhook_header_model = WebhookHeader.from_dict(webhook_header_model_json)
+ assert webhook_header_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ webhook_header_model_dict = WebhookHeader.from_dict(webhook_header_model_json).__dict__
+ webhook_header_model2 = WebhookHeader(**webhook_header_model_dict)
+
+ # Verify the model instances are equivalent
+ assert webhook_header_model == webhook_header_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ webhook_header_model_json2 = webhook_header_model.to_dict()
+ assert webhook_header_model_json2 == webhook_header_model_json
+
+
+class TestModel_PullBatchesResponse:
+ """
+ Test Class for PullBatchesResponse
+ """
+
+ def test_pull_batches_response_serialization(self):
+ """
+ Test serialization/deserialization for PullBatchesResponse
+ """
+
+ # Construct a json representation of a PullBatchesResponse model
+ pull_batches_response_model_json = {}
+ pull_batches_response_model_json['file'] = 'testString'
+
+ # Construct a model instance of PullBatchesResponse by calling from_dict on the json representation
+ pull_batches_response_model = PullBatchesResponse.from_dict(pull_batches_response_model_json)
+ assert pull_batches_response_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ pull_batches_response_model_dict = PullBatchesResponse.from_dict(pull_batches_response_model_json).__dict__
+ pull_batches_response_model2 = PullBatchesResponse(**pull_batches_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert pull_batches_response_model == pull_batches_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ pull_batches_response_model_json2 = pull_batches_response_model.to_dict()
+ assert pull_batches_response_model_json2 == pull_batches_response_model_json
+
+
+class TestModel_QueryAggregationQueryCalculationAggregation:
+ """
+ Test Class for QueryAggregationQueryCalculationAggregation
+ """
+
+ def test_query_aggregation_query_calculation_aggregation_serialization(self):
+ """
+ Test serialization/deserialization for QueryAggregationQueryCalculationAggregation
+ """
+
+ # Construct a json representation of a QueryAggregationQueryCalculationAggregation model
+ query_aggregation_query_calculation_aggregation_model_json = {}
+ query_aggregation_query_calculation_aggregation_model_json['type'] = 'unique_count'
+ query_aggregation_query_calculation_aggregation_model_json['field'] = 'testString'
+ query_aggregation_query_calculation_aggregation_model_json['value'] = 72.5
+
+ # Construct a model instance of QueryAggregationQueryCalculationAggregation by calling from_dict on the json representation
+ query_aggregation_query_calculation_aggregation_model = QueryAggregationQueryCalculationAggregation.from_dict(query_aggregation_query_calculation_aggregation_model_json)
+ assert query_aggregation_query_calculation_aggregation_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ query_aggregation_query_calculation_aggregation_model_dict = QueryAggregationQueryCalculationAggregation.from_dict(query_aggregation_query_calculation_aggregation_model_json).__dict__
+ query_aggregation_query_calculation_aggregation_model2 = QueryAggregationQueryCalculationAggregation(**query_aggregation_query_calculation_aggregation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert query_aggregation_query_calculation_aggregation_model == query_aggregation_query_calculation_aggregation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ query_aggregation_query_calculation_aggregation_model_json2 = query_aggregation_query_calculation_aggregation_model.to_dict()
+ assert query_aggregation_query_calculation_aggregation_model_json2 == query_aggregation_query_calculation_aggregation_model_json
+
+
+class TestModel_QueryAggregationQueryFilterAggregation:
+ """
+ Test Class for QueryAggregationQueryFilterAggregation
+ """
+
+ def test_query_aggregation_query_filter_aggregation_serialization(self):
+ """
+ Test serialization/deserialization for QueryAggregationQueryFilterAggregation
+ """
+
+ # Construct a json representation of a QueryAggregationQueryFilterAggregation model
+ query_aggregation_query_filter_aggregation_model_json = {}
+ query_aggregation_query_filter_aggregation_model_json['type'] = 'filter'
+ query_aggregation_query_filter_aggregation_model_json['match'] = 'testString'
+ query_aggregation_query_filter_aggregation_model_json['matching_results'] = 26
+ query_aggregation_query_filter_aggregation_model_json['aggregations'] = [{'anyKey': 'anyValue'}]
+
+ # Construct a model instance of QueryAggregationQueryFilterAggregation by calling from_dict on the json representation
+ query_aggregation_query_filter_aggregation_model = QueryAggregationQueryFilterAggregation.from_dict(query_aggregation_query_filter_aggregation_model_json)
+ assert query_aggregation_query_filter_aggregation_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ query_aggregation_query_filter_aggregation_model_dict = QueryAggregationQueryFilterAggregation.from_dict(query_aggregation_query_filter_aggregation_model_json).__dict__
+ query_aggregation_query_filter_aggregation_model2 = QueryAggregationQueryFilterAggregation(**query_aggregation_query_filter_aggregation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert query_aggregation_query_filter_aggregation_model == query_aggregation_query_filter_aggregation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ query_aggregation_query_filter_aggregation_model_json2 = query_aggregation_query_filter_aggregation_model.to_dict()
+ assert query_aggregation_query_filter_aggregation_model_json2 == query_aggregation_query_filter_aggregation_model_json
+
+
+class TestModel_QueryAggregationQueryGroupByAggregation:
+ """
+ Test Class for QueryAggregationQueryGroupByAggregation
+ """
+
+ def test_query_aggregation_query_group_by_aggregation_serialization(self):
+ """
+ Test serialization/deserialization for QueryAggregationQueryGroupByAggregation
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ query_group_by_aggregation_result_model = {} # QueryGroupByAggregationResult
+ query_group_by_aggregation_result_model['key'] = 'testString'
+ query_group_by_aggregation_result_model['matching_results'] = 38
+ query_group_by_aggregation_result_model['relevancy'] = 72.5
+ query_group_by_aggregation_result_model['total_matching_documents'] = 38
+ query_group_by_aggregation_result_model['estimated_matching_results'] = 72.5
+ query_group_by_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}]
+
+ # Construct a json representation of a QueryAggregationQueryGroupByAggregation model
+ query_aggregation_query_group_by_aggregation_model_json = {}
+ query_aggregation_query_group_by_aggregation_model_json['type'] = 'group_by'
+ query_aggregation_query_group_by_aggregation_model_json['results'] = [query_group_by_aggregation_result_model]
+
+ # Construct a model instance of QueryAggregationQueryGroupByAggregation by calling from_dict on the json representation
+ query_aggregation_query_group_by_aggregation_model = QueryAggregationQueryGroupByAggregation.from_dict(query_aggregation_query_group_by_aggregation_model_json)
+ assert query_aggregation_query_group_by_aggregation_model != False
+
+ # Construct a second model instance from the first instance's attribute dict
+ query_aggregation_query_group_by_aggregation_model_dict = QueryAggregationQueryGroupByAggregation.from_dict(query_aggregation_query_group_by_aggregation_model_json).__dict__
+ query_aggregation_query_group_by_aggregation_model2 = QueryAggregationQueryGroupByAggregation(**query_aggregation_query_group_by_aggregation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert query_aggregation_query_group_by_aggregation_model == query_aggregation_query_group_by_aggregation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ query_aggregation_query_group_by_aggregation_model_json2 = query_aggregation_query_group_by_aggregation_model.to_dict()
+ assert query_aggregation_query_group_by_aggregation_model_json2 == query_aggregation_query_group_by_aggregation_model_json
+
+
+class TestModel_QueryAggregationQueryHistogramAggregation:
+ """
+ Test Class for QueryAggregationQueryHistogramAggregation
+ """
+
+ def test_query_aggregation_query_histogram_aggregation_serialization(self):
+ """
+ Test serialization/deserialization for QueryAggregationQueryHistogramAggregation
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ query_histogram_aggregation_result_model = {} # QueryHistogramAggregationResult
+ query_histogram_aggregation_result_model['key'] = 26
+ query_histogram_aggregation_result_model['matching_results'] = 38
+ query_histogram_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}]
+
+ # Construct a json representation of a QueryAggregationQueryHistogramAggregation model
+ query_aggregation_query_histogram_aggregation_model_json = {}
+ query_aggregation_query_histogram_aggregation_model_json['type'] = 'histogram'
+ query_aggregation_query_histogram_aggregation_model_json['field'] = 'testString'
+ query_aggregation_query_histogram_aggregation_model_json['interval'] = 38
+ query_aggregation_query_histogram_aggregation_model_json['name'] = 'testString'
+ query_aggregation_query_histogram_aggregation_model_json['results'] = [query_histogram_aggregation_result_model]
+
+ # Construct a model instance of QueryAggregationQueryHistogramAggregation by calling from_dict on the json representation
+ query_aggregation_query_histogram_aggregation_model = QueryAggregationQueryHistogramAggregation.from_dict(query_aggregation_query_histogram_aggregation_model_json)
+ assert query_aggregation_query_histogram_aggregation_model != False
+
+ # Construct a model instance of QueryAggregationQueryHistogramAggregation by calling from_dict on the json representation
+ query_aggregation_query_histogram_aggregation_model_dict = QueryAggregationQueryHistogramAggregation.from_dict(query_aggregation_query_histogram_aggregation_model_json).__dict__
+ query_aggregation_query_histogram_aggregation_model2 = QueryAggregationQueryHistogramAggregation(**query_aggregation_query_histogram_aggregation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert query_aggregation_query_histogram_aggregation_model == query_aggregation_query_histogram_aggregation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ query_aggregation_query_histogram_aggregation_model_json2 = query_aggregation_query_histogram_aggregation_model.to_dict()
+ assert query_aggregation_query_histogram_aggregation_model_json2 == query_aggregation_query_histogram_aggregation_model_json
+
+
+class TestModel_QueryAggregationQueryNestedAggregation:
+ """
+ Test Class for QueryAggregationQueryNestedAggregation
+ """
+
+ def test_query_aggregation_query_nested_aggregation_serialization(self):
+ """
+ Test serialization/deserialization for QueryAggregationQueryNestedAggregation
+ """
+
+ # Construct a json representation of a QueryAggregationQueryNestedAggregation model
+ query_aggregation_query_nested_aggregation_model_json = {}
+ query_aggregation_query_nested_aggregation_model_json['type'] = 'nested'
+ query_aggregation_query_nested_aggregation_model_json['path'] = 'testString'
+ query_aggregation_query_nested_aggregation_model_json['matching_results'] = 26
+ query_aggregation_query_nested_aggregation_model_json['aggregations'] = [{'anyKey': 'anyValue'}]
+
+ # Construct a model instance of QueryAggregationQueryNestedAggregation by calling from_dict on the json representation
+ query_aggregation_query_nested_aggregation_model = QueryAggregationQueryNestedAggregation.from_dict(query_aggregation_query_nested_aggregation_model_json)
+ assert query_aggregation_query_nested_aggregation_model != False
+
+ # Construct a model instance of QueryAggregationQueryNestedAggregation by calling from_dict on the json representation
+ query_aggregation_query_nested_aggregation_model_dict = QueryAggregationQueryNestedAggregation.from_dict(query_aggregation_query_nested_aggregation_model_json).__dict__
+ query_aggregation_query_nested_aggregation_model2 = QueryAggregationQueryNestedAggregation(**query_aggregation_query_nested_aggregation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert query_aggregation_query_nested_aggregation_model == query_aggregation_query_nested_aggregation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ query_aggregation_query_nested_aggregation_model_json2 = query_aggregation_query_nested_aggregation_model.to_dict()
+ assert query_aggregation_query_nested_aggregation_model_json2 == query_aggregation_query_nested_aggregation_model_json
+
+
+class TestModel_QueryAggregationQueryPairAggregation:
+ """
+ Test Class for QueryAggregationQueryPairAggregation
+ """
+
+ def test_query_aggregation_query_pair_aggregation_serialization(self):
+ """
+ Test serialization/deserialization for QueryAggregationQueryPairAggregation
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ query_pair_aggregation_result_model = {} # QueryPairAggregationResult
+ query_pair_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}]
+
+ # Construct a json representation of a QueryAggregationQueryPairAggregation model
+ query_aggregation_query_pair_aggregation_model_json = {}
+ query_aggregation_query_pair_aggregation_model_json['type'] = 'pair'
+ query_aggregation_query_pair_aggregation_model_json['first'] = 'testString'
+ query_aggregation_query_pair_aggregation_model_json['second'] = 'testString'
+ query_aggregation_query_pair_aggregation_model_json['show_estimated_matching_results'] = False
+ query_aggregation_query_pair_aggregation_model_json['show_total_matching_documents'] = False
+ query_aggregation_query_pair_aggregation_model_json['results'] = [query_pair_aggregation_result_model]
+
+ # Construct a model instance of QueryAggregationQueryPairAggregation by calling from_dict on the json representation
+ query_aggregation_query_pair_aggregation_model = QueryAggregationQueryPairAggregation.from_dict(query_aggregation_query_pair_aggregation_model_json)
+ assert query_aggregation_query_pair_aggregation_model != False
+
+ # Construct a model instance of QueryAggregationQueryPairAggregation by calling from_dict on the json representation
+ query_aggregation_query_pair_aggregation_model_dict = QueryAggregationQueryPairAggregation.from_dict(query_aggregation_query_pair_aggregation_model_json).__dict__
+ query_aggregation_query_pair_aggregation_model2 = QueryAggregationQueryPairAggregation(**query_aggregation_query_pair_aggregation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert query_aggregation_query_pair_aggregation_model == query_aggregation_query_pair_aggregation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ query_aggregation_query_pair_aggregation_model_json2 = query_aggregation_query_pair_aggregation_model.to_dict()
+ assert query_aggregation_query_pair_aggregation_model_json2 == query_aggregation_query_pair_aggregation_model_json
+
+
+class TestModel_QueryAggregationQueryTermAggregation:
+ """
+ Test Class for QueryAggregationQueryTermAggregation
+ """
+
+ def test_query_aggregation_query_term_aggregation_serialization(self):
+ """
+ Test serialization/deserialization for QueryAggregationQueryTermAggregation
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ query_term_aggregation_result_model = {} # QueryTermAggregationResult
+ query_term_aggregation_result_model['key'] = 'testString'
+ query_term_aggregation_result_model['matching_results'] = 38
+ query_term_aggregation_result_model['relevancy'] = 72.5
+ query_term_aggregation_result_model['total_matching_documents'] = 38
+ query_term_aggregation_result_model['estimated_matching_results'] = 72.5
+ query_term_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}]
+
+ # Construct a json representation of a QueryAggregationQueryTermAggregation model
+ query_aggregation_query_term_aggregation_model_json = {}
+ query_aggregation_query_term_aggregation_model_json['type'] = 'term'
+ query_aggregation_query_term_aggregation_model_json['field'] = 'testString'
+ query_aggregation_query_term_aggregation_model_json['count'] = 38
+ query_aggregation_query_term_aggregation_model_json['name'] = 'testString'
+ query_aggregation_query_term_aggregation_model_json['results'] = [query_term_aggregation_result_model]
+
+ # Construct a model instance of QueryAggregationQueryTermAggregation by calling from_dict on the json representation
+ query_aggregation_query_term_aggregation_model = QueryAggregationQueryTermAggregation.from_dict(query_aggregation_query_term_aggregation_model_json)
+ assert query_aggregation_query_term_aggregation_model != False
+
+ # Construct a model instance of QueryAggregationQueryTermAggregation by calling from_dict on the json representation
+ query_aggregation_query_term_aggregation_model_dict = QueryAggregationQueryTermAggregation.from_dict(query_aggregation_query_term_aggregation_model_json).__dict__
+ query_aggregation_query_term_aggregation_model2 = QueryAggregationQueryTermAggregation(**query_aggregation_query_term_aggregation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert query_aggregation_query_term_aggregation_model == query_aggregation_query_term_aggregation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ query_aggregation_query_term_aggregation_model_json2 = query_aggregation_query_term_aggregation_model.to_dict()
+ assert query_aggregation_query_term_aggregation_model_json2 == query_aggregation_query_term_aggregation_model_json
+
+
+class TestModel_QueryAggregationQueryTimesliceAggregation:
+ """
+ Test Class for QueryAggregationQueryTimesliceAggregation
+ """
+
+ def test_query_aggregation_query_timeslice_aggregation_serialization(self):
+ """
+ Test serialization/deserialization for QueryAggregationQueryTimesliceAggregation
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ query_timeslice_aggregation_result_model = {} # QueryTimesliceAggregationResult
+ query_timeslice_aggregation_result_model['key_as_string'] = 'testString'
+ query_timeslice_aggregation_result_model['key'] = 26
+ query_timeslice_aggregation_result_model['matching_results'] = 26
+ query_timeslice_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}]
+
+ # Construct a json representation of a QueryAggregationQueryTimesliceAggregation model
+ query_aggregation_query_timeslice_aggregation_model_json = {}
+ query_aggregation_query_timeslice_aggregation_model_json['type'] = 'timeslice'
+ query_aggregation_query_timeslice_aggregation_model_json['field'] = 'testString'
+ query_aggregation_query_timeslice_aggregation_model_json['interval'] = 'testString'
+ query_aggregation_query_timeslice_aggregation_model_json['name'] = 'testString'
+ query_aggregation_query_timeslice_aggregation_model_json['results'] = [query_timeslice_aggregation_result_model]
+
+ # Construct a model instance of QueryAggregationQueryTimesliceAggregation by calling from_dict on the json representation
+ query_aggregation_query_timeslice_aggregation_model = QueryAggregationQueryTimesliceAggregation.from_dict(query_aggregation_query_timeslice_aggregation_model_json)
+ assert query_aggregation_query_timeslice_aggregation_model != False
+
+ # Construct a model instance of QueryAggregationQueryTimesliceAggregation by calling from_dict on the json representation
+ query_aggregation_query_timeslice_aggregation_model_dict = QueryAggregationQueryTimesliceAggregation.from_dict(query_aggregation_query_timeslice_aggregation_model_json).__dict__
+ query_aggregation_query_timeslice_aggregation_model2 = QueryAggregationQueryTimesliceAggregation(**query_aggregation_query_timeslice_aggregation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert query_aggregation_query_timeslice_aggregation_model == query_aggregation_query_timeslice_aggregation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ query_aggregation_query_timeslice_aggregation_model_json2 = query_aggregation_query_timeslice_aggregation_model.to_dict()
+ assert query_aggregation_query_timeslice_aggregation_model_json2 == query_aggregation_query_timeslice_aggregation_model_json
+
+
+class TestModel_QueryAggregationQueryTopHitsAggregation:
+ """
+ Test Class for QueryAggregationQueryTopHitsAggregation
+ """
+
+ def test_query_aggregation_query_top_hits_aggregation_serialization(self):
+ """
+ Test serialization/deserialization for QueryAggregationQueryTopHitsAggregation
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ query_top_hits_aggregation_result_model = {} # QueryTopHitsAggregationResult
+ query_top_hits_aggregation_result_model['matching_results'] = 38
+ query_top_hits_aggregation_result_model['hits'] = [{'anyKey': 'anyValue'}]
+
+ # Construct a json representation of a QueryAggregationQueryTopHitsAggregation model
+ query_aggregation_query_top_hits_aggregation_model_json = {}
+ query_aggregation_query_top_hits_aggregation_model_json['type'] = 'top_hits'
+ query_aggregation_query_top_hits_aggregation_model_json['size'] = 38
+ query_aggregation_query_top_hits_aggregation_model_json['name'] = 'testString'
+ query_aggregation_query_top_hits_aggregation_model_json['hits'] = query_top_hits_aggregation_result_model
+
+ # Construct a model instance of QueryAggregationQueryTopHitsAggregation by calling from_dict on the json representation
+ query_aggregation_query_top_hits_aggregation_model = QueryAggregationQueryTopHitsAggregation.from_dict(query_aggregation_query_top_hits_aggregation_model_json)
+ assert query_aggregation_query_top_hits_aggregation_model != False
+
+ # Construct a model instance of QueryAggregationQueryTopHitsAggregation by calling from_dict on the json representation
+ query_aggregation_query_top_hits_aggregation_model_dict = QueryAggregationQueryTopHitsAggregation.from_dict(query_aggregation_query_top_hits_aggregation_model_json).__dict__
+ query_aggregation_query_top_hits_aggregation_model2 = QueryAggregationQueryTopHitsAggregation(**query_aggregation_query_top_hits_aggregation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert query_aggregation_query_top_hits_aggregation_model == query_aggregation_query_top_hits_aggregation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ query_aggregation_query_top_hits_aggregation_model_json2 = query_aggregation_query_top_hits_aggregation_model.to_dict()
+ assert query_aggregation_query_top_hits_aggregation_model_json2 == query_aggregation_query_top_hits_aggregation_model_json
+
+
+class TestModel_QueryAggregationQueryTopicAggregation:
+ """
+ Test Class for QueryAggregationQueryTopicAggregation
+ """
+
+ def test_query_aggregation_query_topic_aggregation_serialization(self):
+ """
+ Test serialization/deserialization for QueryAggregationQueryTopicAggregation
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ query_topic_aggregation_result_model = {} # QueryTopicAggregationResult
+ query_topic_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}]
+
+ # Construct a json representation of a QueryAggregationQueryTopicAggregation model
+ query_aggregation_query_topic_aggregation_model_json = {}
+ query_aggregation_query_topic_aggregation_model_json['type'] = 'topic'
+ query_aggregation_query_topic_aggregation_model_json['facet'] = 'testString'
+ query_aggregation_query_topic_aggregation_model_json['time_segments'] = 'testString'
+ query_aggregation_query_topic_aggregation_model_json['show_estimated_matching_results'] = False
+ query_aggregation_query_topic_aggregation_model_json['show_total_matching_documents'] = False
+ query_aggregation_query_topic_aggregation_model_json['results'] = [query_topic_aggregation_result_model]
+
+ # Construct a model instance of QueryAggregationQueryTopicAggregation by calling from_dict on the json representation
+ query_aggregation_query_topic_aggregation_model = QueryAggregationQueryTopicAggregation.from_dict(query_aggregation_query_topic_aggregation_model_json)
+ assert query_aggregation_query_topic_aggregation_model != False
+
+ # Construct a model instance of QueryAggregationQueryTopicAggregation by calling from_dict on the json representation
+ query_aggregation_query_topic_aggregation_model_dict = QueryAggregationQueryTopicAggregation.from_dict(query_aggregation_query_topic_aggregation_model_json).__dict__
+ query_aggregation_query_topic_aggregation_model2 = QueryAggregationQueryTopicAggregation(**query_aggregation_query_topic_aggregation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert query_aggregation_query_topic_aggregation_model == query_aggregation_query_topic_aggregation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ query_aggregation_query_topic_aggregation_model_json2 = query_aggregation_query_topic_aggregation_model.to_dict()
+ assert query_aggregation_query_topic_aggregation_model_json2 == query_aggregation_query_topic_aggregation_model_json
+
+
+class TestModel_QueryAggregationQueryTrendAggregation:
+ """
+ Test Class for QueryAggregationQueryTrendAggregation
+ """
+
+ def test_query_aggregation_query_trend_aggregation_serialization(self):
+ """
+ Test serialization/deserialization for QueryAggregationQueryTrendAggregation
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ query_trend_aggregation_result_model = {} # QueryTrendAggregationResult
+ query_trend_aggregation_result_model['aggregations'] = [{'anyKey': 'anyValue'}]
+
+ # Construct a json representation of a QueryAggregationQueryTrendAggregation model
+ query_aggregation_query_trend_aggregation_model_json = {}
+ query_aggregation_query_trend_aggregation_model_json['type'] = 'trend'
+ query_aggregation_query_trend_aggregation_model_json['facet'] = 'testString'
+ query_aggregation_query_trend_aggregation_model_json['time_segments'] = 'testString'
+ query_aggregation_query_trend_aggregation_model_json['show_estimated_matching_results'] = False
+ query_aggregation_query_trend_aggregation_model_json['show_total_matching_documents'] = False
+ query_aggregation_query_trend_aggregation_model_json['results'] = [query_trend_aggregation_result_model]
+
+ # Construct a model instance of QueryAggregationQueryTrendAggregation by calling from_dict on the json representation
+ query_aggregation_query_trend_aggregation_model = QueryAggregationQueryTrendAggregation.from_dict(query_aggregation_query_trend_aggregation_model_json)
+ assert query_aggregation_query_trend_aggregation_model != False
+
+ # Construct a model instance of QueryAggregationQueryTrendAggregation by calling from_dict on the json representation
+ query_aggregation_query_trend_aggregation_model_dict = QueryAggregationQueryTrendAggregation.from_dict(query_aggregation_query_trend_aggregation_model_json).__dict__
+ query_aggregation_query_trend_aggregation_model2 = QueryAggregationQueryTrendAggregation(**query_aggregation_query_trend_aggregation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert query_aggregation_query_trend_aggregation_model == query_aggregation_query_trend_aggregation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ query_aggregation_query_trend_aggregation_model_json2 = query_aggregation_query_trend_aggregation_model.to_dict()
+ assert query_aggregation_query_trend_aggregation_model_json2 == query_aggregation_query_trend_aggregation_model_json
+
+
+# endregion
+##############################################################################
+# End of Model Tests
+##############################################################################
diff --git a/test/unit/test_language_translator_v3.py b/test/unit/test_language_translator_v3.py
deleted file mode 100644
index 4244d950e..000000000
--- a/test/unit/test_language_translator_v3.py
+++ /dev/null
@@ -1,306 +0,0 @@
-# coding=utf-8
-
-import json
-import os
-import responses
-import ibm_watson
-from ibm_watson.language_translator_v3 import TranslationResult, TranslationModels, TranslationModel, IdentifiedLanguages, IdentifiableLanguages, DeleteModelResult
-
-platform_url = 'https://gateway.watsonplatform.net'
-service_path = '/language-translator/api'
-base_url = '{0}{1}'.format(platform_url, service_path)
-
-iam_url = "https://iam.cloud.ibm.com/identity/token"
-iam_token_response = """{
- "access_token": "oAeisG8yqPY7sFR_x66Z15",
- "token_type": "Bearer",
- "expires_in": 3600,
- "expiration": 1524167011,
- "refresh_token": "jy4gl91BQ"
-}"""
-
-#########################
-# counterexamples
-#########################
-
-@responses.activate
-def test_translate_source_target():
- service = ibm_watson.LanguageTranslatorV3(
- version='2018-05-01',
- iam_apikey='iam_apikey')
- endpoint = '/v3/translate'
- url = '{0}{1}'.format(base_url, endpoint)
- expected = {
- "character_count": 19,
- "translations": [{"translation": u"Hello, how are you ? \u20ac"}],
- "word_count": 4
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(expected),
- status=200,
- content_type='application/json')
- responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200)
-
- response = service.translate('Hola, cómo estás? €', source='es', target='en').get_result()
- assert len(responses.calls) == 2
- assert responses.calls[1].request.url.startswith(url)
- assert response == expected
- TranslationResult._from_dict(response)
-
-@responses.activate
-def test_translate_model_id():
- service = ibm_watson.LanguageTranslatorV3(
- version='2018-05-01',
- iam_apikey='iam_apikey')
- endpoint = '/v3/translate'
- url = '{0}{1}'.format(base_url, endpoint)
- expected = {
- "character_count": 22,
- "translations": [
- {
- "translation": "Messi es el mejor"
- }
- ],
- "word_count": 5
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(expected),
- status=200,
- content_type='application/json')
- responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200)
- response = service.translate('Messi is the best ever',
- model_id='en-es-conversational').get_result()
-
- assert len(responses.calls) == 2
- assert responses.calls[1].request.url.startswith(url)
- assert response == expected
- TranslationResult._from_dict(response)
-
-@responses.activate
-def test_identify():
- service = ibm_watson.LanguageTranslatorV3(
- version='2018-05-01',
- iam_apikey='iam_apikey')
- endpoint = '/v3/identify'
- url = '{0}{1}'.format(base_url, endpoint)
- expected = {
- "languages": [
- {
- "confidence": 0.477673,
- "language": "zh"
- },
- {
- "confidence": 0.262053,
- "language": "zh-TW"
- },
- {
- "confidence": 0.00958378,
- "language": "en"
- }
- ]
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(expected),
- status=200,
- content_type='application/json')
- responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200)
- response = service.identify('祝你有美好的一天').get_result()
- assert len(responses.calls) == 2
- assert responses.calls[1].request.url.startswith(url)
- assert response == expected
- IdentifiedLanguages._from_dict(response)
-
-@responses.activate
-def test_list_identifiable_languages():
- service = ibm_watson.LanguageTranslatorV3(
- version='2018-05-01',
- iam_apikey='iam_apikey')
- endpoint = '/v3/identifiable_languages'
- url = '{0}{1}'.format(base_url, endpoint)
- expected = {
- "languages": [
- {
- "name": "German",
- "language": "de"
- },
- {
- "name": "Greek",
- "language": "el"
- },
- {
- "name": "English",
- "language": "en"
- },
- {
- "name": "Esperanto",
- "language": "eo"
- },
- {
- "name": "Spanish",
- "language": "es"
- },
- {
- "name": "Chinese",
- "language": "zh"
- }
- ]
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(expected),
- status=200,
- content_type='application/json')
- responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200)
- response = service.list_identifiable_languages().get_result()
- assert len(responses.calls) == 2
- assert responses.calls[1].request.url.startswith(url)
- assert response == expected
- IdentifiableLanguages._from_dict(response)
-
-@responses.activate
-def test_create_model():
- service = ibm_watson.LanguageTranslatorV3(
- version='2018-05-01',
- username='xxx',
- password='yyy'
- )
- endpoint = '/v3/models'
- url = '{0}{1}'.format(base_url, endpoint)
- expected = {
- "status": "available",
- "model_id": "en-es-conversational",
- "domain": "conversational",
- "target": "es",
- "customizable": False,
- "source": "en",
- "base_model_id": "en-es-conversational",
- "owner": "",
- "default_model": False,
- "name": "test_glossary"
- }
- responses.add(
- responses.POST,
- url,
- body=json.dumps(expected),
- status=200,
- content_type='application/json')
- with open(os.path.join(os.path.dirname(__file__), '../../resources/language_translator_model.tmx'), 'rb') as custom_model:
- response = service.create_model('en-fr',
- name='test_glossary',
- forced_glossary=custom_model).get_result()
- assert len(responses.calls) == 1
- assert responses.calls[0].request.url.startswith(url)
- assert response == expected
- TranslationModel._from_dict(response)
-
-@responses.activate
-def test_delete_model():
- service = ibm_watson.LanguageTranslatorV3(
- version='2018-05-01',
- iam_apikey='iam_apikey')
- model_id = 'en-es-conversational'
- endpoint = '/v3/models/' + model_id
- url = '{0}{1}'.format(base_url, endpoint)
- expected = {
- "status": "OK",
- }
- responses.add(
- responses.DELETE,
- url,
- body=json.dumps(expected),
- status=200,
- content_type='application/json')
- responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200)
- response = service.delete_model(model_id).get_result()
- assert len(responses.calls) == 2
- assert responses.calls[1].request.url.startswith(url)
- assert response == expected
- DeleteModelResult._from_dict(response)
-
-@responses.activate
-def test_get_model():
- service = ibm_watson.LanguageTranslatorV3(
- version='2018-05-01',
- iam_apikey='iam_apikey')
- model_id = 'en-es-conversational'
- endpoint = '/v3/models/' + model_id
- url = '{0}{1}'.format(base_url, endpoint)
- expected = {
- "status": "available",
- "model_id": "en-es-conversational",
- "domain": "conversational",
- "target": "es",
- "customizable": False,
- "source": "en",
- "base_model_id": "",
- "owner": "",
- "default_model": False,
- "name": "en-es-conversational"
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(expected),
- status=200,
- content_type='application/json')
- responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200)
- response = service.get_model(model_id).get_result()
- assert len(responses.calls) == 2
- assert responses.calls[1].request.url.startswith(url)
- assert response == expected
- TranslationModel._from_dict(response)
-
-@responses.activate
-def test_list_models():
- service = ibm_watson.LanguageTranslatorV3(
- version='2018-05-01',
- iam_apikey='iam_apikey')
- endpoint = '/v3/models'
- url = '{0}{1}'.format(base_url, endpoint)
- expected = {
- "models": [
- {
- "status": "available",
- "model_id": "en-es-conversational",
- "domain": "conversational",
- "target": "es",
- "customizable": False,
- "source": "en",
- "base_model_id": "",
- "owner": "",
- "default_model": False,
- "name": "en-es-conversational"
- },
- {
- "status": "available",
- "model_id": "es-en",
- "domain": "news",
- "target": "en",
- "customizable": True,
- "source": "es",
- "base_model_id": "",
- "owner": "",
- "default_model": True,
- "name": "es-en"
- }
- ]
- }
- responses.add(
- responses.GET,
- url,
- body=json.dumps(expected),
- status=200,
- content_type='application/json')
- responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200)
- response = service.list_models().get_result()
- assert len(responses.calls) == 2
- assert responses.calls[1].request.url.startswith(url)
- assert response == expected
- TranslationModels._from_dict(response)
diff --git a/test/unit/test_natural_language_classifier_v1.py b/test/unit/test_natural_language_classifier_v1.py
deleted file mode 100644
index 8146d9c4d..000000000
--- a/test/unit/test_natural_language_classifier_v1.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# coding: utf-8
-import os
-import responses
-import ibm_watson
-
-
-@responses.activate
-def test_success():
- natural_language_classifier = ibm_watson.NaturalLanguageClassifierV1(username="username",
- password="password")
-
- list_url = 'https://gateway.watsonplatform.net/natural-language-classifier/api/v1/classifiers'
- list_response = '{"classifiers": [{"url": "https://gateway.watsonplatform.net/natural-language-classifier-' \
- 'experimental/api/v1/classifiers/497EF2-nlc-00", "classifier_id": "497EF2-nlc-00"}]}'
- responses.add(responses.GET, list_url,
- body=list_response, status=200,
- content_type='application/json')
-
- natural_language_classifier.list_classifiers()
-
- assert responses.calls[0].request.url == list_url
- assert responses.calls[0].response.text == list_response
-
- status_url = ('https://gateway.watsonplatform.net/natural-language-classifier/api/v1/classifiers/'
- '497EF2-nlc-00')
- status_response = '{"url": "https://gateway.watsonplatform.net/natural-language-classifier/api/v1/' \
- 'classifiers/497EF2-nlc-00", "status": "Available", "status_description": "The classifier ' \
- 'instance is now available and is ready to take classifier requests.", "classifier_id": ' \
- '"497EF2-nlc-00"}'
-
- responses.add(responses.GET, status_url,
- body=status_response, status=200,
- content_type='application/json')
-
- natural_language_classifier.get_classifier('497EF2-nlc-00')
-
- assert responses.calls[1].request.url == status_url
- assert responses.calls[1].response.text == status_response
-
- classify_url = 'https://gateway.watsonplatform.net/natural-language-classifier/api/v1/classifiers/' \
- '497EF2-nlc-00/classify'
- classify_response = '{"url": "https://gateway.watsonplatform.net/natural-language-classifier/api/' \
- 'v1", "text": "test", "classes": [{"class_name": "conditions", "confidence": ' \
- '0.6575315710901418}, {"class_name": "temperature", "confidence": 0.3424684289098582}], ' \
- '"classifier_id": "497EF2-nlc-00", "top_class": "conditions"}'
-
- responses.add(responses.POST, classify_url,
- body=classify_response, status=200,
- content_type='application/json')
-
- natural_language_classifier.classify('497EF2-nlc-00', 'test')
-
- assert responses.calls[2].request.url == classify_url
- assert responses.calls[2].response.text == classify_response
-
- create_url = 'https://gateway.watsonplatform.net/natural-language-classifier/api/v1/classifiers'
- create_response = '{"url": "https://gateway.watsonplatform.net/natural-language-classifier/api/v1/' \
- 'classifiers/497EF2-nlc-00", "status": "Available", "status_description": "The classifier ' \
- 'instance is now available and is ready to take classifier requests.", "classifier_id": ' \
- '"497EF2-nlc-00"}'
-
- responses.add(responses.POST, create_url,
- body=create_response, status=200,
- content_type='application/json')
- with open(os.path.join(os.path.dirname(__file__), '../../resources/weather_data_train.csv'), 'rb') as training_data:
- natural_language_classifier.create_classifier(
- training_data=training_data, metadata='{"language": "en"}')
-
- assert responses.calls[3].request.url == create_url
- assert responses.calls[3].response.text == create_response
-
- remove_url = status_url
- remove_response = '{}'
-
- responses.add(responses.DELETE, remove_url,
- body=remove_response, status=200,
- content_type='application/json')
-
- natural_language_classifier.delete_classifier('497EF2-nlc-00')
-
- assert responses.calls[4].request.url == remove_url
- assert responses.calls[4].response.text == remove_response
-
- assert len(responses.calls) == 5
-
-@responses.activate
-def test_classify_collection():
- natural_language_classifier = ibm_watson.NaturalLanguageClassifierV1(username="username",
- password="password")
- classify_collection_url = 'https://gateway.watsonplatform.net/natural-language-classifier/api/v1/classifiers/497EF2-nlc-00/classify_collection'
- classify_collection_response = '{ \
- "classifier_id": "497EF2-nlc-00", \
- "url": "https://gateway.watsonplatform.net/natural-language-classifier/api/v1/classifiers/10D41B-nlc-1", \
- "collection": [ \
- { \
- "text": "How hot will it be today?", \
- "top_class": "temperature", \
- "classes": [ \
- { \
- "class_name": "temperature", \
- "confidence": 0.9930558798985937 \
- }, \
- { \
- "class_name": "conditions", \
- "confidence": 0.006944120101406304 \
- } \
- ] \
- }, \
- { \
- "text": "Is it hot outside?", \
- "top_class": "temperature", \
- "classes": [ \
- { \
- "class_name": "temperature", \
- "confidence": 1 \
- }, \
- { \
- "class_name": "conditions", \
- "confidence": 0 \
- } \
- ] \
- } \
- ] \
- }'
- responses.add(responses.POST, classify_collection_url,
- body=classify_collection_response, status=200,
- content_type='application/json')
-
- classifier_id = '497EF2-nlc-00'
- collection = ['{"text":"How hot will it be today?"}', '{"text":"Is it hot outside?"}']
- natural_language_classifier.classify_collection(classifier_id, collection)
-
- assert responses.calls[0].request.url == classify_collection_url
- assert responses.calls[0].response.text == classify_collection_response
diff --git a/test/unit/test_natural_language_understanding.py b/test/unit/test_natural_language_understanding.py
deleted file mode 100644
index 4a18ad0a4..000000000
--- a/test/unit/test_natural_language_understanding.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# coding: utf-8
-from unittest import TestCase
-from ibm_watson import NaturalLanguageUnderstandingV1
-from ibm_watson.natural_language_understanding_v1 import \
- Features, ConceptsOptions, EntitiesOptions, KeywordsOptions, CategoriesOptions, \
- EmotionOptions, MetadataOptions, SemanticRolesOptions, RelationsOptions, \
- SentimentOptions
-
-import os
-import pytest
-import responses
-
-
-base_url = 'https://gateway.watsonplatform.net'
-default_url = '{0}/natural-language-understanding/api'.format(base_url)
-
-
-class TestFeatures(TestCase):
- def test_concepts(self):
- c = Features(concepts=ConceptsOptions())
- assert c._to_dict() == {'concepts': {}}
- c = Features(concepts=ConceptsOptions(limit=10))
- assert c._to_dict() == {'concepts': {'limit': 10}}
-
- def test_entities(self):
- e = Features(entities=EntitiesOptions())
- assert e._to_dict() == {'entities': {}}
-
- def test_keywords(self):
- k = Features(keywords=KeywordsOptions())
- assert k._to_dict() == {'keywords': {}}
-
- def test_categories(self):
- c = Features(categories=CategoriesOptions())
- assert c._to_dict() == {'categories': {}}
-
- def test_emotion(self):
- e = Features(emotion=EmotionOptions())
- assert e._to_dict() == {'emotion': {}}
-
- def test_metadata(self):
- m = Features(metadata=MetadataOptions())
- assert m._to_dict() == {'metadata': {}}
-
- def test_semantic_roles(self):
- s = Features(semantic_roles=SemanticRolesOptions())
- assert s._to_dict() == {'semantic_roles': {}}
-
- def test_relations(self):
- r = Features(relations=RelationsOptions())
- assert r._to_dict() == {'relations': {}}
-
- def test_sentiment(self):
- s = Features(sentiment=SentimentOptions())
- assert s._to_dict() == {'sentiment': {}}
-
-
-class TestNaturalLanguageUnderstanding(TestCase):
- def test_version_date(self):
- with pytest.raises(TypeError):
- NaturalLanguageUnderstandingV1() # pylint: disable=E1120
- nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
- url='http://bogus.com',
- username='username',
- password='password')
- assert nlu
-
- @pytest.mark.skipif(os.getenv('VCAP_SERVICES') is not None,
- reason='credentials may come from VCAP_SERVICES')
- def test_missing_credentials(self):
- with pytest.raises(ValueError):
- NaturalLanguageUnderstandingV1(version='2016-01-23')
- with pytest.raises(ValueError):
- NaturalLanguageUnderstandingV1(version='2016-01-23',
- url='https://bogus.com')
-
- def test_analyze_throws(self):
- nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
- url='http://bogus.com',
- username='username',
- password='password')
- with pytest.raises(ValueError):
- nlu.analyze(None, text="this will not work")
-
- @responses.activate
- def test_text_analyze(self):
- nlu_url = "http://bogus.com/v1/analyze"
- responses.add(responses.POST, nlu_url,
- body="{\"resulting_key\": true}", status=200,
- content_type='application/json')
- nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
- url='http://bogus.com',
- username='username',
- password='password')
- nlu.analyze(Features(sentiment=SentimentOptions()), text="hello this is a test")
- assert len(responses.calls) == 1
-
- @responses.activate
- def test_html_analyze(self):
- nlu_url = "http://bogus.com/v1/analyze"
- responses.add(responses.POST, nlu_url,
- body="{\"resulting_key\": true}", status=200,
- content_type='application/json')
- nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
- url='http://bogus.com',
- username='username',
- password='password')
- nlu.analyze(Features(sentiment=SentimentOptions(),
- emotion=EmotionOptions(document=False)),
- html="hello this is a test")
- assert len(responses.calls) == 1
-
- @responses.activate
- def test_url_analyze(self):
- nlu_url = "http://bogus.com/v1/analyze"
- responses.add(responses.POST, nlu_url,
- body="{\"resulting_key\": true}", status=200,
- content_type='application/json')
- nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
- url='http://bogus.com',
- username='username',
- password='password')
- nlu.analyze(Features(sentiment=SentimentOptions(),
- emotion=EmotionOptions(document=False)),
- url="http://cnn.com",
- xpath="/bogus/xpath", language="en")
- assert len(responses.calls) == 1
-
- @responses.activate
- def test_list_models(self):
- nlu_url = "http://bogus.com/v1/models"
- responses.add(responses.GET, nlu_url, status=200,
- body="{\"resulting_key\": true}",
- content_type='application/json')
- nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
- url='http://bogus.com',
- username='username',
- password='password')
- nlu.list_models()
- assert len(responses.calls) == 1
-
- @responses.activate
- def test_delete_model(self):
- model_id = "invalid_model_id"
- nlu_url = "http://bogus.com/v1/models/" + model_id
- responses.add(responses.DELETE, nlu_url, status=200,
- body="{}", content_type='application/json')
- nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
- url='http://bogus.com',
- username='username',
- password='password')
- nlu.delete_model(model_id)
- assert len(responses.calls) == 1
diff --git a/test/unit/test_natural_language_understanding_v1.py b/test/unit/test_natural_language_understanding_v1.py
new file mode 100644
index 000000000..40ca030a6
--- /dev/null
+++ b/test/unit/test_natural_language_understanding_v1.py
@@ -0,0 +1,3839 @@
+# -*- coding: utf-8 -*-
+# (C) Copyright IBM Corp. 2019, 2024.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Unit Tests for NaturalLanguageUnderstandingV1
+"""
+
+from datetime import datetime, timezone
+from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
+from ibm_cloud_sdk_core.utils import datetime_to_string, string_to_datetime
+import inspect
+import io
+import json
+import pytest
+import re
+import requests
+import responses
+import tempfile
+import urllib
+from ibm_watson.natural_language_understanding_v1 import *
+
+version = 'testString'
+
+_service = NaturalLanguageUnderstandingV1(
+ authenticator=NoAuthAuthenticator(),
+ version=version,
+)
+
+_base_url = 'https://api.us-south.natural-language-understanding.watson.cloud.ibm.com'
+_service.set_service_url(_base_url)
+
+
+def preprocess_url(operation_path: str):
+ """
+ Returns the request url associated with the specified operation path.
+ This will be base_url concatenated with a quoted version of operation_path.
+ The returned request URL is used to register the mock response so it needs
+ to match the request URL that is formed by the requests library.
+ """
+
+ # Form the request URL from the base URL and operation path.
+ request_url = _base_url + operation_path
+
+ # If the request url does NOT end with a /, then just return it as-is.
+ # Otherwise, return a regular expression that matches one or more trailing /.
+ if not request_url.endswith('/'):
+ return request_url
+ return re.compile(request_url.rstrip('/') + '/+')
+
+
+##############################################################################
+# Start of Service: Analyze
+##############################################################################
+# region
+
+
+class TestAnalyze:
+ """
+ Test Class for analyze
+ """
+
+ @responses.activate
+ def test_analyze_all_params(self):
+ """
+ analyze()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/analyze')
+ mock_response = '{"language": "language", "analyzed_text": "analyzed_text", "retrieved_url": "retrieved_url", "usage": {"features": 8, "text_characters": 15, "text_units": 10}, "concepts": [{"text": "text", "relevance": 9, "dbpedia_resource": "dbpedia_resource"}], "entities": [{"type": "type", "text": "text", "relevance": 9, "confidence": 10, "mentions": [{"text": "text", "location": [8], "confidence": 10}], "count": 5, "emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}, "sentiment": {"score": 5}, "disambiguation": {"name": "name", "dbpedia_resource": "dbpedia_resource", "subtype": ["subtype"]}}], "keywords": [{"count": 5, "relevance": 9, "text": "text", "emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}, "sentiment": {"score": 5}}], "categories": [{"label": "label", "score": 5, "explanation": {"relevant_text": [{"text": "text"}]}}], "classifications": [{"class_name": "class_name", "confidence": 10}], "emotion": {"document": {"emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}}, "targets": [{"text": "text", "emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}}]}, "metadata": {"authors": [{"name": "name"}], "publication_date": "publication_date", "title": "title", "image": "image", "feeds": [{"link": "link"}]}, "relations": [{"score": 5, "sentence": "sentence", "type": "type", "arguments": [{"entities": [{"text": "text", "type": "type"}], "location": [8], "text": "text"}]}], "semantic_roles": [{"sentence": "sentence", "subject": {"text": "text", "entities": [{"type": "type", "text": "text"}], "keywords": [{"text": "text"}]}, "action": {"text": "text", "normalized": "normalized", "verb": {"text": "text", "tense": "tense"}}, "object": {"text": "text", "keywords": [{"text": "text"}]}}], "sentiment": {"document": {"label": "label", "score": 5}, "targets": [{"text": "text", "score": 5}]}, "syntax": {"tokens": [{"text": "text", "part_of_speech": "ADJ", "location": [8], "lemma": 
"lemma"}], "sentences": [{"text": "text", "location": [8]}]}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a ClassificationsOptions model
+ classifications_options_model = {}
+ classifications_options_model['model'] = 'testString'
+
+ # Construct a dict representation of a ConceptsOptions model
+ concepts_options_model = {}
+ concepts_options_model['limit'] = 8
+
+ # Construct a dict representation of a EmotionOptions model
+ emotion_options_model = {}
+ emotion_options_model['document'] = True
+ emotion_options_model['targets'] = ['testString']
+
+ # Construct a dict representation of a EntitiesOptions model
+ entities_options_model = {}
+ entities_options_model['limit'] = 50
+ entities_options_model['mentions'] = False
+ entities_options_model['model'] = 'testString'
+ entities_options_model['sentiment'] = False
+ entities_options_model['emotion'] = False
+
+ # Construct a dict representation of a KeywordsOptions model
+ keywords_options_model = {}
+ keywords_options_model['limit'] = 50
+ keywords_options_model['sentiment'] = False
+ keywords_options_model['emotion'] = False
+
+ # Construct a dict representation of a RelationsOptions model
+ relations_options_model = {}
+ relations_options_model['model'] = 'testString'
+
+ # Construct a dict representation of a SemanticRolesOptions model
+ semantic_roles_options_model = {}
+ semantic_roles_options_model['limit'] = 50
+ semantic_roles_options_model['keywords'] = False
+ semantic_roles_options_model['entities'] = False
+
+ # Construct a dict representation of a SentimentOptions model
+ sentiment_options_model = {}
+ sentiment_options_model['document'] = True
+ sentiment_options_model['targets'] = ['testString']
+
+ # Construct a dict representation of a CategoriesOptions model
+ categories_options_model = {}
+ categories_options_model['explanation'] = False
+ categories_options_model['limit'] = 3
+ categories_options_model['model'] = 'testString'
+
+ # Construct a dict representation of a SyntaxOptionsTokens model
+ syntax_options_tokens_model = {}
+ syntax_options_tokens_model['lemma'] = True
+ syntax_options_tokens_model['part_of_speech'] = True
+
+ # Construct a dict representation of a SyntaxOptions model
+ syntax_options_model = {}
+ syntax_options_model['tokens'] = syntax_options_tokens_model
+ syntax_options_model['sentences'] = True
+
+ # Construct a dict representation of a Features model
+ features_model = {}
+ features_model['classifications'] = classifications_options_model
+ features_model['concepts'] = concepts_options_model
+ features_model['emotion'] = emotion_options_model
+ features_model['entities'] = entities_options_model
+ features_model['keywords'] = keywords_options_model
+ features_model['metadata'] = {'anyKey': 'anyValue'}
+ features_model['relations'] = relations_options_model
+ features_model['semantic_roles'] = semantic_roles_options_model
+ features_model['sentiment'] = sentiment_options_model
+ features_model['categories'] = categories_options_model
+ features_model['syntax'] = syntax_options_model
+
+ # Set up parameter values
+ features = features_model
+ text = 'testString'
+ html = 'testString'
+ url = 'testString'
+ clean = True
+ xpath = 'testString'
+ fallback_to_raw = True
+ return_analyzed_text = False
+ language = 'testString'
+ limit_text_characters = 38
+
+ # Invoke method
+ response = _service.analyze(
+ features,
+ text=text,
+ html=html,
+ url=url,
+ clean=clean,
+ xpath=xpath,
+ fallback_to_raw=fallback_to_raw,
+ return_analyzed_text=return_analyzed_text,
+ language=language,
+ limit_text_characters=limit_text_characters,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['features'] == features_model
+ assert req_body['text'] == 'testString'
+ assert req_body['html'] == 'testString'
+ assert req_body['url'] == 'testString'
+ assert req_body['clean'] == True
+ assert req_body['xpath'] == 'testString'
+ assert req_body['fallback_to_raw'] == True
+ assert req_body['return_analyzed_text'] == False
+ assert req_body['language'] == 'testString'
+ assert req_body['limit_text_characters'] == 38
+
+ def test_analyze_all_params_with_retries(self):
+ # Enable retries and run test_analyze_all_params.
+ _service.enable_retries()
+ self.test_analyze_all_params()
+
+ # Disable retries and run test_analyze_all_params.
+ _service.disable_retries()
+ self.test_analyze_all_params()
+
+ @responses.activate
+ def test_analyze_value_error(self):
+ """
+ test_analyze_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/analyze')
+ mock_response = '{"language": "language", "analyzed_text": "analyzed_text", "retrieved_url": "retrieved_url", "usage": {"features": 8, "text_characters": 15, "text_units": 10}, "concepts": [{"text": "text", "relevance": 9, "dbpedia_resource": "dbpedia_resource"}], "entities": [{"type": "type", "text": "text", "relevance": 9, "confidence": 10, "mentions": [{"text": "text", "location": [8], "confidence": 10}], "count": 5, "emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}, "sentiment": {"score": 5}, "disambiguation": {"name": "name", "dbpedia_resource": "dbpedia_resource", "subtype": ["subtype"]}}], "keywords": [{"count": 5, "relevance": 9, "text": "text", "emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}, "sentiment": {"score": 5}}], "categories": [{"label": "label", "score": 5, "explanation": {"relevant_text": [{"text": "text"}]}}], "classifications": [{"class_name": "class_name", "confidence": 10}], "emotion": {"document": {"emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}}, "targets": [{"text": "text", "emotion": {"anger": 5, "disgust": 7, "fear": 4, "joy": 3, "sadness": 7}}]}, "metadata": {"authors": [{"name": "name"}], "publication_date": "publication_date", "title": "title", "image": "image", "feeds": [{"link": "link"}]}, "relations": [{"score": 5, "sentence": "sentence", "type": "type", "arguments": [{"entities": [{"text": "text", "type": "type"}], "location": [8], "text": "text"}]}], "semantic_roles": [{"sentence": "sentence", "subject": {"text": "text", "entities": [{"type": "type", "text": "text"}], "keywords": [{"text": "text"}]}, "action": {"text": "text", "normalized": "normalized", "verb": {"text": "text", "tense": "tense"}}, "object": {"text": "text", "keywords": [{"text": "text"}]}}], "sentiment": {"document": {"label": "label", "score": 5}, "targets": [{"text": "text", "score": 5}]}, "syntax": {"tokens": [{"text": "text", "part_of_speech": "ADJ", "location": [8], "lemma": 
"lemma"}], "sentences": [{"text": "text", "location": [8]}]}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Construct a dict representation of a ClassificationsOptions model
+ classifications_options_model = {}
+ classifications_options_model['model'] = 'testString'
+
+ # Construct a dict representation of a ConceptsOptions model
+ concepts_options_model = {}
+ concepts_options_model['limit'] = 8
+
+ # Construct a dict representation of a EmotionOptions model
+ emotion_options_model = {}
+ emotion_options_model['document'] = True
+ emotion_options_model['targets'] = ['testString']
+
+ # Construct a dict representation of a EntitiesOptions model
+ entities_options_model = {}
+ entities_options_model['limit'] = 50
+ entities_options_model['mentions'] = False
+ entities_options_model['model'] = 'testString'
+ entities_options_model['sentiment'] = False
+ entities_options_model['emotion'] = False
+
+ # Construct a dict representation of a KeywordsOptions model
+ keywords_options_model = {}
+ keywords_options_model['limit'] = 50
+ keywords_options_model['sentiment'] = False
+ keywords_options_model['emotion'] = False
+
+ # Construct a dict representation of a RelationsOptions model
+ relations_options_model = {}
+ relations_options_model['model'] = 'testString'
+
+ # Construct a dict representation of a SemanticRolesOptions model
+ semantic_roles_options_model = {}
+ semantic_roles_options_model['limit'] = 50
+ semantic_roles_options_model['keywords'] = False
+ semantic_roles_options_model['entities'] = False
+
+ # Construct a dict representation of a SentimentOptions model
+ sentiment_options_model = {}
+ sentiment_options_model['document'] = True
+ sentiment_options_model['targets'] = ['testString']
+
+ # Construct a dict representation of a CategoriesOptions model
+ categories_options_model = {}
+ categories_options_model['explanation'] = False
+ categories_options_model['limit'] = 3
+ categories_options_model['model'] = 'testString'
+
+ # Construct a dict representation of a SyntaxOptionsTokens model
+ syntax_options_tokens_model = {}
+ syntax_options_tokens_model['lemma'] = True
+ syntax_options_tokens_model['part_of_speech'] = True
+
+ # Construct a dict representation of a SyntaxOptions model
+ syntax_options_model = {}
+ syntax_options_model['tokens'] = syntax_options_tokens_model
+ syntax_options_model['sentences'] = True
+
+ # Construct a dict representation of a Features model
+ features_model = {}
+ features_model['classifications'] = classifications_options_model
+ features_model['concepts'] = concepts_options_model
+ features_model['emotion'] = emotion_options_model
+ features_model['entities'] = entities_options_model
+ features_model['keywords'] = keywords_options_model
+ features_model['metadata'] = {'anyKey': 'anyValue'}
+ features_model['relations'] = relations_options_model
+ features_model['semantic_roles'] = semantic_roles_options_model
+ features_model['sentiment'] = sentiment_options_model
+ features_model['categories'] = categories_options_model
+ features_model['syntax'] = syntax_options_model
+
+ # Set up parameter values
+ features = features_model
+ text = 'testString'
+ html = 'testString'
+ url = 'testString'
+ clean = True
+ xpath = 'testString'
+ fallback_to_raw = True
+ return_analyzed_text = False
+ language = 'testString'
+ limit_text_characters = 38
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "features": features,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.analyze(**req_copy)
+
+ def test_analyze_value_error_with_retries(self):
+ # Enable retries and run test_analyze_value_error.
+ _service.enable_retries()
+ self.test_analyze_value_error()
+
+ # Disable retries and run test_analyze_value_error.
+ _service.disable_retries()
+ self.test_analyze_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Analyze
+##############################################################################
+
+##############################################################################
+# Start of Service: ManageModels
+##############################################################################
+# region
+
+
+class TestListModels:
+ """
+ Test Class for list_models
+ """
+
+ @responses.activate
+ def test_list_models_all_params(self):
+ """
+ list_models()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/models')
+ mock_response = '{"models": [{"status": "starting", "model_id": "model_id", "language": "language", "description": "description", "workspace_id": "workspace_id", "model_version": "model_version", "version": "version", "version_description": "version_description", "created": "2019-01-01T12:00:00.000Z"}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Invoke method
+ response = _service.list_models()
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_models_all_params_with_retries(self):
+ # Enable retries and run test_list_models_all_params.
+ _service.enable_retries()
+ self.test_list_models_all_params()
+
+ # Disable retries and run test_list_models_all_params.
+ _service.disable_retries()
+ self.test_list_models_all_params()
+
+ @responses.activate
+ def test_list_models_value_error(self):
+ """
+ test_list_models_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/models')
+ mock_response = '{"models": [{"status": "starting", "model_id": "model_id", "language": "language", "description": "description", "workspace_id": "workspace_id", "model_version": "model_version", "version": "version", "version_description": "version_description", "created": "2019-01-01T12:00:00.000Z"}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.list_models(**req_copy)
+
+ def test_list_models_value_error_with_retries(self):
+ # Enable retries and run test_list_models_value_error.
+ _service.enable_retries()
+ self.test_list_models_value_error()
+
+ # Disable retries and run test_list_models_value_error.
+ _service.disable_retries()
+ self.test_list_models_value_error()
+
+
+class TestDeleteModel:
+ """
+ Test Class for delete_model
+ """
+
+ @responses.activate
+ def test_delete_model_all_params(self):
+ """
+ delete_model()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/models/testString')
+ mock_response = '{"deleted": "deleted"}'
+ responses.add(
+ responses.DELETE,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ model_id = 'testString'
+
+ # Invoke method
+ response = _service.delete_model(
+ model_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_delete_model_all_params_with_retries(self):
+ # Enable retries and run test_delete_model_all_params.
+ _service.enable_retries()
+ self.test_delete_model_all_params()
+
+ # Disable retries and run test_delete_model_all_params.
+ _service.disable_retries()
+ self.test_delete_model_all_params()
+
+ @responses.activate
+ def test_delete_model_value_error(self):
+ """
+ test_delete_model_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/models/testString')
+ mock_response = '{"deleted": "deleted"}'
+ responses.add(
+ responses.DELETE,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ model_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "model_id": model_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_model(**req_copy)
+
+ def test_delete_model_value_error_with_retries(self):
+ # Enable retries and run test_delete_model_value_error.
+ _service.enable_retries()
+ self.test_delete_model_value_error()
+
+ # Disable retries and run test_delete_model_value_error.
+ _service.disable_retries()
+ self.test_delete_model_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: ManageModels
+##############################################################################
+
+##############################################################################
+# Start of Service: ManageCategoriesModels
+##############################################################################
+# region
+
+
class TestCreateCategoriesModel:
    """
    Test Class for create_categories_model
    """

    @responses.activate
    def test_create_categories_model_all_params(self):
        """
        create_categories_model()

        Invoke the operation with every optional parameter supplied and
        verify exactly one HTTP call is made with a 201 response.
        """
        # Set up mock
        url = preprocess_url('/v1/models/categories')
        mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Set up parameter values
        language = 'testString'
        training_data = io.BytesIO(b'This is a mock file.').getvalue()
        training_data_content_type = 'json'
        name = 'testString'
        user_metadata = {'region': 'North America', 'latest': True}
        description = 'testString'
        model_version = 'testString'
        workspace_id = 'testString'
        version_description = 'testString'

        # Invoke method
        response = _service.create_categories_model(
            language,
            training_data,
            training_data_content_type=training_data_content_type,
            name=name,
            user_metadata=user_metadata,
            description=description,
            model_version=model_version,
            workspace_id=workspace_id,
            version_description=version_description,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

    def test_create_categories_model_all_params_with_retries(self):
        """Re-run the all-params test with client retries on, then off."""
        # Enable retries and run test_create_categories_model_all_params.
        _service.enable_retries()
        self.test_create_categories_model_all_params()

        # Disable retries and run test_create_categories_model_all_params.
        _service.disable_retries()
        self.test_create_categories_model_all_params()

    @responses.activate
    def test_create_categories_model_required_params(self):
        """
        test_create_categories_model_required_params()

        Invoke the operation with only the required parameters.
        """
        # Set up mock
        url = preprocess_url('/v1/models/categories')
        mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Set up parameter values
        language = 'testString'
        training_data = io.BytesIO(b'This is a mock file.').getvalue()

        # Invoke method
        response = _service.create_categories_model(
            language,
            training_data,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

    def test_create_categories_model_required_params_with_retries(self):
        """Re-run the required-params test with client retries on, then off."""
        # Enable retries and run test_create_categories_model_required_params.
        _service.enable_retries()
        self.test_create_categories_model_required_params()

        # Disable retries and run test_create_categories_model_required_params.
        _service.disable_retries()
        self.test_create_categories_model_required_params()

    @responses.activate
    def test_create_categories_model_value_error(self):
        """
        test_create_categories_model_value_error()

        Omit each required parameter in turn and verify a ValueError is raised.
        """
        # Set up mock
        url = preprocess_url('/v1/models/categories')
        mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Set up parameter values
        language = 'testString'
        training_data = io.BytesIO(b'This is a mock file.').getvalue()

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "language": language,
            "training_data": training_data,
        }
        for param in req_param_dict:
            # Compare by value (!=), not identity (is not): identity tests on
            # strings only happen to work because of CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.create_categories_model(**req_copy)

    def test_create_categories_model_value_error_with_retries(self):
        """Re-run the value-error test with client retries on, then off."""
        # Enable retries and run test_create_categories_model_value_error.
        _service.enable_retries()
        self.test_create_categories_model_value_error()

        # Disable retries and run test_create_categories_model_value_error.
        _service.disable_retries()
        self.test_create_categories_model_value_error()
+
+
class TestListCategoriesModels:
    """
    Test Class for list_categories_models
    """

    @responses.activate
    def test_list_categories_models_all_params(self):
        """
        list_categories_models()

        Invoke the operation and verify a single 200 GET call.
        """
        # Set up mock
        url = preprocess_url('/v1/models/categories')
        mock_response = '{"models": [{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Invoke method
        response = _service.list_categories_models()

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_list_categories_models_all_params_with_retries(self):
        """Re-run the all-params test with client retries on, then off."""
        # Enable retries and run test_list_categories_models_all_params.
        _service.enable_retries()
        self.test_list_categories_models_all_params()

        # Disable retries and run test_list_categories_models_all_params.
        _service.disable_retries()
        self.test_list_categories_models_all_params()

    @responses.activate
    def test_list_categories_models_value_error(self):
        """
        test_list_categories_models_value_error()

        This operation has no required parameters, so the omission loop
        below is empty (kept for symmetry with the generated suite).
        """
        # Set up mock
        url = preprocess_url('/v1/models/categories')
        mock_response = '{"models": [{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
        }
        for param in req_param_dict:
            # Compare by value (!=), not identity (is not).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.list_categories_models(**req_copy)

    def test_list_categories_models_value_error_with_retries(self):
        """Re-run the value-error test with client retries on, then off."""
        # Enable retries and run test_list_categories_models_value_error.
        _service.enable_retries()
        self.test_list_categories_models_value_error()

        # Disable retries and run test_list_categories_models_value_error.
        _service.disable_retries()
        self.test_list_categories_models_value_error()
+
+
class TestGetCategoriesModel:
    """
    Test Class for get_categories_model
    """

    @responses.activate
    def test_get_categories_model_all_params(self):
        """
        get_categories_model()

        Invoke the operation with its required model_id and verify a
        single 200 GET call.
        """
        # Set up mock
        url = preprocess_url('/v1/models/categories/testString')
        mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        model_id = 'testString'

        # Invoke method
        response = _service.get_categories_model(
            model_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_categories_model_all_params_with_retries(self):
        """Re-run the all-params test with client retries on, then off."""
        # Enable retries and run test_get_categories_model_all_params.
        _service.enable_retries()
        self.test_get_categories_model_all_params()

        # Disable retries and run test_get_categories_model_all_params.
        _service.disable_retries()
        self.test_get_categories_model_all_params()

    @responses.activate
    def test_get_categories_model_value_error(self):
        """
        test_get_categories_model_value_error()

        Omit each required parameter in turn and verify a ValueError is raised.
        """
        # Set up mock
        url = preprocess_url('/v1/models/categories/testString')
        mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        model_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "model_id": model_id,
        }
        for param in req_param_dict:
            # Compare by value (!=), not identity (is not): identity tests on
            # strings only happen to work because of CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_categories_model(**req_copy)

    def test_get_categories_model_value_error_with_retries(self):
        """Re-run the value-error test with client retries on, then off."""
        # Enable retries and run test_get_categories_model_value_error.
        _service.enable_retries()
        self.test_get_categories_model_value_error()

        # Disable retries and run test_get_categories_model_value_error.
        _service.disable_retries()
        self.test_get_categories_model_value_error()
+
+
class TestUpdateCategoriesModel:
    """
    Test Class for update_categories_model
    """

    @responses.activate
    def test_update_categories_model_all_params(self):
        """
        update_categories_model()

        Invoke the operation with every optional parameter supplied and
        verify a single 200 PUT call.
        """
        # Set up mock
        url = preprocess_url('/v1/models/categories/testString')
        mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.PUT,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        model_id = 'testString'
        language = 'testString'
        training_data = io.BytesIO(b'This is a mock file.').getvalue()
        training_data_content_type = 'json'
        name = 'testString'
        user_metadata = {'region': 'North America', 'latest': True}
        description = 'testString'
        model_version = 'testString'
        workspace_id = 'testString'
        version_description = 'testString'

        # Invoke method
        response = _service.update_categories_model(
            model_id,
            language,
            training_data,
            training_data_content_type=training_data_content_type,
            name=name,
            user_metadata=user_metadata,
            description=description,
            model_version=model_version,
            workspace_id=workspace_id,
            version_description=version_description,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_update_categories_model_all_params_with_retries(self):
        """Re-run the all-params test with client retries on, then off."""
        # Enable retries and run test_update_categories_model_all_params.
        _service.enable_retries()
        self.test_update_categories_model_all_params()

        # Disable retries and run test_update_categories_model_all_params.
        _service.disable_retries()
        self.test_update_categories_model_all_params()

    @responses.activate
    def test_update_categories_model_required_params(self):
        """
        test_update_categories_model_required_params()

        Invoke the operation with only the required parameters.
        """
        # Set up mock
        url = preprocess_url('/v1/models/categories/testString')
        mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.PUT,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        model_id = 'testString'
        language = 'testString'
        training_data = io.BytesIO(b'This is a mock file.').getvalue()

        # Invoke method
        response = _service.update_categories_model(
            model_id,
            language,
            training_data,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_update_categories_model_required_params_with_retries(self):
        """Re-run the required-params test with client retries on, then off."""
        # Enable retries and run test_update_categories_model_required_params.
        _service.enable_retries()
        self.test_update_categories_model_required_params()

        # Disable retries and run test_update_categories_model_required_params.
        _service.disable_retries()
        self.test_update_categories_model_required_params()

    @responses.activate
    def test_update_categories_model_value_error(self):
        """
        test_update_categories_model_value_error()

        Omit each required parameter in turn and verify a ValueError is raised.
        """
        # Set up mock
        url = preprocess_url('/v1/models/categories/testString')
        mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.PUT,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        model_id = 'testString'
        language = 'testString'
        training_data = io.BytesIO(b'This is a mock file.').getvalue()

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "model_id": model_id,
            "language": language,
            "training_data": training_data,
        }
        for param in req_param_dict:
            # Compare by value (!=), not identity (is not): identity tests on
            # strings only happen to work because of CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.update_categories_model(**req_copy)

    def test_update_categories_model_value_error_with_retries(self):
        """Re-run the value-error test with client retries on, then off."""
        # Enable retries and run test_update_categories_model_value_error.
        _service.enable_retries()
        self.test_update_categories_model_value_error()

        # Disable retries and run test_update_categories_model_value_error.
        _service.disable_retries()
        self.test_update_categories_model_value_error()
+
+
class TestDeleteCategoriesModel:
    """
    Test Class for delete_categories_model
    """

    @responses.activate
    def test_delete_categories_model_all_params(self):
        """
        delete_categories_model()

        Invoke the operation with its required model_id and verify a
        single 200 DELETE call.
        """
        # Set up mock
        url = preprocess_url('/v1/models/categories/testString')
        mock_response = '{"deleted": "deleted"}'
        responses.add(
            responses.DELETE,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        model_id = 'testString'

        # Invoke method
        response = _service.delete_categories_model(
            model_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_delete_categories_model_all_params_with_retries(self):
        """Re-run the all-params test with client retries on, then off."""
        # Enable retries and run test_delete_categories_model_all_params.
        _service.enable_retries()
        self.test_delete_categories_model_all_params()

        # Disable retries and run test_delete_categories_model_all_params.
        _service.disable_retries()
        self.test_delete_categories_model_all_params()

    @responses.activate
    def test_delete_categories_model_value_error(self):
        """
        test_delete_categories_model_value_error()

        Omit each required parameter in turn and verify a ValueError is raised.
        """
        # Set up mock
        url = preprocess_url('/v1/models/categories/testString')
        mock_response = '{"deleted": "deleted"}'
        responses.add(
            responses.DELETE,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        model_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "model_id": model_id,
        }
        for param in req_param_dict:
            # Compare by value (!=), not identity (is not): identity tests on
            # strings only happen to work because of CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_categories_model(**req_copy)

    def test_delete_categories_model_value_error_with_retries(self):
        """Re-run the value-error test with client retries on, then off."""
        # Enable retries and run test_delete_categories_model_value_error.
        _service.enable_retries()
        self.test_delete_categories_model_value_error()

        # Disable retries and run test_delete_categories_model_value_error.
        _service.disable_retries()
        self.test_delete_categories_model_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: ManageCategoriesModels
+##############################################################################
+
+##############################################################################
+# Start of Service: ManageClassificationsModels
+##############################################################################
+# region
+
+
class TestCreateClassificationsModel:
    """
    Test Class for create_classifications_model
    """

    @responses.activate
    def test_create_classifications_model_all_params(self):
        """
        create_classifications_model()

        Invoke the operation with every optional parameter supplied and
        verify a single 201 POST call.
        """
        # Set up mock
        url = preprocess_url('/v1/models/classifications')
        mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Construct a dict representation of a ClassificationsTrainingParameters model
        classifications_training_parameters_model = {}
        classifications_training_parameters_model['model_type'] = 'single_label'

        # Set up parameter values
        language = 'testString'
        training_data = io.BytesIO(b'This is a mock file.').getvalue()
        training_data_content_type = 'json'
        name = 'testString'
        user_metadata = {'region': 'North America', 'latest': True}
        description = 'testString'
        model_version = 'testString'
        workspace_id = 'testString'
        version_description = 'testString'
        training_parameters = classifications_training_parameters_model

        # Invoke method
        response = _service.create_classifications_model(
            language,
            training_data,
            training_data_content_type=training_data_content_type,
            name=name,
            user_metadata=user_metadata,
            description=description,
            model_version=model_version,
            workspace_id=workspace_id,
            version_description=version_description,
            training_parameters=training_parameters,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

    def test_create_classifications_model_all_params_with_retries(self):
        """Re-run the all-params test with client retries on, then off."""
        # Enable retries and run test_create_classifications_model_all_params.
        _service.enable_retries()
        self.test_create_classifications_model_all_params()

        # Disable retries and run test_create_classifications_model_all_params.
        _service.disable_retries()
        self.test_create_classifications_model_all_params()

    @responses.activate
    def test_create_classifications_model_required_params(self):
        """
        test_create_classifications_model_required_params()

        Invoke the operation with only the required parameters.
        """
        # Set up mock
        url = preprocess_url('/v1/models/classifications')
        mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Set up parameter values
        language = 'testString'
        training_data = io.BytesIO(b'This is a mock file.').getvalue()

        # Invoke method
        response = _service.create_classifications_model(
            language,
            training_data,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201

    def test_create_classifications_model_required_params_with_retries(self):
        """Re-run the required-params test with client retries on, then off."""
        # Enable retries and run test_create_classifications_model_required_params.
        _service.enable_retries()
        self.test_create_classifications_model_required_params()

        # Disable retries and run test_create_classifications_model_required_params.
        _service.disable_retries()
        self.test_create_classifications_model_required_params()

    @responses.activate
    def test_create_classifications_model_value_error(self):
        """
        test_create_classifications_model_value_error()

        Omit each required parameter in turn and verify a ValueError is raised.
        """
        # Set up mock
        url = preprocess_url('/v1/models/classifications')
        mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=201,
        )

        # Set up parameter values
        language = 'testString'
        training_data = io.BytesIO(b'This is a mock file.').getvalue()

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "language": language,
            "training_data": training_data,
        }
        for param in req_param_dict:
            # Compare by value (!=), not identity (is not): identity tests on
            # strings only happen to work because of CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.create_classifications_model(**req_copy)

    def test_create_classifications_model_value_error_with_retries(self):
        """Re-run the value-error test with client retries on, then off."""
        # Enable retries and run test_create_classifications_model_value_error.
        _service.enable_retries()
        self.test_create_classifications_model_value_error()

        # Disable retries and run test_create_classifications_model_value_error.
        _service.disable_retries()
        self.test_create_classifications_model_value_error()
+
+
class TestListClassificationsModels:
    """
    Test Class for list_classifications_models
    """

    @responses.activate
    def test_list_classifications_models_all_params(self):
        """
        list_classifications_models()

        Invoke the operation and verify a single 200 GET call.
        """
        # Set up mock
        url = preprocess_url('/v1/models/classifications')
        mock_response = '{"models": [{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Invoke method
        response = _service.list_classifications_models()

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_list_classifications_models_all_params_with_retries(self):
        """Re-run the all-params test with client retries on, then off."""
        # Enable retries and run test_list_classifications_models_all_params.
        _service.enable_retries()
        self.test_list_classifications_models_all_params()

        # Disable retries and run test_list_classifications_models_all_params.
        _service.disable_retries()
        self.test_list_classifications_models_all_params()

    @responses.activate
    def test_list_classifications_models_value_error(self):
        """
        test_list_classifications_models_value_error()

        This operation has no required parameters, so the omission loop
        below is empty (kept for symmetry with the generated suite).
        """
        # Set up mock
        url = preprocess_url('/v1/models/classifications')
        mock_response = '{"models": [{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
        }
        for param in req_param_dict:
            # Compare by value (!=), not identity (is not).
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.list_classifications_models(**req_copy)

    def test_list_classifications_models_value_error_with_retries(self):
        """Re-run the value-error test with client retries on, then off."""
        # Enable retries and run test_list_classifications_models_value_error.
        _service.enable_retries()
        self.test_list_classifications_models_value_error()

        # Disable retries and run test_list_classifications_models_value_error.
        _service.disable_retries()
        self.test_list_classifications_models_value_error()
+
+
class TestGetClassificationsModel:
    """
    Test Class for get_classifications_model
    """

    @responses.activate
    def test_get_classifications_model_all_params(self):
        """
        get_classifications_model()

        Invoke the operation with its required model_id and verify a
        single 200 GET call.
        """
        # Set up mock
        url = preprocess_url('/v1/models/classifications/testString')
        mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        model_id = 'testString'

        # Invoke method
        response = _service.get_classifications_model(
            model_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_classifications_model_all_params_with_retries(self):
        """Re-run the all-params test with client retries on, then off."""
        # Enable retries and run test_get_classifications_model_all_params.
        _service.enable_retries()
        self.test_get_classifications_model_all_params()

        # Disable retries and run test_get_classifications_model_all_params.
        _service.disable_retries()
        self.test_get_classifications_model_all_params()

    @responses.activate
    def test_get_classifications_model_value_error(self):
        """
        test_get_classifications_model_value_error()

        Omit each required parameter in turn and verify a ValueError is raised.
        """
        # Set up mock
        url = preprocess_url('/v1/models/classifications/testString')
        mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        model_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "model_id": model_id,
        }
        for param in req_param_dict:
            # Compare by value (!=), not identity (is not): identity tests on
            # strings only happen to work because of CPython interning.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_classifications_model(**req_copy)

    def test_get_classifications_model_value_error_with_retries(self):
        """Re-run the value-error test with client retries on, then off."""
        # Enable retries and run test_get_classifications_model_value_error.
        _service.enable_retries()
        self.test_get_classifications_model_value_error()

        # Disable retries and run test_get_classifications_model_value_error.
        _service.disable_retries()
        self.test_get_classifications_model_value_error()
+
+
+class TestUpdateClassificationsModel:
+ """
+ Test Class for update_classifications_model
+ """
+
    @responses.activate
    def test_update_classifications_model_all_params(self):
        """
        update_classifications_model()

        Exercise the PUT operation with every optional parameter supplied
        and verify exactly one HTTP call with a 200 response.
        """
        # Set up mock
        url = preprocess_url('/v1/models/classifications/testString')
        mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
        responses.add(
            responses.PUT,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Construct a dict representation of a ClassificationsTrainingParameters model
        classifications_training_parameters_model = {}
        classifications_training_parameters_model['model_type'] = 'single_label'

        # Set up parameter values
        model_id = 'testString'
        language = 'testString'
        training_data = io.BytesIO(b'This is a mock file.').getvalue()
        training_data_content_type = 'json'
        name = 'testString'
        user_metadata = {'region': 'North America', 'latest': True}
        description = 'testString'
        model_version = 'testString'
        workspace_id = 'testString'
        version_description = 'testString'
        training_parameters = classifications_training_parameters_model

        # Invoke method
        response = _service.update_classifications_model(
            model_id,
            language,
            training_data,
            training_data_content_type=training_data_content_type,
            name=name,
            user_metadata=user_metadata,
            description=description,
            model_version=model_version,
            workspace_id=workspace_id,
            version_description=version_description,
            training_parameters=training_parameters,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
+
+ def test_update_classifications_model_all_params_with_retries(self):
+ # Enable retries and run test_update_classifications_model_all_params.
+ _service.enable_retries()
+ self.test_update_classifications_model_all_params()
+
+ # Disable retries and run test_update_classifications_model_all_params.
+ _service.disable_retries()
+ self.test_update_classifications_model_all_params()
+
+ @responses.activate
+ def test_update_classifications_model_required_params(self):
+ """
+ test_update_classifications_model_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/models/classifications/testString')
+ mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.PUT,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ model_id = 'testString'
+ language = 'testString'
+ training_data = io.BytesIO(b'This is a mock file.').getvalue()
+
+ # Invoke method
+ response = _service.update_classifications_model(
+ model_id,
+ language,
+ training_data,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_update_classifications_model_required_params_with_retries(self):
+ # Enable retries and run test_update_classifications_model_required_params.
+ _service.enable_retries()
+ self.test_update_classifications_model_required_params()
+
+ # Disable retries and run test_update_classifications_model_required_params.
+ _service.disable_retries()
+ self.test_update_classifications_model_required_params()
+
+ @responses.activate
+ def test_update_classifications_model_value_error(self):
+ """
+ test_update_classifications_model_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/models/classifications/testString')
+ mock_response = '{"name": "name", "user_metadata": {"anyKey": "anyValue"}, "language": "language", "description": "description", "model_version": "model_version", "workspace_id": "workspace_id", "version_description": "version_description", "features": ["features"], "status": "starting", "model_id": "model_id", "created": "2019-01-01T12:00:00.000Z", "notices": [{"message": "message"}], "last_trained": "2019-01-01T12:00:00.000Z", "last_deployed": "2019-01-01T12:00:00.000Z"}'
+ responses.add(
+ responses.PUT,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ model_id = 'testString'
+ language = 'testString'
+ training_data = io.BytesIO(b'This is a mock file.').getvalue()
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "model_id": model_id,
+ "language": language,
+ "training_data": training_data,
+ }
+ for param in req_param_dict.keys():
+        req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.update_classifications_model(**req_copy)
+
+ def test_update_classifications_model_value_error_with_retries(self):
+ # Enable retries and run test_update_classifications_model_value_error.
+ _service.enable_retries()
+ self.test_update_classifications_model_value_error()
+
+ # Disable retries and run test_update_classifications_model_value_error.
+ _service.disable_retries()
+ self.test_update_classifications_model_value_error()
+
+
+class TestDeleteClassificationsModel:
+ """
+ Test Class for delete_classifications_model
+ """
+
+ @responses.activate
+ def test_delete_classifications_model_all_params(self):
+ """
+ delete_classifications_model()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/models/classifications/testString')
+ mock_response = '{"deleted": "deleted"}'
+ responses.add(
+ responses.DELETE,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ model_id = 'testString'
+
+ # Invoke method
+ response = _service.delete_classifications_model(
+ model_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_delete_classifications_model_all_params_with_retries(self):
+ # Enable retries and run test_delete_classifications_model_all_params.
+ _service.enable_retries()
+ self.test_delete_classifications_model_all_params()
+
+ # Disable retries and run test_delete_classifications_model_all_params.
+ _service.disable_retries()
+ self.test_delete_classifications_model_all_params()
+
+ @responses.activate
+ def test_delete_classifications_model_value_error(self):
+ """
+ test_delete_classifications_model_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/models/classifications/testString')
+ mock_response = '{"deleted": "deleted"}'
+ responses.add(
+ responses.DELETE,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ model_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "model_id": model_id,
+ }
+ for param in req_param_dict.keys():
+        req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_classifications_model(**req_copy)
+
+ def test_delete_classifications_model_value_error_with_retries(self):
+ # Enable retries and run test_delete_classifications_model_value_error.
+ _service.enable_retries()
+ self.test_delete_classifications_model_value_error()
+
+ # Disable retries and run test_delete_classifications_model_value_error.
+ _service.disable_retries()
+ self.test_delete_classifications_model_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: ManageClassificationsModels
+##############################################################################
+
+
+##############################################################################
+# Start of Model Tests
+##############################################################################
+# region
+
+
+class TestModel_AnalysisResults:
+ """
+ Test Class for AnalysisResults
+ """
+
+ def test_analysis_results_serialization(self):
+ """
+ Test serialization/deserialization for AnalysisResults
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ analysis_results_usage_model = {} # AnalysisResultsUsage
+ analysis_results_usage_model['features'] = 38
+ analysis_results_usage_model['text_characters'] = 38
+ analysis_results_usage_model['text_units'] = 38
+
+ concepts_result_model = {} # ConceptsResult
+ concepts_result_model['text'] = 'Social network service'
+ concepts_result_model['relevance'] = 0.92186
+ concepts_result_model['dbpedia_resource'] = 'http://dbpedia.org/resource/Social_network_service'
+
+ entity_mention_model = {} # EntityMention
+ entity_mention_model['text'] = 'testString'
+ entity_mention_model['location'] = [38]
+ entity_mention_model['confidence'] = 72.5
+
+ emotion_scores_model = {} # EmotionScores
+ emotion_scores_model['anger'] = 72.5
+ emotion_scores_model['disgust'] = 72.5
+ emotion_scores_model['fear'] = 72.5
+ emotion_scores_model['joy'] = 72.5
+ emotion_scores_model['sadness'] = 72.5
+
+ feature_sentiment_results_model = {} # FeatureSentimentResults
+ feature_sentiment_results_model['score'] = 72.5
+
+ disambiguation_result_model = {} # DisambiguationResult
+ disambiguation_result_model['name'] = 'testString'
+ disambiguation_result_model['dbpedia_resource'] = 'testString'
+ disambiguation_result_model['subtype'] = ['testString']
+
+ entities_result_model = {} # EntitiesResult
+ entities_result_model['type'] = 'testString'
+ entities_result_model['text'] = 'Social network service'
+ entities_result_model['relevance'] = 0.92186
+ entities_result_model['confidence'] = 72.5
+ entities_result_model['mentions'] = [entity_mention_model]
+ entities_result_model['count'] = 38
+ entities_result_model['emotion'] = emotion_scores_model
+ entities_result_model['sentiment'] = feature_sentiment_results_model
+ entities_result_model['disambiguation'] = disambiguation_result_model
+
+ keywords_result_model = {} # KeywordsResult
+ keywords_result_model['count'] = 1
+ keywords_result_model['relevance'] = 0.864624
+ keywords_result_model['text'] = 'curated online courses'
+ keywords_result_model['emotion'] = emotion_scores_model
+ keywords_result_model['sentiment'] = feature_sentiment_results_model
+
+ categories_relevant_text_model = {} # CategoriesRelevantText
+ categories_relevant_text_model['text'] = 'testString'
+
+ categories_result_explanation_model = {} # CategoriesResultExplanation
+ categories_result_explanation_model['relevant_text'] = [categories_relevant_text_model]
+
+ categories_result_model = {} # CategoriesResult
+ categories_result_model['label'] = '/technology and computing/computing/computer software and applications'
+ categories_result_model['score'] = 0.594296
+ categories_result_model['explanation'] = categories_result_explanation_model
+
+ classifications_result_model = {} # ClassificationsResult
+ classifications_result_model['class_name'] = 'temperature'
+ classifications_result_model['confidence'] = 0.562519
+
+ document_emotion_results_model = {} # DocumentEmotionResults
+ document_emotion_results_model['emotion'] = emotion_scores_model
+
+ targeted_emotion_results_model = {} # TargetedEmotionResults
+ targeted_emotion_results_model['text'] = 'testString'
+ targeted_emotion_results_model['emotion'] = emotion_scores_model
+
+ emotion_result_model = {} # EmotionResult
+ emotion_result_model['document'] = document_emotion_results_model
+ emotion_result_model['targets'] = [targeted_emotion_results_model]
+
+ author_model = {} # Author
+ author_model['name'] = 'testString'
+
+ feed_model = {} # Feed
+ feed_model['link'] = 'testString'
+
+ features_results_metadata_model = {} # FeaturesResultsMetadata
+ features_results_metadata_model['authors'] = [author_model]
+ features_results_metadata_model['publication_date'] = 'testString'
+ features_results_metadata_model['title'] = 'testString'
+ features_results_metadata_model['image'] = 'testString'
+ features_results_metadata_model['feeds'] = [feed_model]
+
+ relation_entity_model = {} # RelationEntity
+ relation_entity_model['text'] = 'Best Actor'
+ relation_entity_model['type'] = 'EntertainmentAward'
+
+ relation_argument_model = {} # RelationArgument
+ relation_argument_model['entities'] = [relation_entity_model]
+ relation_argument_model['location'] = [22, 32]
+ relation_argument_model['text'] = 'Best Actor'
+
+ relations_result_model = {} # RelationsResult
+ relations_result_model['score'] = 0.680715
+ relations_result_model['sentence'] = 'Leonardo DiCaprio won Best Actor in a Leading Role for his performance.'
+ relations_result_model['type'] = 'awardedTo'
+ relations_result_model['arguments'] = [relation_argument_model]
+
+ semantic_roles_entity_model = {} # SemanticRolesEntity
+ semantic_roles_entity_model['type'] = 'testString'
+ semantic_roles_entity_model['text'] = 'testString'
+
+ semantic_roles_keyword_model = {} # SemanticRolesKeyword
+ semantic_roles_keyword_model['text'] = 'testString'
+
+ semantic_roles_result_subject_model = {} # SemanticRolesResultSubject
+ semantic_roles_result_subject_model['text'] = 'IBM'
+ semantic_roles_result_subject_model['entities'] = [semantic_roles_entity_model]
+ semantic_roles_result_subject_model['keywords'] = [semantic_roles_keyword_model]
+
+ semantic_roles_verb_model = {} # SemanticRolesVerb
+ semantic_roles_verb_model['text'] = 'have'
+ semantic_roles_verb_model['tense'] = 'present'
+
+ semantic_roles_result_action_model = {} # SemanticRolesResultAction
+ semantic_roles_result_action_model['text'] = 'has'
+ semantic_roles_result_action_model['normalized'] = 'have'
+ semantic_roles_result_action_model['verb'] = semantic_roles_verb_model
+
+ semantic_roles_result_object_model = {} # SemanticRolesResultObject
+ semantic_roles_result_object_model['text'] = 'one of the largest workforces in the world'
+ semantic_roles_result_object_model['keywords'] = [semantic_roles_keyword_model]
+
+ semantic_roles_result_model = {} # SemanticRolesResult
+ semantic_roles_result_model['sentence'] = 'IBM has one of the largest workforces in the world'
+ semantic_roles_result_model['subject'] = semantic_roles_result_subject_model
+ semantic_roles_result_model['action'] = semantic_roles_result_action_model
+ semantic_roles_result_model['object'] = semantic_roles_result_object_model
+
+ document_sentiment_results_model = {} # DocumentSentimentResults
+ document_sentiment_results_model['label'] = 'testString'
+ document_sentiment_results_model['score'] = 72.5
+
+ targeted_sentiment_results_model = {} # TargetedSentimentResults
+ targeted_sentiment_results_model['text'] = 'testString'
+ targeted_sentiment_results_model['score'] = 72.5
+
+ sentiment_result_model = {} # SentimentResult
+ sentiment_result_model['document'] = document_sentiment_results_model
+ sentiment_result_model['targets'] = [targeted_sentiment_results_model]
+
+ token_result_model = {} # TokenResult
+ token_result_model['text'] = 'testString'
+ token_result_model['part_of_speech'] = 'ADJ'
+ token_result_model['location'] = [38]
+ token_result_model['lemma'] = 'testString'
+
+ sentence_result_model = {} # SentenceResult
+ sentence_result_model['text'] = 'testString'
+ sentence_result_model['location'] = [38]
+
+ syntax_result_model = {} # SyntaxResult
+ syntax_result_model['tokens'] = [token_result_model]
+ syntax_result_model['sentences'] = [sentence_result_model]
+
+ # Construct a json representation of a AnalysisResults model
+ analysis_results_model_json = {}
+ analysis_results_model_json['language'] = 'testString'
+ analysis_results_model_json['analyzed_text'] = 'testString'
+ analysis_results_model_json['retrieved_url'] = 'testString'
+ analysis_results_model_json['usage'] = analysis_results_usage_model
+ analysis_results_model_json['concepts'] = [concepts_result_model]
+ analysis_results_model_json['entities'] = [entities_result_model]
+ analysis_results_model_json['keywords'] = [keywords_result_model]
+ analysis_results_model_json['categories'] = [categories_result_model]
+ analysis_results_model_json['classifications'] = [classifications_result_model]
+ analysis_results_model_json['emotion'] = emotion_result_model
+ analysis_results_model_json['metadata'] = features_results_metadata_model
+ analysis_results_model_json['relations'] = [relations_result_model]
+ analysis_results_model_json['semantic_roles'] = [semantic_roles_result_model]
+ analysis_results_model_json['sentiment'] = sentiment_result_model
+ analysis_results_model_json['syntax'] = syntax_result_model
+
+ # Construct a model instance of AnalysisResults by calling from_dict on the json representation
+ analysis_results_model = AnalysisResults.from_dict(analysis_results_model_json)
+ assert analysis_results_model != False
+
+ # Construct a model instance of AnalysisResults by calling from_dict on the json representation
+ analysis_results_model_dict = AnalysisResults.from_dict(analysis_results_model_json).__dict__
+ analysis_results_model2 = AnalysisResults(**analysis_results_model_dict)
+
+ # Verify the model instances are equivalent
+ assert analysis_results_model == analysis_results_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ analysis_results_model_json2 = analysis_results_model.to_dict()
+ assert analysis_results_model_json2 == analysis_results_model_json
+
+
+class TestModel_AnalysisResultsUsage:
+ """
+ Test Class for AnalysisResultsUsage
+ """
+
+ def test_analysis_results_usage_serialization(self):
+ """
+ Test serialization/deserialization for AnalysisResultsUsage
+ """
+
+ # Construct a json representation of a AnalysisResultsUsage model
+ analysis_results_usage_model_json = {}
+ analysis_results_usage_model_json['features'] = 38
+ analysis_results_usage_model_json['text_characters'] = 38
+ analysis_results_usage_model_json['text_units'] = 38
+
+ # Construct a model instance of AnalysisResultsUsage by calling from_dict on the json representation
+ analysis_results_usage_model = AnalysisResultsUsage.from_dict(analysis_results_usage_model_json)
+ assert analysis_results_usage_model != False
+
+ # Construct a model instance of AnalysisResultsUsage by calling from_dict on the json representation
+ analysis_results_usage_model_dict = AnalysisResultsUsage.from_dict(analysis_results_usage_model_json).__dict__
+ analysis_results_usage_model2 = AnalysisResultsUsage(**analysis_results_usage_model_dict)
+
+ # Verify the model instances are equivalent
+ assert analysis_results_usage_model == analysis_results_usage_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ analysis_results_usage_model_json2 = analysis_results_usage_model.to_dict()
+ assert analysis_results_usage_model_json2 == analysis_results_usage_model_json
+
+
+class TestModel_Author:
+ """
+ Test Class for Author
+ """
+
+ def test_author_serialization(self):
+ """
+ Test serialization/deserialization for Author
+ """
+
+ # Construct a json representation of a Author model
+ author_model_json = {}
+ author_model_json['name'] = 'testString'
+
+ # Construct a model instance of Author by calling from_dict on the json representation
+ author_model = Author.from_dict(author_model_json)
+ assert author_model != False
+
+ # Construct a model instance of Author by calling from_dict on the json representation
+ author_model_dict = Author.from_dict(author_model_json).__dict__
+ author_model2 = Author(**author_model_dict)
+
+ # Verify the model instances are equivalent
+ assert author_model == author_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ author_model_json2 = author_model.to_dict()
+ assert author_model_json2 == author_model_json
+
+
+class TestModel_CategoriesModel:
+ """
+ Test Class for CategoriesModel
+ """
+
+ def test_categories_model_serialization(self):
+ """
+ Test serialization/deserialization for CategoriesModel
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ notice_model = {} # Notice
+
+ # Construct a json representation of a CategoriesModel model
+ categories_model_model_json = {}
+ categories_model_model_json['name'] = 'testString'
+ categories_model_model_json['user_metadata'] = {'region': 'North America', 'latest': True}
+ categories_model_model_json['language'] = 'testString'
+ categories_model_model_json['description'] = 'testString'
+ categories_model_model_json['model_version'] = 'testString'
+ categories_model_model_json['workspace_id'] = 'testString'
+ categories_model_model_json['version_description'] = 'testString'
+ categories_model_model_json['features'] = ['testString']
+ categories_model_model_json['status'] = 'starting'
+ categories_model_model_json['model_id'] = 'testString'
+ categories_model_model_json['created'] = '2019-01-01T12:00:00Z'
+ categories_model_model_json['notices'] = [notice_model]
+ categories_model_model_json['last_trained'] = '2019-01-01T12:00:00Z'
+ categories_model_model_json['last_deployed'] = '2019-01-01T12:00:00Z'
+
+ # Construct a model instance of CategoriesModel by calling from_dict on the json representation
+ categories_model_model = CategoriesModel.from_dict(categories_model_model_json)
+ assert categories_model_model != False
+
+ # Construct a model instance of CategoriesModel by calling from_dict on the json representation
+ categories_model_model_dict = CategoriesModel.from_dict(categories_model_model_json).__dict__
+ categories_model_model2 = CategoriesModel(**categories_model_model_dict)
+
+ # Verify the model instances are equivalent
+ assert categories_model_model == categories_model_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ categories_model_model_json2 = categories_model_model.to_dict()
+ assert categories_model_model_json2 == categories_model_model_json
+
+
+class TestModel_CategoriesModelList:
+ """
+ Test Class for CategoriesModelList
+ """
+
+ def test_categories_model_list_serialization(self):
+ """
+ Test serialization/deserialization for CategoriesModelList
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ notice_model = {} # Notice
+
+ categories_model_model = {} # CategoriesModel
+ categories_model_model['name'] = 'testString'
+ categories_model_model['user_metadata'] = {'region': 'North America', 'latest': True}
+ categories_model_model['language'] = 'testString'
+ categories_model_model['description'] = 'testString'
+ categories_model_model['model_version'] = 'testString'
+ categories_model_model['workspace_id'] = 'testString'
+ categories_model_model['version_description'] = 'testString'
+ categories_model_model['features'] = ['testString']
+ categories_model_model['status'] = 'starting'
+ categories_model_model['model_id'] = 'testString'
+ categories_model_model['created'] = '2019-01-01T12:00:00Z'
+ categories_model_model['notices'] = [notice_model]
+ categories_model_model['last_trained'] = '2019-01-01T12:00:00Z'
+ categories_model_model['last_deployed'] = '2019-01-01T12:00:00Z'
+
+ # Construct a json representation of a CategoriesModelList model
+ categories_model_list_model_json = {}
+ categories_model_list_model_json['models'] = [categories_model_model]
+
+ # Construct a model instance of CategoriesModelList by calling from_dict on the json representation
+ categories_model_list_model = CategoriesModelList.from_dict(categories_model_list_model_json)
+ assert categories_model_list_model != False
+
+ # Construct a model instance of CategoriesModelList by calling from_dict on the json representation
+ categories_model_list_model_dict = CategoriesModelList.from_dict(categories_model_list_model_json).__dict__
+ categories_model_list_model2 = CategoriesModelList(**categories_model_list_model_dict)
+
+ # Verify the model instances are equivalent
+ assert categories_model_list_model == categories_model_list_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ categories_model_list_model_json2 = categories_model_list_model.to_dict()
+ assert categories_model_list_model_json2 == categories_model_list_model_json
+
+
+class TestModel_CategoriesOptions:
+ """
+ Test Class for CategoriesOptions
+ """
+
+ def test_categories_options_serialization(self):
+ """
+ Test serialization/deserialization for CategoriesOptions
+ """
+
+ # Construct a json representation of a CategoriesOptions model
+ categories_options_model_json = {}
+ categories_options_model_json['explanation'] = False
+ categories_options_model_json['limit'] = 3
+ categories_options_model_json['model'] = 'testString'
+
+ # Construct a model instance of CategoriesOptions by calling from_dict on the json representation
+ categories_options_model = CategoriesOptions.from_dict(categories_options_model_json)
+ assert categories_options_model != False
+
+ # Construct a model instance of CategoriesOptions by calling from_dict on the json representation
+ categories_options_model_dict = CategoriesOptions.from_dict(categories_options_model_json).__dict__
+ categories_options_model2 = CategoriesOptions(**categories_options_model_dict)
+
+ # Verify the model instances are equivalent
+ assert categories_options_model == categories_options_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ categories_options_model_json2 = categories_options_model.to_dict()
+ assert categories_options_model_json2 == categories_options_model_json
+
+
+class TestModel_CategoriesRelevantText:
+ """
+ Test Class for CategoriesRelevantText
+ """
+
+ def test_categories_relevant_text_serialization(self):
+ """
+ Test serialization/deserialization for CategoriesRelevantText
+ """
+
+ # Construct a json representation of a CategoriesRelevantText model
+ categories_relevant_text_model_json = {}
+ categories_relevant_text_model_json['text'] = 'testString'
+
+ # Construct a model instance of CategoriesRelevantText by calling from_dict on the json representation
+ categories_relevant_text_model = CategoriesRelevantText.from_dict(categories_relevant_text_model_json)
+ assert categories_relevant_text_model != False
+
+ # Construct a model instance of CategoriesRelevantText by calling from_dict on the json representation
+ categories_relevant_text_model_dict = CategoriesRelevantText.from_dict(categories_relevant_text_model_json).__dict__
+ categories_relevant_text_model2 = CategoriesRelevantText(**categories_relevant_text_model_dict)
+
+ # Verify the model instances are equivalent
+ assert categories_relevant_text_model == categories_relevant_text_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ categories_relevant_text_model_json2 = categories_relevant_text_model.to_dict()
+ assert categories_relevant_text_model_json2 == categories_relevant_text_model_json
+
+
+class TestModel_CategoriesResult:
+ """
+ Test Class for CategoriesResult
+ """
+
+ def test_categories_result_serialization(self):
+ """
+ Test serialization/deserialization for CategoriesResult
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ categories_relevant_text_model = {} # CategoriesRelevantText
+ categories_relevant_text_model['text'] = 'testString'
+
+ categories_result_explanation_model = {} # CategoriesResultExplanation
+ categories_result_explanation_model['relevant_text'] = [categories_relevant_text_model]
+
+ # Construct a json representation of a CategoriesResult model
+ categories_result_model_json = {}
+ categories_result_model_json['label'] = 'testString'
+ categories_result_model_json['score'] = 72.5
+ categories_result_model_json['explanation'] = categories_result_explanation_model
+
+ # Construct a model instance of CategoriesResult by calling from_dict on the json representation
+ categories_result_model = CategoriesResult.from_dict(categories_result_model_json)
+ assert categories_result_model != False
+
+ # Construct a model instance of CategoriesResult by calling from_dict on the json representation
+ categories_result_model_dict = CategoriesResult.from_dict(categories_result_model_json).__dict__
+ categories_result_model2 = CategoriesResult(**categories_result_model_dict)
+
+ # Verify the model instances are equivalent
+ assert categories_result_model == categories_result_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ categories_result_model_json2 = categories_result_model.to_dict()
+ assert categories_result_model_json2 == categories_result_model_json
+
+
+class TestModel_CategoriesResultExplanation:
+ """
+ Test Class for CategoriesResultExplanation
+ """
+
+ def test_categories_result_explanation_serialization(self):
+ """
+ Test serialization/deserialization for CategoriesResultExplanation
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ categories_relevant_text_model = {} # CategoriesRelevantText
+ categories_relevant_text_model['text'] = 'testString'
+
+ # Construct a json representation of a CategoriesResultExplanation model
+ categories_result_explanation_model_json = {}
+ categories_result_explanation_model_json['relevant_text'] = [categories_relevant_text_model]
+
+ # Construct a model instance of CategoriesResultExplanation by calling from_dict on the json representation
+ categories_result_explanation_model = CategoriesResultExplanation.from_dict(categories_result_explanation_model_json)
+ assert categories_result_explanation_model != False
+
+ # Construct a model instance of CategoriesResultExplanation by calling from_dict on the json representation
+ categories_result_explanation_model_dict = CategoriesResultExplanation.from_dict(categories_result_explanation_model_json).__dict__
+ categories_result_explanation_model2 = CategoriesResultExplanation(**categories_result_explanation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert categories_result_explanation_model == categories_result_explanation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ categories_result_explanation_model_json2 = categories_result_explanation_model.to_dict()
+ assert categories_result_explanation_model_json2 == categories_result_explanation_model_json
+
+
+class TestModel_ClassificationsModel:
+ """
+ Test Class for ClassificationsModel
+ """
+
+ def test_classifications_model_serialization(self):
+ """
+ Test serialization/deserialization for ClassificationsModel
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ notice_model = {} # Notice
+
+ # Construct a json representation of a ClassificationsModel model
+ classifications_model_model_json = {}
+ classifications_model_model_json['name'] = 'testString'
+ classifications_model_model_json['user_metadata'] = {'region': 'North America', 'latest': True}
+ classifications_model_model_json['language'] = 'testString'
+ classifications_model_model_json['description'] = 'testString'
+ classifications_model_model_json['model_version'] = 'testString'
+ classifications_model_model_json['workspace_id'] = 'testString'
+ classifications_model_model_json['version_description'] = 'testString'
+ classifications_model_model_json['features'] = ['testString']
+ classifications_model_model_json['status'] = 'starting'
+ classifications_model_model_json['model_id'] = 'testString'
+ classifications_model_model_json['created'] = '2019-01-01T12:00:00Z'
+ classifications_model_model_json['notices'] = [notice_model]
+ classifications_model_model_json['last_trained'] = '2019-01-01T12:00:00Z'
+ classifications_model_model_json['last_deployed'] = '2019-01-01T12:00:00Z'
+
+ # Construct a model instance of ClassificationsModel by calling from_dict on the json representation
+ classifications_model_model = ClassificationsModel.from_dict(classifications_model_model_json)
+ assert classifications_model_model != False
+
+ # Construct a model instance of ClassificationsModel by calling from_dict on the json representation
+ classifications_model_model_dict = ClassificationsModel.from_dict(classifications_model_model_json).__dict__
+ classifications_model_model2 = ClassificationsModel(**classifications_model_model_dict)
+
+ # Verify the model instances are equivalent
+ assert classifications_model_model == classifications_model_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ classifications_model_model_json2 = classifications_model_model.to_dict()
+ assert classifications_model_model_json2 == classifications_model_model_json
+
+
class TestModel_ClassificationsModelList:
    """
    Test Class for ClassificationsModelList
    """

    def test_classifications_model_list_serialization(self):
        """
        Test serialization/deserialization for ClassificationsModelList.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct dict forms of any model objects needed in order to build this model.

        notice_model = {} # Notice

        classifications_model_model = {} # ClassificationsModel
        classifications_model_model['name'] = 'testString'
        classifications_model_model['user_metadata'] = {'region': 'North America', 'latest': True}
        classifications_model_model['language'] = 'testString'
        classifications_model_model['description'] = 'testString'
        classifications_model_model['model_version'] = 'testString'
        classifications_model_model['workspace_id'] = 'testString'
        classifications_model_model['version_description'] = 'testString'
        classifications_model_model['features'] = ['testString']
        classifications_model_model['status'] = 'starting'
        classifications_model_model['model_id'] = 'testString'
        classifications_model_model['created'] = '2019-01-01T12:00:00Z'
        classifications_model_model['notices'] = [notice_model]
        classifications_model_model['last_trained'] = '2019-01-01T12:00:00Z'
        classifications_model_model['last_deployed'] = '2019-01-01T12:00:00Z'

        # Construct a json representation of a ClassificationsModelList model
        classifications_model_list_model_json = {}
        classifications_model_list_model_json['models'] = [classifications_model_model]

        # Construct a model instance of ClassificationsModelList by calling from_dict on the json representation
        classifications_model_list_model = ClassificationsModelList.from_dict(classifications_model_list_model_json)
        assert classifications_model_list_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        classifications_model_list_model_dict = ClassificationsModelList.from_dict(classifications_model_list_model_json).__dict__
        classifications_model_list_model2 = ClassificationsModelList(**classifications_model_list_model_dict)

        # Verify the model instances are equivalent
        assert classifications_model_list_model == classifications_model_list_model2

        # Convert model instance back to dict and verify no loss of data
        classifications_model_list_model_json2 = classifications_model_list_model.to_dict()
        assert classifications_model_list_model_json2 == classifications_model_list_model_json
+
+
class TestModel_ClassificationsOptions:
    """
    Test Class for ClassificationsOptions
    """

    def test_classifications_options_serialization(self):
        """
        Test serialization/deserialization for ClassificationsOptions.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of a ClassificationsOptions model
        classifications_options_model_json = {}
        classifications_options_model_json['model'] = 'testString'

        # Construct a model instance of ClassificationsOptions by calling from_dict on the json representation
        classifications_options_model = ClassificationsOptions.from_dict(classifications_options_model_json)
        assert classifications_options_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        classifications_options_model_dict = ClassificationsOptions.from_dict(classifications_options_model_json).__dict__
        classifications_options_model2 = ClassificationsOptions(**classifications_options_model_dict)

        # Verify the model instances are equivalent
        assert classifications_options_model == classifications_options_model2

        # Convert model instance back to dict and verify no loss of data
        classifications_options_model_json2 = classifications_options_model.to_dict()
        assert classifications_options_model_json2 == classifications_options_model_json
+
+
class TestModel_ClassificationsResult:
    """
    Test Class for ClassificationsResult
    """

    def test_classifications_result_serialization(self):
        """
        Test serialization/deserialization for ClassificationsResult.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of a ClassificationsResult model
        classifications_result_model_json = {}
        classifications_result_model_json['class_name'] = 'testString'
        classifications_result_model_json['confidence'] = 72.5

        # Construct a model instance of ClassificationsResult by calling from_dict on the json representation
        classifications_result_model = ClassificationsResult.from_dict(classifications_result_model_json)
        assert classifications_result_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        classifications_result_model_dict = ClassificationsResult.from_dict(classifications_result_model_json).__dict__
        classifications_result_model2 = ClassificationsResult(**classifications_result_model_dict)

        # Verify the model instances are equivalent
        assert classifications_result_model == classifications_result_model2

        # Convert model instance back to dict and verify no loss of data
        classifications_result_model_json2 = classifications_result_model.to_dict()
        assert classifications_result_model_json2 == classifications_result_model_json
+
+
class TestModel_ClassificationsTrainingParameters:
    """
    Test Class for ClassificationsTrainingParameters
    """

    def test_classifications_training_parameters_serialization(self):
        """
        Test serialization/deserialization for ClassificationsTrainingParameters.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of a ClassificationsTrainingParameters model
        classifications_training_parameters_model_json = {}
        classifications_training_parameters_model_json['model_type'] = 'single_label'

        # Construct a model instance of ClassificationsTrainingParameters by calling from_dict on the json representation
        classifications_training_parameters_model = ClassificationsTrainingParameters.from_dict(classifications_training_parameters_model_json)
        assert classifications_training_parameters_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        classifications_training_parameters_model_dict = ClassificationsTrainingParameters.from_dict(classifications_training_parameters_model_json).__dict__
        classifications_training_parameters_model2 = ClassificationsTrainingParameters(**classifications_training_parameters_model_dict)

        # Verify the model instances are equivalent
        assert classifications_training_parameters_model == classifications_training_parameters_model2

        # Convert model instance back to dict and verify no loss of data
        classifications_training_parameters_model_json2 = classifications_training_parameters_model.to_dict()
        assert classifications_training_parameters_model_json2 == classifications_training_parameters_model_json
+
+
class TestModel_ConceptsOptions:
    """
    Test Class for ConceptsOptions
    """

    def test_concepts_options_serialization(self):
        """
        Test serialization/deserialization for ConceptsOptions.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of a ConceptsOptions model
        concepts_options_model_json = {}
        concepts_options_model_json['limit'] = 8

        # Construct a model instance of ConceptsOptions by calling from_dict on the json representation
        concepts_options_model = ConceptsOptions.from_dict(concepts_options_model_json)
        assert concepts_options_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        concepts_options_model_dict = ConceptsOptions.from_dict(concepts_options_model_json).__dict__
        concepts_options_model2 = ConceptsOptions(**concepts_options_model_dict)

        # Verify the model instances are equivalent
        assert concepts_options_model == concepts_options_model2

        # Convert model instance back to dict and verify no loss of data
        concepts_options_model_json2 = concepts_options_model.to_dict()
        assert concepts_options_model_json2 == concepts_options_model_json
+
+
class TestModel_ConceptsResult:
    """
    Test Class for ConceptsResult
    """

    def test_concepts_result_serialization(self):
        """
        Test serialization/deserialization for ConceptsResult.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of a ConceptsResult model
        concepts_result_model_json = {}
        concepts_result_model_json['text'] = 'testString'
        concepts_result_model_json['relevance'] = 72.5
        concepts_result_model_json['dbpedia_resource'] = 'testString'

        # Construct a model instance of ConceptsResult by calling from_dict on the json representation
        concepts_result_model = ConceptsResult.from_dict(concepts_result_model_json)
        assert concepts_result_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        concepts_result_model_dict = ConceptsResult.from_dict(concepts_result_model_json).__dict__
        concepts_result_model2 = ConceptsResult(**concepts_result_model_dict)

        # Verify the model instances are equivalent
        assert concepts_result_model == concepts_result_model2

        # Convert model instance back to dict and verify no loss of data
        concepts_result_model_json2 = concepts_result_model.to_dict()
        assert concepts_result_model_json2 == concepts_result_model_json
+
+
class TestModel_DeleteModelResults:
    """
    Test Class for DeleteModelResults
    """

    def test_delete_model_results_serialization(self):
        """
        Test serialization/deserialization for DeleteModelResults.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of a DeleteModelResults model
        delete_model_results_model_json = {}
        delete_model_results_model_json['deleted'] = 'testString'

        # Construct a model instance of DeleteModelResults by calling from_dict on the json representation
        delete_model_results_model = DeleteModelResults.from_dict(delete_model_results_model_json)
        assert delete_model_results_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        delete_model_results_model_dict = DeleteModelResults.from_dict(delete_model_results_model_json).__dict__
        delete_model_results_model2 = DeleteModelResults(**delete_model_results_model_dict)

        # Verify the model instances are equivalent
        assert delete_model_results_model == delete_model_results_model2

        # Convert model instance back to dict and verify no loss of data
        delete_model_results_model_json2 = delete_model_results_model.to_dict()
        assert delete_model_results_model_json2 == delete_model_results_model_json
+
+
class TestModel_DisambiguationResult:
    """
    Test Class for DisambiguationResult
    """

    def test_disambiguation_result_serialization(self):
        """
        Test serialization/deserialization for DisambiguationResult.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of a DisambiguationResult model
        disambiguation_result_model_json = {}
        disambiguation_result_model_json['name'] = 'testString'
        disambiguation_result_model_json['dbpedia_resource'] = 'testString'
        disambiguation_result_model_json['subtype'] = ['testString']

        # Construct a model instance of DisambiguationResult by calling from_dict on the json representation
        disambiguation_result_model = DisambiguationResult.from_dict(disambiguation_result_model_json)
        assert disambiguation_result_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        disambiguation_result_model_dict = DisambiguationResult.from_dict(disambiguation_result_model_json).__dict__
        disambiguation_result_model2 = DisambiguationResult(**disambiguation_result_model_dict)

        # Verify the model instances are equivalent
        assert disambiguation_result_model == disambiguation_result_model2

        # Convert model instance back to dict and verify no loss of data
        disambiguation_result_model_json2 = disambiguation_result_model.to_dict()
        assert disambiguation_result_model_json2 == disambiguation_result_model_json
+
+
class TestModel_DocumentEmotionResults:
    """
    Test Class for DocumentEmotionResults
    """

    def test_document_emotion_results_serialization(self):
        """
        Test serialization/deserialization for DocumentEmotionResults.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct dict forms of any model objects needed in order to build this model.

        emotion_scores_model = {} # EmotionScores
        emotion_scores_model['anger'] = 72.5
        emotion_scores_model['disgust'] = 72.5
        emotion_scores_model['fear'] = 72.5
        emotion_scores_model['joy'] = 72.5
        emotion_scores_model['sadness'] = 72.5

        # Construct a json representation of a DocumentEmotionResults model
        document_emotion_results_model_json = {}
        document_emotion_results_model_json['emotion'] = emotion_scores_model

        # Construct a model instance of DocumentEmotionResults by calling from_dict on the json representation
        document_emotion_results_model = DocumentEmotionResults.from_dict(document_emotion_results_model_json)
        assert document_emotion_results_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        document_emotion_results_model_dict = DocumentEmotionResults.from_dict(document_emotion_results_model_json).__dict__
        document_emotion_results_model2 = DocumentEmotionResults(**document_emotion_results_model_dict)

        # Verify the model instances are equivalent
        assert document_emotion_results_model == document_emotion_results_model2

        # Convert model instance back to dict and verify no loss of data
        document_emotion_results_model_json2 = document_emotion_results_model.to_dict()
        assert document_emotion_results_model_json2 == document_emotion_results_model_json
+
+
class TestModel_DocumentSentimentResults:
    """
    Test Class for DocumentSentimentResults
    """

    def test_document_sentiment_results_serialization(self):
        """
        Test serialization/deserialization for DocumentSentimentResults.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of a DocumentSentimentResults model
        document_sentiment_results_model_json = {}
        document_sentiment_results_model_json['label'] = 'testString'
        document_sentiment_results_model_json['score'] = 72.5

        # Construct a model instance of DocumentSentimentResults by calling from_dict on the json representation
        document_sentiment_results_model = DocumentSentimentResults.from_dict(document_sentiment_results_model_json)
        assert document_sentiment_results_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        document_sentiment_results_model_dict = DocumentSentimentResults.from_dict(document_sentiment_results_model_json).__dict__
        document_sentiment_results_model2 = DocumentSentimentResults(**document_sentiment_results_model_dict)

        # Verify the model instances are equivalent
        assert document_sentiment_results_model == document_sentiment_results_model2

        # Convert model instance back to dict and verify no loss of data
        document_sentiment_results_model_json2 = document_sentiment_results_model.to_dict()
        assert document_sentiment_results_model_json2 == document_sentiment_results_model_json
+
+
class TestModel_EmotionOptions:
    """
    Test Class for EmotionOptions
    """

    def test_emotion_options_serialization(self):
        """
        Test serialization/deserialization for EmotionOptions.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of an EmotionOptions model
        emotion_options_model_json = {}
        emotion_options_model_json['document'] = True
        emotion_options_model_json['targets'] = ['testString']

        # Construct a model instance of EmotionOptions by calling from_dict on the json representation
        emotion_options_model = EmotionOptions.from_dict(emotion_options_model_json)
        assert emotion_options_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        emotion_options_model_dict = EmotionOptions.from_dict(emotion_options_model_json).__dict__
        emotion_options_model2 = EmotionOptions(**emotion_options_model_dict)

        # Verify the model instances are equivalent
        assert emotion_options_model == emotion_options_model2

        # Convert model instance back to dict and verify no loss of data
        emotion_options_model_json2 = emotion_options_model.to_dict()
        assert emotion_options_model_json2 == emotion_options_model_json
+
+
class TestModel_EmotionResult:
    """
    Test Class for EmotionResult
    """

    def test_emotion_result_serialization(self):
        """
        Test serialization/deserialization for EmotionResult.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct dict forms of any model objects needed in order to build this model.

        emotion_scores_model = {} # EmotionScores
        emotion_scores_model['anger'] = 0.041796
        emotion_scores_model['disgust'] = 0.022637
        emotion_scores_model['fear'] = 0.033387
        emotion_scores_model['joy'] = 0.563273
        emotion_scores_model['sadness'] = 0.32665

        document_emotion_results_model = {} # DocumentEmotionResults
        document_emotion_results_model['emotion'] = emotion_scores_model

        targeted_emotion_results_model = {} # TargetedEmotionResults
        targeted_emotion_results_model['text'] = 'apples'
        targeted_emotion_results_model['emotion'] = emotion_scores_model

        # Construct a json representation of an EmotionResult model
        emotion_result_model_json = {}
        emotion_result_model_json['document'] = document_emotion_results_model
        emotion_result_model_json['targets'] = [targeted_emotion_results_model]

        # Construct a model instance of EmotionResult by calling from_dict on the json representation
        emotion_result_model = EmotionResult.from_dict(emotion_result_model_json)
        assert emotion_result_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        emotion_result_model_dict = EmotionResult.from_dict(emotion_result_model_json).__dict__
        emotion_result_model2 = EmotionResult(**emotion_result_model_dict)

        # Verify the model instances are equivalent
        assert emotion_result_model == emotion_result_model2

        # Convert model instance back to dict and verify no loss of data
        emotion_result_model_json2 = emotion_result_model.to_dict()
        assert emotion_result_model_json2 == emotion_result_model_json
+
+
class TestModel_EmotionScores:
    """
    Test Class for EmotionScores
    """

    def test_emotion_scores_serialization(self):
        """
        Test serialization/deserialization for EmotionScores.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of an EmotionScores model
        emotion_scores_model_json = {}
        emotion_scores_model_json['anger'] = 72.5
        emotion_scores_model_json['disgust'] = 72.5
        emotion_scores_model_json['fear'] = 72.5
        emotion_scores_model_json['joy'] = 72.5
        emotion_scores_model_json['sadness'] = 72.5

        # Construct a model instance of EmotionScores by calling from_dict on the json representation
        emotion_scores_model = EmotionScores.from_dict(emotion_scores_model_json)
        assert emotion_scores_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        emotion_scores_model_dict = EmotionScores.from_dict(emotion_scores_model_json).__dict__
        emotion_scores_model2 = EmotionScores(**emotion_scores_model_dict)

        # Verify the model instances are equivalent
        assert emotion_scores_model == emotion_scores_model2

        # Convert model instance back to dict and verify no loss of data
        emotion_scores_model_json2 = emotion_scores_model.to_dict()
        assert emotion_scores_model_json2 == emotion_scores_model_json
+
+
class TestModel_EntitiesOptions:
    """
    Test Class for EntitiesOptions
    """

    def test_entities_options_serialization(self):
        """
        Test serialization/deserialization for EntitiesOptions.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of an EntitiesOptions model
        entities_options_model_json = {}
        entities_options_model_json['limit'] = 50
        entities_options_model_json['mentions'] = False
        entities_options_model_json['model'] = 'testString'
        entities_options_model_json['sentiment'] = False
        entities_options_model_json['emotion'] = False

        # Construct a model instance of EntitiesOptions by calling from_dict on the json representation
        entities_options_model = EntitiesOptions.from_dict(entities_options_model_json)
        assert entities_options_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        entities_options_model_dict = EntitiesOptions.from_dict(entities_options_model_json).__dict__
        entities_options_model2 = EntitiesOptions(**entities_options_model_dict)

        # Verify the model instances are equivalent
        assert entities_options_model == entities_options_model2

        # Convert model instance back to dict and verify no loss of data
        entities_options_model_json2 = entities_options_model.to_dict()
        assert entities_options_model_json2 == entities_options_model_json
+
+
class TestModel_EntitiesResult:
    """
    Test Class for EntitiesResult
    """

    def test_entities_result_serialization(self):
        """
        Test serialization/deserialization for EntitiesResult.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct dict forms of any model objects needed in order to build this model.

        entity_mention_model = {} # EntityMention
        entity_mention_model['text'] = 'testString'
        entity_mention_model['location'] = [38]
        entity_mention_model['confidence'] = 72.5

        emotion_scores_model = {} # EmotionScores
        emotion_scores_model['anger'] = 72.5
        emotion_scores_model['disgust'] = 72.5
        emotion_scores_model['fear'] = 72.5
        emotion_scores_model['joy'] = 72.5
        emotion_scores_model['sadness'] = 72.5

        feature_sentiment_results_model = {} # FeatureSentimentResults
        feature_sentiment_results_model['score'] = 72.5

        disambiguation_result_model = {} # DisambiguationResult
        disambiguation_result_model['name'] = 'testString'
        disambiguation_result_model['dbpedia_resource'] = 'testString'
        disambiguation_result_model['subtype'] = ['testString']

        # Construct a json representation of an EntitiesResult model
        entities_result_model_json = {}
        entities_result_model_json['type'] = 'testString'
        entities_result_model_json['text'] = 'testString'
        entities_result_model_json['relevance'] = 72.5
        entities_result_model_json['confidence'] = 72.5
        entities_result_model_json['mentions'] = [entity_mention_model]
        entities_result_model_json['count'] = 38
        entities_result_model_json['emotion'] = emotion_scores_model
        entities_result_model_json['sentiment'] = feature_sentiment_results_model
        entities_result_model_json['disambiguation'] = disambiguation_result_model

        # Construct a model instance of EntitiesResult by calling from_dict on the json representation
        entities_result_model = EntitiesResult.from_dict(entities_result_model_json)
        assert entities_result_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        entities_result_model_dict = EntitiesResult.from_dict(entities_result_model_json).__dict__
        entities_result_model2 = EntitiesResult(**entities_result_model_dict)

        # Verify the model instances are equivalent
        assert entities_result_model == entities_result_model2

        # Convert model instance back to dict and verify no loss of data
        entities_result_model_json2 = entities_result_model.to_dict()
        assert entities_result_model_json2 == entities_result_model_json
+
+
class TestModel_EntityMention:
    """
    Test Class for EntityMention
    """

    def test_entity_mention_serialization(self):
        """
        Test serialization/deserialization for EntityMention.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of an EntityMention model
        entity_mention_model_json = {}
        entity_mention_model_json['text'] = 'testString'
        entity_mention_model_json['location'] = [38]
        entity_mention_model_json['confidence'] = 72.5

        # Construct a model instance of EntityMention by calling from_dict on the json representation
        entity_mention_model = EntityMention.from_dict(entity_mention_model_json)
        assert entity_mention_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        entity_mention_model_dict = EntityMention.from_dict(entity_mention_model_json).__dict__
        entity_mention_model2 = EntityMention(**entity_mention_model_dict)

        # Verify the model instances are equivalent
        assert entity_mention_model == entity_mention_model2

        # Convert model instance back to dict and verify no loss of data
        entity_mention_model_json2 = entity_mention_model.to_dict()
        assert entity_mention_model_json2 == entity_mention_model_json
+
+
class TestModel_FeatureSentimentResults:
    """
    Test Class for FeatureSentimentResults
    """

    def test_feature_sentiment_results_serialization(self):
        """
        Test serialization/deserialization for FeatureSentimentResults.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of a FeatureSentimentResults model
        feature_sentiment_results_model_json = {}
        feature_sentiment_results_model_json['score'] = 72.5

        # Construct a model instance of FeatureSentimentResults by calling from_dict on the json representation
        feature_sentiment_results_model = FeatureSentimentResults.from_dict(feature_sentiment_results_model_json)
        assert feature_sentiment_results_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        feature_sentiment_results_model_dict = FeatureSentimentResults.from_dict(feature_sentiment_results_model_json).__dict__
        feature_sentiment_results_model2 = FeatureSentimentResults(**feature_sentiment_results_model_dict)

        # Verify the model instances are equivalent
        assert feature_sentiment_results_model == feature_sentiment_results_model2

        # Convert model instance back to dict and verify no loss of data
        feature_sentiment_results_model_json2 = feature_sentiment_results_model.to_dict()
        assert feature_sentiment_results_model_json2 == feature_sentiment_results_model_json
+
+
class TestModel_Features:
    """
    Test Class for Features
    """

    def test_features_serialization(self):
        """
        Test serialization/deserialization for Features.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct dict forms of any model objects needed in order to build this model.

        classifications_options_model = {} # ClassificationsOptions
        classifications_options_model['model'] = 'testString'

        concepts_options_model = {} # ConceptsOptions
        concepts_options_model['limit'] = 8

        emotion_options_model = {} # EmotionOptions
        emotion_options_model['document'] = True
        emotion_options_model['targets'] = ['testString']

        entities_options_model = {} # EntitiesOptions
        entities_options_model['limit'] = 50
        entities_options_model['mentions'] = False
        entities_options_model['model'] = 'testString'
        entities_options_model['sentiment'] = False
        entities_options_model['emotion'] = False

        keywords_options_model = {} # KeywordsOptions
        keywords_options_model['limit'] = 50
        keywords_options_model['sentiment'] = False
        keywords_options_model['emotion'] = False

        relations_options_model = {} # RelationsOptions
        relations_options_model['model'] = 'testString'

        semantic_roles_options_model = {} # SemanticRolesOptions
        semantic_roles_options_model['limit'] = 50
        semantic_roles_options_model['keywords'] = False
        semantic_roles_options_model['entities'] = False

        sentiment_options_model = {} # SentimentOptions
        sentiment_options_model['document'] = True
        sentiment_options_model['targets'] = ['testString']

        categories_options_model = {} # CategoriesOptions
        categories_options_model['explanation'] = False
        categories_options_model['limit'] = 3
        categories_options_model['model'] = 'testString'

        syntax_options_tokens_model = {} # SyntaxOptionsTokens
        syntax_options_tokens_model['lemma'] = True
        syntax_options_tokens_model['part_of_speech'] = True

        syntax_options_model = {} # SyntaxOptions
        syntax_options_model['tokens'] = syntax_options_tokens_model
        syntax_options_model['sentences'] = True

        # Construct a json representation of a Features model
        features_model_json = {}
        features_model_json['classifications'] = classifications_options_model
        features_model_json['concepts'] = concepts_options_model
        features_model_json['emotion'] = emotion_options_model
        features_model_json['entities'] = entities_options_model
        features_model_json['keywords'] = keywords_options_model
        features_model_json['metadata'] = {'anyKey': 'anyValue'}
        features_model_json['relations'] = relations_options_model
        features_model_json['semantic_roles'] = semantic_roles_options_model
        features_model_json['sentiment'] = sentiment_options_model
        features_model_json['categories'] = categories_options_model
        features_model_json['syntax'] = syntax_options_model

        # Construct a model instance of Features by calling from_dict on the json representation
        features_model = Features.from_dict(features_model_json)
        assert features_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        features_model_dict = Features.from_dict(features_model_json).__dict__
        features_model2 = Features(**features_model_dict)

        # Verify the model instances are equivalent
        assert features_model == features_model2

        # Convert model instance back to dict and verify no loss of data
        features_model_json2 = features_model.to_dict()
        assert features_model_json2 == features_model_json
+
+
class TestModel_FeaturesResultsMetadata:
    """
    Test Class for FeaturesResultsMetadata
    """

    def test_features_results_metadata_serialization(self):
        """
        Test serialization/deserialization for FeaturesResultsMetadata.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct dict forms of any model objects needed in order to build this model.

        author_model = {} # Author
        author_model['name'] = 'testString'

        feed_model = {} # Feed
        feed_model['link'] = 'testString'

        # Construct a json representation of a FeaturesResultsMetadata model
        features_results_metadata_model_json = {}
        features_results_metadata_model_json['authors'] = [author_model]
        features_results_metadata_model_json['publication_date'] = 'testString'
        features_results_metadata_model_json['title'] = 'testString'
        features_results_metadata_model_json['image'] = 'testString'
        features_results_metadata_model_json['feeds'] = [feed_model]

        # Construct a model instance of FeaturesResultsMetadata by calling from_dict on the json representation
        features_results_metadata_model = FeaturesResultsMetadata.from_dict(features_results_metadata_model_json)
        assert features_results_metadata_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        features_results_metadata_model_dict = FeaturesResultsMetadata.from_dict(features_results_metadata_model_json).__dict__
        features_results_metadata_model2 = FeaturesResultsMetadata(**features_results_metadata_model_dict)

        # Verify the model instances are equivalent
        assert features_results_metadata_model == features_results_metadata_model2

        # Convert model instance back to dict and verify no loss of data
        features_results_metadata_model_json2 = features_results_metadata_model.to_dict()
        assert features_results_metadata_model_json2 == features_results_metadata_model_json
+
+
class TestModel_Feed:
    """
    Test Class for Feed
    """

    def test_feed_serialization(self):
        """
        Test serialization/deserialization for Feed.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of a Feed model
        feed_model_json = {}
        feed_model_json['link'] = 'testString'

        # Construct a model instance of Feed by calling from_dict on the json representation
        feed_model = Feed.from_dict(feed_model_json)
        assert feed_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        feed_model_dict = Feed.from_dict(feed_model_json).__dict__
        feed_model2 = Feed(**feed_model_dict)

        # Verify the model instances are equivalent
        assert feed_model == feed_model2

        # Convert model instance back to dict and verify no loss of data
        feed_model_json2 = feed_model.to_dict()
        assert feed_model_json2 == feed_model_json
+
+
class TestModel_KeywordsOptions:
    """
    Test Class for KeywordsOptions
    """

    def test_keywords_options_serialization(self):
        """
        Test serialization/deserialization for KeywordsOptions.

        Round-trips a JSON dict through from_dict / the constructor / to_dict
        and verifies no data is lost or altered.
        """

        # Construct a json representation of a KeywordsOptions model
        keywords_options_model_json = {}
        keywords_options_model_json['limit'] = 50
        keywords_options_model_json['sentiment'] = False
        keywords_options_model_json['emotion'] = False

        # Construct a model instance of KeywordsOptions by calling from_dict on the json representation
        keywords_options_model = KeywordsOptions.from_dict(keywords_options_model_json)
        assert keywords_options_model is not None

        # Construct a second instance by passing the first instance's attribute dict to the constructor
        keywords_options_model_dict = KeywordsOptions.from_dict(keywords_options_model_json).__dict__
        keywords_options_model2 = KeywordsOptions(**keywords_options_model_dict)

        # Verify the model instances are equivalent
        assert keywords_options_model == keywords_options_model2

        # Convert model instance back to dict and verify no loss of data
        keywords_options_model_json2 = keywords_options_model.to_dict()
        assert keywords_options_model_json2 == keywords_options_model_json
+
+
+class TestModel_KeywordsResult:
+ """
+ Test Class for KeywordsResult
+ """
+
+ def test_keywords_result_serialization(self):
+ """
+ Test serialization/deserialization for KeywordsResult
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ emotion_scores_model = {} # EmotionScores
+ emotion_scores_model['anger'] = 72.5
+ emotion_scores_model['disgust'] = 72.5
+ emotion_scores_model['fear'] = 72.5
+ emotion_scores_model['joy'] = 72.5
+ emotion_scores_model['sadness'] = 72.5
+
+ feature_sentiment_results_model = {} # FeatureSentimentResults
+ feature_sentiment_results_model['score'] = 72.5
+
+ # Construct a json representation of a KeywordsResult model
+ keywords_result_model_json = {}
+ keywords_result_model_json['count'] = 38
+ keywords_result_model_json['relevance'] = 72.5
+ keywords_result_model_json['text'] = 'testString'
+ keywords_result_model_json['emotion'] = emotion_scores_model
+ keywords_result_model_json['sentiment'] = feature_sentiment_results_model
+
+ # Construct a model instance of KeywordsResult by calling from_dict on the json representation
+ keywords_result_model = KeywordsResult.from_dict(keywords_result_model_json)
+ assert keywords_result_model != False
+
+ # Construct a second model instance of KeywordsResult by passing the first instance's attribute dict to the constructor
+ keywords_result_model_dict = KeywordsResult.from_dict(keywords_result_model_json).__dict__
+ keywords_result_model2 = KeywordsResult(**keywords_result_model_dict)
+
+ # Verify the model instances are equivalent
+ assert keywords_result_model == keywords_result_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ keywords_result_model_json2 = keywords_result_model.to_dict()
+ assert keywords_result_model_json2 == keywords_result_model_json
+
+
+class TestModel_ListModelsResults:
+ """
+ Test Class for ListModelsResults
+ """
+
+ def test_list_models_results_serialization(self):
+ """
+ Test serialization/deserialization for ListModelsResults
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ model_model = {} # Model
+ model_model['status'] = 'starting'
+ model_model['model_id'] = 'testString'
+ model_model['language'] = 'testString'
+ model_model['description'] = 'testString'
+ model_model['workspace_id'] = 'testString'
+ model_model['model_version'] = 'testString'
+ model_model['version'] = 'testString'
+ model_model['version_description'] = 'testString'
+ model_model['created'] = '2019-01-01T12:00:00Z'
+
+ # Construct a json representation of a ListModelsResults model
+ list_models_results_model_json = {}
+ list_models_results_model_json['models'] = [model_model]
+
+ # Construct a model instance of ListModelsResults by calling from_dict on the json representation
+ list_models_results_model = ListModelsResults.from_dict(list_models_results_model_json)
+ assert list_models_results_model != False
+
+ # Construct a second model instance of ListModelsResults by passing the first instance's attribute dict to the constructor
+ list_models_results_model_dict = ListModelsResults.from_dict(list_models_results_model_json).__dict__
+ list_models_results_model2 = ListModelsResults(**list_models_results_model_dict)
+
+ # Verify the model instances are equivalent
+ assert list_models_results_model == list_models_results_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ list_models_results_model_json2 = list_models_results_model.to_dict()
+ assert list_models_results_model_json2 == list_models_results_model_json
+
+
+class TestModel_Model:
+ """
+ Test Class for Model
+ """
+
+ def test_model_serialization(self):
+ """
+ Test serialization/deserialization for Model
+ """
+
+ # Construct a json representation of a Model model
+ model_model_json = {}
+ model_model_json['status'] = 'starting'
+ model_model_json['model_id'] = 'testString'
+ model_model_json['language'] = 'testString'
+ model_model_json['description'] = 'testString'
+ model_model_json['workspace_id'] = 'testString'
+ model_model_json['model_version'] = 'testString'
+ model_model_json['version'] = 'testString'
+ model_model_json['version_description'] = 'testString'
+ model_model_json['created'] = '2019-01-01T12:00:00Z'
+
+ # Construct a model instance of Model by calling from_dict on the json representation
+ model_model = Model.from_dict(model_model_json)
+ assert model_model != False
+
+ # Construct a second model instance of Model by passing the first instance's attribute dict to the constructor
+ model_model_dict = Model.from_dict(model_model_json).__dict__
+ model_model2 = Model(**model_model_dict)
+
+ # Verify the model instances are equivalent
+ assert model_model == model_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ model_model_json2 = model_model.to_dict()
+ assert model_model_json2 == model_model_json
+
+
+class TestModel_Notice:
+ """
+ Test Class for Notice
+ """
+
+ def test_notice_serialization(self):
+ """
+ Test serialization/deserialization for Notice
+ """
+
+ # Construct a json representation of a Notice model
+ notice_model_json = {}
+
+ # Construct a model instance of Notice by calling from_dict on the json representation
+ notice_model = Notice.from_dict(notice_model_json)
+ assert notice_model != False
+
+ # Construct a second model instance of Notice by passing the first instance's attribute dict to the constructor
+ notice_model_dict = Notice.from_dict(notice_model_json).__dict__
+ notice_model2 = Notice(**notice_model_dict)
+
+ # Verify the model instances are equivalent
+ assert notice_model == notice_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ notice_model_json2 = notice_model.to_dict()
+ assert notice_model_json2 == notice_model_json
+
+
+class TestModel_RelationArgument:
+ """
+ Test Class for RelationArgument
+ """
+
+ def test_relation_argument_serialization(self):
+ """
+ Test serialization/deserialization for RelationArgument
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ relation_entity_model = {} # RelationEntity
+ relation_entity_model['text'] = 'testString'
+ relation_entity_model['type'] = 'testString'
+
+ # Construct a json representation of a RelationArgument model
+ relation_argument_model_json = {}
+ relation_argument_model_json['entities'] = [relation_entity_model]
+ relation_argument_model_json['location'] = [38]
+ relation_argument_model_json['text'] = 'testString'
+
+ # Construct a model instance of RelationArgument by calling from_dict on the json representation
+ relation_argument_model = RelationArgument.from_dict(relation_argument_model_json)
+ assert relation_argument_model != False
+
+ # Construct a second model instance of RelationArgument by passing the first instance's attribute dict to the constructor
+ relation_argument_model_dict = RelationArgument.from_dict(relation_argument_model_json).__dict__
+ relation_argument_model2 = RelationArgument(**relation_argument_model_dict)
+
+ # Verify the model instances are equivalent
+ assert relation_argument_model == relation_argument_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ relation_argument_model_json2 = relation_argument_model.to_dict()
+ assert relation_argument_model_json2 == relation_argument_model_json
+
+
+class TestModel_RelationEntity:
+ """
+ Test Class for RelationEntity
+ """
+
+ def test_relation_entity_serialization(self):
+ """
+ Test serialization/deserialization for RelationEntity
+ """
+
+ # Construct a json representation of a RelationEntity model
+ relation_entity_model_json = {}
+ relation_entity_model_json['text'] = 'testString'
+ relation_entity_model_json['type'] = 'testString'
+
+ # Construct a model instance of RelationEntity by calling from_dict on the json representation
+ relation_entity_model = RelationEntity.from_dict(relation_entity_model_json)
+ assert relation_entity_model != False
+
+ # Construct a second model instance of RelationEntity by passing the first instance's attribute dict to the constructor
+ relation_entity_model_dict = RelationEntity.from_dict(relation_entity_model_json).__dict__
+ relation_entity_model2 = RelationEntity(**relation_entity_model_dict)
+
+ # Verify the model instances are equivalent
+ assert relation_entity_model == relation_entity_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ relation_entity_model_json2 = relation_entity_model.to_dict()
+ assert relation_entity_model_json2 == relation_entity_model_json
+
+
+class TestModel_RelationsOptions:
+ """
+ Test Class for RelationsOptions
+ """
+
+ def test_relations_options_serialization(self):
+ """
+ Test serialization/deserialization for RelationsOptions
+ """
+
+ # Construct a json representation of a RelationsOptions model
+ relations_options_model_json = {}
+ relations_options_model_json['model'] = 'testString'
+
+ # Construct a model instance of RelationsOptions by calling from_dict on the json representation
+ relations_options_model = RelationsOptions.from_dict(relations_options_model_json)
+ assert relations_options_model != False
+
+ # Construct a second model instance of RelationsOptions by passing the first instance's attribute dict to the constructor
+ relations_options_model_dict = RelationsOptions.from_dict(relations_options_model_json).__dict__
+ relations_options_model2 = RelationsOptions(**relations_options_model_dict)
+
+ # Verify the model instances are equivalent
+ assert relations_options_model == relations_options_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ relations_options_model_json2 = relations_options_model.to_dict()
+ assert relations_options_model_json2 == relations_options_model_json
+
+
+class TestModel_RelationsResult:
+ """
+ Test Class for RelationsResult
+ """
+
+ def test_relations_result_serialization(self):
+ """
+ Test serialization/deserialization for RelationsResult
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ relation_entity_model = {} # RelationEntity
+ relation_entity_model['text'] = 'testString'
+ relation_entity_model['type'] = 'testString'
+
+ relation_argument_model = {} # RelationArgument
+ relation_argument_model['entities'] = [relation_entity_model]
+ relation_argument_model['location'] = [38]
+ relation_argument_model['text'] = 'testString'
+
+ # Construct a json representation of a RelationsResult model
+ relations_result_model_json = {}
+ relations_result_model_json['score'] = 72.5
+ relations_result_model_json['sentence'] = 'testString'
+ relations_result_model_json['type'] = 'testString'
+ relations_result_model_json['arguments'] = [relation_argument_model]
+
+ # Construct a model instance of RelationsResult by calling from_dict on the json representation
+ relations_result_model = RelationsResult.from_dict(relations_result_model_json)
+ assert relations_result_model != False
+
+ # Construct a second model instance of RelationsResult by passing the first instance's attribute dict to the constructor
+ relations_result_model_dict = RelationsResult.from_dict(relations_result_model_json).__dict__
+ relations_result_model2 = RelationsResult(**relations_result_model_dict)
+
+ # Verify the model instances are equivalent
+ assert relations_result_model == relations_result_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ relations_result_model_json2 = relations_result_model.to_dict()
+ assert relations_result_model_json2 == relations_result_model_json
+
+
+class TestModel_SemanticRolesEntity:
+ """
+ Test Class for SemanticRolesEntity
+ """
+
+ def test_semantic_roles_entity_serialization(self):
+ """
+ Test serialization/deserialization for SemanticRolesEntity
+ """
+
+ # Construct a json representation of a SemanticRolesEntity model
+ semantic_roles_entity_model_json = {}
+ semantic_roles_entity_model_json['type'] = 'testString'
+ semantic_roles_entity_model_json['text'] = 'testString'
+
+ # Construct a model instance of SemanticRolesEntity by calling from_dict on the json representation
+ semantic_roles_entity_model = SemanticRolesEntity.from_dict(semantic_roles_entity_model_json)
+ assert semantic_roles_entity_model != False
+
+ # Construct a second model instance of SemanticRolesEntity by passing the first instance's attribute dict to the constructor
+ semantic_roles_entity_model_dict = SemanticRolesEntity.from_dict(semantic_roles_entity_model_json).__dict__
+ semantic_roles_entity_model2 = SemanticRolesEntity(**semantic_roles_entity_model_dict)
+
+ # Verify the model instances are equivalent
+ assert semantic_roles_entity_model == semantic_roles_entity_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ semantic_roles_entity_model_json2 = semantic_roles_entity_model.to_dict()
+ assert semantic_roles_entity_model_json2 == semantic_roles_entity_model_json
+
+
+class TestModel_SemanticRolesKeyword:
+ """
+ Test Class for SemanticRolesKeyword
+ """
+
+ def test_semantic_roles_keyword_serialization(self):
+ """
+ Test serialization/deserialization for SemanticRolesKeyword
+ """
+
+ # Construct a json representation of a SemanticRolesKeyword model
+ semantic_roles_keyword_model_json = {}
+ semantic_roles_keyword_model_json['text'] = 'testString'
+
+ # Construct a model instance of SemanticRolesKeyword by calling from_dict on the json representation
+ semantic_roles_keyword_model = SemanticRolesKeyword.from_dict(semantic_roles_keyword_model_json)
+ assert semantic_roles_keyword_model != False
+
+ # Construct a second model instance of SemanticRolesKeyword by passing the first instance's attribute dict to the constructor
+ semantic_roles_keyword_model_dict = SemanticRolesKeyword.from_dict(semantic_roles_keyword_model_json).__dict__
+ semantic_roles_keyword_model2 = SemanticRolesKeyword(**semantic_roles_keyword_model_dict)
+
+ # Verify the model instances are equivalent
+ assert semantic_roles_keyword_model == semantic_roles_keyword_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ semantic_roles_keyword_model_json2 = semantic_roles_keyword_model.to_dict()
+ assert semantic_roles_keyword_model_json2 == semantic_roles_keyword_model_json
+
+
+class TestModel_SemanticRolesOptions:
+ """
+ Test Class for SemanticRolesOptions
+ """
+
+ def test_semantic_roles_options_serialization(self):
+ """
+ Test serialization/deserialization for SemanticRolesOptions
+ """
+
+ # Construct a json representation of a SemanticRolesOptions model
+ semantic_roles_options_model_json = {}
+ semantic_roles_options_model_json['limit'] = 50
+ semantic_roles_options_model_json['keywords'] = False
+ semantic_roles_options_model_json['entities'] = False
+
+ # Construct a model instance of SemanticRolesOptions by calling from_dict on the json representation
+ semantic_roles_options_model = SemanticRolesOptions.from_dict(semantic_roles_options_model_json)
+ assert semantic_roles_options_model != False
+
+ # Construct a second model instance of SemanticRolesOptions by passing the first instance's attribute dict to the constructor
+ semantic_roles_options_model_dict = SemanticRolesOptions.from_dict(semantic_roles_options_model_json).__dict__
+ semantic_roles_options_model2 = SemanticRolesOptions(**semantic_roles_options_model_dict)
+
+ # Verify the model instances are equivalent
+ assert semantic_roles_options_model == semantic_roles_options_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ semantic_roles_options_model_json2 = semantic_roles_options_model.to_dict()
+ assert semantic_roles_options_model_json2 == semantic_roles_options_model_json
+
+
+class TestModel_SemanticRolesResult:
+ """
+ Test Class for SemanticRolesResult
+ """
+
+ def test_semantic_roles_result_serialization(self):
+ """
+ Test serialization/deserialization for SemanticRolesResult
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ semantic_roles_entity_model = {} # SemanticRolesEntity
+ semantic_roles_entity_model['type'] = 'testString'
+ semantic_roles_entity_model['text'] = 'testString'
+
+ semantic_roles_keyword_model = {} # SemanticRolesKeyword
+ semantic_roles_keyword_model['text'] = 'testString'
+
+ semantic_roles_result_subject_model = {} # SemanticRolesResultSubject
+ semantic_roles_result_subject_model['text'] = 'testString'
+ semantic_roles_result_subject_model['entities'] = [semantic_roles_entity_model]
+ semantic_roles_result_subject_model['keywords'] = [semantic_roles_keyword_model]
+
+ semantic_roles_verb_model = {} # SemanticRolesVerb
+ semantic_roles_verb_model['text'] = 'testString'
+ semantic_roles_verb_model['tense'] = 'testString'
+
+ semantic_roles_result_action_model = {} # SemanticRolesResultAction
+ semantic_roles_result_action_model['text'] = 'testString'
+ semantic_roles_result_action_model['normalized'] = 'testString'
+ semantic_roles_result_action_model['verb'] = semantic_roles_verb_model
+
+ semantic_roles_result_object_model = {} # SemanticRolesResultObject
+ semantic_roles_result_object_model['text'] = 'testString'
+ semantic_roles_result_object_model['keywords'] = [semantic_roles_keyword_model]
+
+ # Construct a json representation of a SemanticRolesResult model
+ semantic_roles_result_model_json = {}
+ semantic_roles_result_model_json['sentence'] = 'testString'
+ semantic_roles_result_model_json['subject'] = semantic_roles_result_subject_model
+ semantic_roles_result_model_json['action'] = semantic_roles_result_action_model
+ semantic_roles_result_model_json['object'] = semantic_roles_result_object_model
+
+ # Construct a model instance of SemanticRolesResult by calling from_dict on the json representation
+ semantic_roles_result_model = SemanticRolesResult.from_dict(semantic_roles_result_model_json)
+ assert semantic_roles_result_model != False
+
+ # Construct a second model instance of SemanticRolesResult by passing the first instance's attribute dict to the constructor
+ semantic_roles_result_model_dict = SemanticRolesResult.from_dict(semantic_roles_result_model_json).__dict__
+ semantic_roles_result_model2 = SemanticRolesResult(**semantic_roles_result_model_dict)
+
+ # Verify the model instances are equivalent
+ assert semantic_roles_result_model == semantic_roles_result_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ semantic_roles_result_model_json2 = semantic_roles_result_model.to_dict()
+ assert semantic_roles_result_model_json2 == semantic_roles_result_model_json
+
+
+class TestModel_SemanticRolesResultAction:
+ """
+ Test Class for SemanticRolesResultAction
+ """
+
+ def test_semantic_roles_result_action_serialization(self):
+ """
+ Test serialization/deserialization for SemanticRolesResultAction
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ semantic_roles_verb_model = {} # SemanticRolesVerb
+ semantic_roles_verb_model['text'] = 'testString'
+ semantic_roles_verb_model['tense'] = 'testString'
+
+ # Construct a json representation of a SemanticRolesResultAction model
+ semantic_roles_result_action_model_json = {}
+ semantic_roles_result_action_model_json['text'] = 'testString'
+ semantic_roles_result_action_model_json['normalized'] = 'testString'
+ semantic_roles_result_action_model_json['verb'] = semantic_roles_verb_model
+
+ # Construct a model instance of SemanticRolesResultAction by calling from_dict on the json representation
+ semantic_roles_result_action_model = SemanticRolesResultAction.from_dict(semantic_roles_result_action_model_json)
+ assert semantic_roles_result_action_model != False
+
+ # Construct a second model instance of SemanticRolesResultAction by passing the first instance's attribute dict to the constructor
+ semantic_roles_result_action_model_dict = SemanticRolesResultAction.from_dict(semantic_roles_result_action_model_json).__dict__
+ semantic_roles_result_action_model2 = SemanticRolesResultAction(**semantic_roles_result_action_model_dict)
+
+ # Verify the model instances are equivalent
+ assert semantic_roles_result_action_model == semantic_roles_result_action_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ semantic_roles_result_action_model_json2 = semantic_roles_result_action_model.to_dict()
+ assert semantic_roles_result_action_model_json2 == semantic_roles_result_action_model_json
+
+
+class TestModel_SemanticRolesResultObject:
+ """
+ Test Class for SemanticRolesResultObject
+ """
+
+ def test_semantic_roles_result_object_serialization(self):
+ """
+ Test serialization/deserialization for SemanticRolesResultObject
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ semantic_roles_keyword_model = {} # SemanticRolesKeyword
+ semantic_roles_keyword_model['text'] = 'testString'
+
+ # Construct a json representation of a SemanticRolesResultObject model
+ semantic_roles_result_object_model_json = {}
+ semantic_roles_result_object_model_json['text'] = 'testString'
+ semantic_roles_result_object_model_json['keywords'] = [semantic_roles_keyword_model]
+
+ # Construct a model instance of SemanticRolesResultObject by calling from_dict on the json representation
+ semantic_roles_result_object_model = SemanticRolesResultObject.from_dict(semantic_roles_result_object_model_json)
+ assert semantic_roles_result_object_model != False
+
+ # Construct a second model instance of SemanticRolesResultObject by passing the first instance's attribute dict to the constructor
+ semantic_roles_result_object_model_dict = SemanticRolesResultObject.from_dict(semantic_roles_result_object_model_json).__dict__
+ semantic_roles_result_object_model2 = SemanticRolesResultObject(**semantic_roles_result_object_model_dict)
+
+ # Verify the model instances are equivalent
+ assert semantic_roles_result_object_model == semantic_roles_result_object_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ semantic_roles_result_object_model_json2 = semantic_roles_result_object_model.to_dict()
+ assert semantic_roles_result_object_model_json2 == semantic_roles_result_object_model_json
+
+
+class TestModel_SemanticRolesResultSubject:
+ """
+ Test Class for SemanticRolesResultSubject
+ """
+
+ def test_semantic_roles_result_subject_serialization(self):
+ """
+ Test serialization/deserialization for SemanticRolesResultSubject
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ semantic_roles_entity_model = {} # SemanticRolesEntity
+ semantic_roles_entity_model['type'] = 'testString'
+ semantic_roles_entity_model['text'] = 'testString'
+
+ semantic_roles_keyword_model = {} # SemanticRolesKeyword
+ semantic_roles_keyword_model['text'] = 'testString'
+
+ # Construct a json representation of a SemanticRolesResultSubject model
+ semantic_roles_result_subject_model_json = {}
+ semantic_roles_result_subject_model_json['text'] = 'testString'
+ semantic_roles_result_subject_model_json['entities'] = [semantic_roles_entity_model]
+ semantic_roles_result_subject_model_json['keywords'] = [semantic_roles_keyword_model]
+
+ # Construct a model instance of SemanticRolesResultSubject by calling from_dict on the json representation
+ semantic_roles_result_subject_model = SemanticRolesResultSubject.from_dict(semantic_roles_result_subject_model_json)
+ assert semantic_roles_result_subject_model != False
+
+ # Construct a second model instance of SemanticRolesResultSubject by passing the first instance's attribute dict to the constructor
+ semantic_roles_result_subject_model_dict = SemanticRolesResultSubject.from_dict(semantic_roles_result_subject_model_json).__dict__
+ semantic_roles_result_subject_model2 = SemanticRolesResultSubject(**semantic_roles_result_subject_model_dict)
+
+ # Verify the model instances are equivalent
+ assert semantic_roles_result_subject_model == semantic_roles_result_subject_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ semantic_roles_result_subject_model_json2 = semantic_roles_result_subject_model.to_dict()
+ assert semantic_roles_result_subject_model_json2 == semantic_roles_result_subject_model_json
+
+
+class TestModel_SemanticRolesVerb:
+ """
+ Test Class for SemanticRolesVerb
+ """
+
+ def test_semantic_roles_verb_serialization(self):
+ """
+ Test serialization/deserialization for SemanticRolesVerb
+ """
+
+ # Construct a json representation of a SemanticRolesVerb model
+ semantic_roles_verb_model_json = {}
+ semantic_roles_verb_model_json['text'] = 'testString'
+ semantic_roles_verb_model_json['tense'] = 'testString'
+
+ # Construct a model instance of SemanticRolesVerb by calling from_dict on the json representation
+ semantic_roles_verb_model = SemanticRolesVerb.from_dict(semantic_roles_verb_model_json)
+ assert semantic_roles_verb_model != False
+
+ # Construct a second model instance of SemanticRolesVerb by passing the first instance's attribute dict to the constructor
+ semantic_roles_verb_model_dict = SemanticRolesVerb.from_dict(semantic_roles_verb_model_json).__dict__
+ semantic_roles_verb_model2 = SemanticRolesVerb(**semantic_roles_verb_model_dict)
+
+ # Verify the model instances are equivalent
+ assert semantic_roles_verb_model == semantic_roles_verb_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ semantic_roles_verb_model_json2 = semantic_roles_verb_model.to_dict()
+ assert semantic_roles_verb_model_json2 == semantic_roles_verb_model_json
+
+
+class TestModel_SentenceResult:
+ """
+ Test Class for SentenceResult
+ """
+
+ def test_sentence_result_serialization(self):
+ """
+ Test serialization/deserialization for SentenceResult
+ """
+
+ # Construct a json representation of a SentenceResult model
+ sentence_result_model_json = {}
+ sentence_result_model_json['text'] = 'testString'
+ sentence_result_model_json['location'] = [38]
+
+ # Construct a model instance of SentenceResult by calling from_dict on the json representation
+ sentence_result_model = SentenceResult.from_dict(sentence_result_model_json)
+ assert sentence_result_model != False
+
+ # Construct a second model instance of SentenceResult by passing the first instance's attribute dict to the constructor
+ sentence_result_model_dict = SentenceResult.from_dict(sentence_result_model_json).__dict__
+ sentence_result_model2 = SentenceResult(**sentence_result_model_dict)
+
+ # Verify the model instances are equivalent
+ assert sentence_result_model == sentence_result_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ sentence_result_model_json2 = sentence_result_model.to_dict()
+ assert sentence_result_model_json2 == sentence_result_model_json
+
+
+class TestModel_SentimentOptions:
+ """
+ Test Class for SentimentOptions
+ """
+
+ def test_sentiment_options_serialization(self):
+ """
+ Test serialization/deserialization for SentimentOptions
+ """
+
+ # Construct a json representation of a SentimentOptions model
+ sentiment_options_model_json = {}
+ sentiment_options_model_json['document'] = True
+ sentiment_options_model_json['targets'] = ['testString']
+
+ # Construct a model instance of SentimentOptions by calling from_dict on the json representation
+ sentiment_options_model = SentimentOptions.from_dict(sentiment_options_model_json)
+ assert sentiment_options_model != False
+
+ # Construct a second model instance of SentimentOptions by passing the first instance's attribute dict to the constructor
+ sentiment_options_model_dict = SentimentOptions.from_dict(sentiment_options_model_json).__dict__
+ sentiment_options_model2 = SentimentOptions(**sentiment_options_model_dict)
+
+ # Verify the model instances are equivalent
+ assert sentiment_options_model == sentiment_options_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ sentiment_options_model_json2 = sentiment_options_model.to_dict()
+ assert sentiment_options_model_json2 == sentiment_options_model_json
+
+
+class TestModel_SentimentResult:
+ """
+ Test Class for SentimentResult
+ """
+
+ def test_sentiment_result_serialization(self):
+ """
+ Test serialization/deserialization for SentimentResult
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ document_sentiment_results_model = {} # DocumentSentimentResults
+ document_sentiment_results_model['label'] = 'positive'
+ document_sentiment_results_model['score'] = 0.127034
+
+ targeted_sentiment_results_model = {} # TargetedSentimentResults
+ targeted_sentiment_results_model['text'] = 'stocks'
+ targeted_sentiment_results_model['score'] = 0.279964
+
+ # Construct a json representation of a SentimentResult model
+ sentiment_result_model_json = {}
+ sentiment_result_model_json['document'] = document_sentiment_results_model
+ sentiment_result_model_json['targets'] = [targeted_sentiment_results_model]
+
+ # Construct a model instance of SentimentResult by calling from_dict on the json representation
+ sentiment_result_model = SentimentResult.from_dict(sentiment_result_model_json)
+ assert sentiment_result_model != False
+
+ # Construct a second model instance of SentimentResult by passing the first instance's attribute dict to the constructor
+ sentiment_result_model_dict = SentimentResult.from_dict(sentiment_result_model_json).__dict__
+ sentiment_result_model2 = SentimentResult(**sentiment_result_model_dict)
+
+ # Verify the model instances are equivalent
+ assert sentiment_result_model == sentiment_result_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ sentiment_result_model_json2 = sentiment_result_model.to_dict()
+ assert sentiment_result_model_json2 == sentiment_result_model_json
+
+
+class TestModel_SyntaxOptions:
+ """
+ Test Class for SyntaxOptions
+ """
+
+ def test_syntax_options_serialization(self):
+ """
+ Test serialization/deserialization for SyntaxOptions
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ syntax_options_tokens_model = {} # SyntaxOptionsTokens
+ syntax_options_tokens_model['lemma'] = True
+ syntax_options_tokens_model['part_of_speech'] = True
+
+ # Construct a json representation of a SyntaxOptions model
+ syntax_options_model_json = {}
+ syntax_options_model_json['tokens'] = syntax_options_tokens_model
+ syntax_options_model_json['sentences'] = True
+
+ # Construct a model instance of SyntaxOptions by calling from_dict on the json representation
+ syntax_options_model = SyntaxOptions.from_dict(syntax_options_model_json)
+ assert syntax_options_model != False
+
+ # Construct a second model instance of SyntaxOptions by passing the first instance's attribute dict to the constructor
+ syntax_options_model_dict = SyntaxOptions.from_dict(syntax_options_model_json).__dict__
+ syntax_options_model2 = SyntaxOptions(**syntax_options_model_dict)
+
+ # Verify the model instances are equivalent
+ assert syntax_options_model == syntax_options_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ syntax_options_model_json2 = syntax_options_model.to_dict()
+ assert syntax_options_model_json2 == syntax_options_model_json
+
+
+class TestModel_SyntaxOptionsTokens:
+ """
+ Test Class for SyntaxOptionsTokens
+ """
+
+ def test_syntax_options_tokens_serialization(self):
+ """
+ Test serialization/deserialization for SyntaxOptionsTokens
+ """
+
+ # Construct a json representation of a SyntaxOptionsTokens model
+ syntax_options_tokens_model_json = {}
+ syntax_options_tokens_model_json['lemma'] = True
+ syntax_options_tokens_model_json['part_of_speech'] = True
+
+ # Construct a model instance of SyntaxOptionsTokens by calling from_dict on the json representation
+ syntax_options_tokens_model = SyntaxOptionsTokens.from_dict(syntax_options_tokens_model_json)
+ assert syntax_options_tokens_model != False
+
+ # Construct a second model instance of SyntaxOptionsTokens by passing the first instance's attribute dict to the constructor
+ syntax_options_tokens_model_dict = SyntaxOptionsTokens.from_dict(syntax_options_tokens_model_json).__dict__
+ syntax_options_tokens_model2 = SyntaxOptionsTokens(**syntax_options_tokens_model_dict)
+
+ # Verify the model instances are equivalent
+ assert syntax_options_tokens_model == syntax_options_tokens_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ syntax_options_tokens_model_json2 = syntax_options_tokens_model.to_dict()
+ assert syntax_options_tokens_model_json2 == syntax_options_tokens_model_json
+
+
+class TestModel_SyntaxResult:
+ """
+ Test Class for SyntaxResult
+ """
+
+ def test_syntax_result_serialization(self):
+ """
+ Test serialization/deserialization for SyntaxResult
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ token_result_model = {} # TokenResult
+ token_result_model['text'] = 'testString'
+ token_result_model['part_of_speech'] = 'ADJ'
+ token_result_model['location'] = [38]
+ token_result_model['lemma'] = 'testString'
+
+ sentence_result_model = {} # SentenceResult
+ sentence_result_model['text'] = 'testString'
+ sentence_result_model['location'] = [38]
+
+ # Construct a json representation of a SyntaxResult model
+ syntax_result_model_json = {}
+ syntax_result_model_json['tokens'] = [token_result_model]
+ syntax_result_model_json['sentences'] = [sentence_result_model]
+
+ # Construct a model instance of SyntaxResult by calling from_dict on the json representation
+ syntax_result_model = SyntaxResult.from_dict(syntax_result_model_json)
+ assert syntax_result_model != False
+
+    # Construct a second model instance of SyntaxResult using the attributes of the first
+ syntax_result_model_dict = SyntaxResult.from_dict(syntax_result_model_json).__dict__
+ syntax_result_model2 = SyntaxResult(**syntax_result_model_dict)
+
+ # Verify the model instances are equivalent
+ assert syntax_result_model == syntax_result_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ syntax_result_model_json2 = syntax_result_model.to_dict()
+ assert syntax_result_model_json2 == syntax_result_model_json
+
+
+class TestModel_TargetedEmotionResults:
+ """
+ Test Class for TargetedEmotionResults
+ """
+
+ def test_targeted_emotion_results_serialization(self):
+ """
+ Test serialization/deserialization for TargetedEmotionResults
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ emotion_scores_model = {} # EmotionScores
+ emotion_scores_model['anger'] = 72.5
+ emotion_scores_model['disgust'] = 72.5
+ emotion_scores_model['fear'] = 72.5
+ emotion_scores_model['joy'] = 72.5
+ emotion_scores_model['sadness'] = 72.5
+
+ # Construct a json representation of a TargetedEmotionResults model
+ targeted_emotion_results_model_json = {}
+ targeted_emotion_results_model_json['text'] = 'testString'
+ targeted_emotion_results_model_json['emotion'] = emotion_scores_model
+
+ # Construct a model instance of TargetedEmotionResults by calling from_dict on the json representation
+ targeted_emotion_results_model = TargetedEmotionResults.from_dict(targeted_emotion_results_model_json)
+ assert targeted_emotion_results_model != False
+
+    # Construct a second model instance of TargetedEmotionResults using the attributes of the first
+ targeted_emotion_results_model_dict = TargetedEmotionResults.from_dict(targeted_emotion_results_model_json).__dict__
+ targeted_emotion_results_model2 = TargetedEmotionResults(**targeted_emotion_results_model_dict)
+
+ # Verify the model instances are equivalent
+ assert targeted_emotion_results_model == targeted_emotion_results_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ targeted_emotion_results_model_json2 = targeted_emotion_results_model.to_dict()
+ assert targeted_emotion_results_model_json2 == targeted_emotion_results_model_json
+
+
+class TestModel_TargetedSentimentResults:
+ """
+ Test Class for TargetedSentimentResults
+ """
+
+ def test_targeted_sentiment_results_serialization(self):
+ """
+ Test serialization/deserialization for TargetedSentimentResults
+ """
+
+ # Construct a json representation of a TargetedSentimentResults model
+ targeted_sentiment_results_model_json = {}
+ targeted_sentiment_results_model_json['text'] = 'testString'
+ targeted_sentiment_results_model_json['score'] = 72.5
+
+ # Construct a model instance of TargetedSentimentResults by calling from_dict on the json representation
+ targeted_sentiment_results_model = TargetedSentimentResults.from_dict(targeted_sentiment_results_model_json)
+ assert targeted_sentiment_results_model != False
+
+    # Construct a second model instance of TargetedSentimentResults using the attributes of the first
+ targeted_sentiment_results_model_dict = TargetedSentimentResults.from_dict(targeted_sentiment_results_model_json).__dict__
+ targeted_sentiment_results_model2 = TargetedSentimentResults(**targeted_sentiment_results_model_dict)
+
+ # Verify the model instances are equivalent
+ assert targeted_sentiment_results_model == targeted_sentiment_results_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ targeted_sentiment_results_model_json2 = targeted_sentiment_results_model.to_dict()
+ assert targeted_sentiment_results_model_json2 == targeted_sentiment_results_model_json
+
+
+class TestModel_TokenResult:
+ """
+ Test Class for TokenResult
+ """
+
+ def test_token_result_serialization(self):
+ """
+ Test serialization/deserialization for TokenResult
+ """
+
+ # Construct a json representation of a TokenResult model
+ token_result_model_json = {}
+ token_result_model_json['text'] = 'testString'
+ token_result_model_json['part_of_speech'] = 'ADJ'
+ token_result_model_json['location'] = [38]
+ token_result_model_json['lemma'] = 'testString'
+
+ # Construct a model instance of TokenResult by calling from_dict on the json representation
+ token_result_model = TokenResult.from_dict(token_result_model_json)
+ assert token_result_model != False
+
+    # Construct a second model instance of TokenResult using the attributes of the first
+ token_result_model_dict = TokenResult.from_dict(token_result_model_json).__dict__
+ token_result_model2 = TokenResult(**token_result_model_dict)
+
+ # Verify the model instances are equivalent
+ assert token_result_model == token_result_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ token_result_model_json2 = token_result_model.to_dict()
+ assert token_result_model_json2 == token_result_model_json
+
+
+# endregion
+##############################################################################
+# End of Model Tests
+##############################################################################
diff --git a/test/unit/test_personality_insights_v3.py b/test/unit/test_personality_insights_v3.py
deleted file mode 100755
index 548e06486..000000000
--- a/test/unit/test_personality_insights_v3.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# coding: utf-8
-import responses
-import ibm_watson
-import os
-import codecs
-from ibm_watson.personality_insights_v3 import Profile
-
-profile_url = 'https://gateway.watsonplatform.net/personality-insights/api/v3/profile'
-
-@responses.activate
-def test_plain_to_json():
-
- personality_insights = ibm_watson.PersonalityInsightsV3(
- '2016-10-20', username="username", password="password")
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-expect1.txt')) as expect_file:
- profile_response = expect_file.read()
-
- responses.add(responses.POST, profile_url,
- body=profile_response, status=200,
- content_type='application/json')
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3.txt')) as personality_text:
- response = personality_insights.profile(
- personality_text, 'application/json', content_type='text/plain;charset=utf-8').get_result()
-
- assert 'version=2016-10-20' in responses.calls[0].request.url
- assert responses.calls[0].response.text == profile_response
- assert len(responses.calls) == 1
- # Verify that response can be converted to a Profile
- Profile._from_dict(response)
-
-@responses.activate
-def test_json_to_json():
-
- personality_insights = ibm_watson.PersonalityInsightsV3(
- '2016-10-20', username="username", password="password")
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-expect2.txt')) as expect_file:
- profile_response = expect_file.read()
-
- responses.add(responses.POST, profile_url,
- body=profile_response, status=200,
- content_type='application/json')
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3.json')) as personality_text:
- response = personality_insights.profile(
- personality_text, accept='application/json',
- content_type='application/json',
- raw_scores=True,
- consumption_preferences=True).get_result()
-
- assert 'version=2016-10-20' in responses.calls[0].request.url
- assert 'raw_scores=true' in responses.calls[0].request.url
- assert 'consumption_preferences=true' in responses.calls[0].request.url
- assert responses.calls[0].response.text == profile_response
- assert len(responses.calls) == 1
- # Verify that response can be converted to a Profile
- Profile._from_dict(response)
-
-@responses.activate
-def test_json_to_csv():
-
- personality_insights = ibm_watson.PersonalityInsightsV3(
- '2016-10-20', username="username", password="password")
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-expect3.txt')) as expect_file:
- profile_response = expect_file.read()
-
- responses.add(responses.POST, profile_url,
- body=profile_response, status=200,
- content_type='text/csv')
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3.json')) as personality_text:
- personality_insights.profile(
- personality_text,
- 'text/csv',
- content_type='application/json',
- csv_headers=True,
- raw_scores=True,
- consumption_preferences=True)
-
- assert 'version=2016-10-20' in responses.calls[0].request.url
- assert 'raw_scores=true' in responses.calls[0].request.url
- assert 'consumption_preferences=true' in responses.calls[0].request.url
- assert 'csv_headers=true' in responses.calls[0].request.url
- assert responses.calls[0].response.text == profile_response
- assert len(responses.calls) == 1
-
-
-@responses.activate
-def test_plain_to_json_es():
-
- personality_insights = ibm_watson.PersonalityInsightsV3(
- '2016-10-20', username="username", password="password")
-
- with codecs.open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-expect4.txt'), \
- encoding='utf-8') as expect_file:
- profile_response = expect_file.read()
-
- responses.add(responses.POST, profile_url,
- body=profile_response, status=200,
- content_type='application/json')
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-es.txt')) as personality_text:
- response = personality_insights.profile(
- personality_text,
- 'application/json',
- content_type='text/plain;charset=utf-8',
- content_language='es',
- accept_language='es').get_result()
-
- assert 'version=2016-10-20' in responses.calls[0].request.url
- assert responses.calls[0].response.text == profile_response
- assert len(responses.calls) == 1
- # Verify that response can be converted to a Profile
- Profile._from_dict(response)
diff --git a/test/unit/test_speech_to_text_v1.py b/test/unit/test_speech_to_text_v1.py
old mode 100755
new mode 100644
index 87a5514c9..cb5babf87
--- a/test/unit/test_speech_to_text_v1.py
+++ b/test/unit/test_speech_to_text_v1.py
@@ -1,590 +1,6112 @@
-# coding=utf-8
-import os
+# -*- coding: utf-8 -*-
+# (C) Copyright IBM Corp. 2026.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Unit Tests for SpeechToTextV1
+"""
+
+from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
+import inspect
+import io
import json
+import pytest
+import re
+import requests
import responses
-import ibm_watson
-from ibm_watson.speech_to_text_v1 import CustomWord
-
-
-@responses.activate
-def test_success():
- models_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/models'
- models_response = '{"models": [{"url": "https://stream.watsonplatform.net/speech-to-text/api/v1/models/' \
- 'WatsonModel", "rate": 16000, "name": "WatsonModel", "language": "en-US", "description": ' \
- '"Watson model \'v7w_134k.3\' for Attila 2-5 reco engine."}]}'
-
- responses.add(
- responses.GET,
- models_url,
- body=models_response,
- status=200,
- content_type='application/json')
-
- speech_to_text = ibm_watson.SpeechToTextV1(
- username="username", password="password")
- speech_to_text.list_models()
-
- assert responses.calls[0].request.url == models_url
- assert responses.calls[0].response.text == models_response
-
- recognize_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/recognize'
- recognize_response = '{"results":[{"alternatives":[{"transcript":"thunderstorms could produce large hail ' \
- 'isolated tornadoes and heavy rain "}],"final":true}],"result_index":0}'
-
- responses.add(
- responses.POST,
- recognize_url,
- body=recognize_response,
- status=200,
- content_type='application/json')
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file:
- speech_to_text.recognize(
- audio=audio_file, content_type='audio/l16; rate=44100')
-
- request_url = responses.calls[1].request.url
- assert request_url == recognize_url
- assert responses.calls[1].response.text == recognize_response
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file:
- speech_to_text.recognize(
- audio=audio_file, customization_id='x', content_type='audio/l16; rate=44100')
- expected_url = "{0}?customization_id=x".format(recognize_url)
- assert expected_url == responses.calls[2].request.url
- assert len(responses.calls) == 3
-
-
-@responses.activate
-def test_get_model():
- model_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/models/modelid'
- responses.add(
- responses.GET,
- model_url,
- body='{"bogus_response": "yep"}',
- status=200,
- content_type='application/json')
- speech_to_text = ibm_watson.SpeechToTextV1(
- username="username", password="password")
- speech_to_text.get_model(model_id='modelid')
- assert len(responses.calls) == 1
-
-
-def _decode_body(body):
- try:
- return body.decode('utf-8')
- except:
- return body
-
-
-@responses.activate
-def test_recognitions():
- url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/recognitions'
- get_response = '{"recognitions": [{"created": "2018-02-01T17:43:15.432Z","id": "6193190c-0777-11e8-9b4b-43ad845196dd","updated": "2018-02-01T17:43:17.998Z","status": "failed"}]}'
- responses.add(
- responses.GET,
- url,
- body=get_response,
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.POST,
- url,
- body='{"status": "waiting"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.DELETE,
- "{0}/jobid".format(url),
- body='{"description": "deleted successfully"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.GET,
- "{0}/jobid".format(url),
- body='{"status": "waiting"}',
- status=200,
- content_type='application/json')
-
- speech_to_text = ibm_watson.SpeechToTextV1(
- username="username", password="password")
-
- speech_to_text.check_jobs()
- assert responses.calls[0].response.json()['recognitions'][0][
- 'id'] == '6193190c-0777-11e8-9b4b-43ad845196dd'
-
- speech_to_text.check_job('jobid')
- assert responses.calls[1].response.json() == {'status': 'waiting'}
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file:
- speech_to_text.create_job(audio=audio_file, content_type='audio/basic')
- assert responses.calls[2].response.json() == {'status': 'waiting'}
-
- speech_to_text.delete_job('jobid')
- assert responses.calls[3].response.json() == {
- "description": "deleted successfully"
- }
-
- assert len(responses.calls) == 4
-
-
-@responses.activate
-def test_callbacks():
- base_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1'
- responses.add(
- responses.POST,
- "{0}/register_callback".format(base_url),
- body='{"status": "created", "url": "monitorcalls.com"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.POST,
- "{0}/unregister_callback".format(base_url),
- body='{"response": "The callback URL was successfully unregistered"}',
- status=200,
- content_type='application/json')
-
- speech_to_text = ibm_watson.SpeechToTextV1(
- username="username", password="password")
- speech_to_text.register_callback("monitorcalls.com")
- assert responses.calls[0].response.json() == {
- "status": "created",
- "url": "monitorcalls.com"
- }
-
- speech_to_text.unregister_callback("monitorcalls.com")
- assert responses.calls[1].response.json() == {
- "response": "The callback URL was successfully unregistered"
- }
-
- assert len(responses.calls) == 2
-
-
-@responses.activate
-def test_custom_model():
- customization_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/customizations'
- train_url = "{0}/{1}/train".format(customization_url, 'customid')
-
- responses.add(
- responses.GET,
- customization_url,
- body='{"get response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.POST,
- customization_url,
- body='{"bogus_response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.GET,
- "{0}/modelid".format(customization_url),
- body='{"bogus_response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.DELETE,
- "{0}/modelid".format(customization_url),
- body='{"bogus_response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.POST,
- train_url,
- body='{"bogus_response": "yep"}',
- status=200,
- content_type='application/json')
-
- speech_to_text = ibm_watson.SpeechToTextV1(
- username="username", password="password")
-
- speech_to_text.list_language_models()
-
- speech_to_text.create_language_model(
- name="Example model",
- base_model_name="en-US_BroadbandModel")
-
- parsed_body = json.loads(_decode_body(responses.calls[1].request.body))
- assert parsed_body['name'] == 'Example model'
-
- speech_to_text.create_language_model(
- name="Example model Two",
- base_model_name="en-US_BroadbandModel")
-
- parsed_body = json.loads(_decode_body(responses.calls[2].request.body))
- assert parsed_body['name'] == 'Example model Two'
- assert parsed_body['base_model_name'] == 'en-US_BroadbandModel'
-
- speech_to_text.train_language_model('customid')
- speech_to_text.get_language_model(customization_id='modelid')
- speech_to_text.delete_language_model(customization_id='modelid')
-
- assert len(responses.calls) == 6
-
-
-@responses.activate
-def test_acoustic_model():
- acoustic_customization_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/acoustic_customizations'
- train_url = "{0}/{1}/train".format(acoustic_customization_url, 'customid')
-
- responses.add(
- responses.GET,
- acoustic_customization_url,
- body='{"get response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.POST,
- acoustic_customization_url,
- body='{"bogus_response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.GET,
- "{0}/modelid".format(acoustic_customization_url),
- body='{"bogus_response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.DELETE,
- "{0}/modelid".format(acoustic_customization_url),
- body='{"bogus_response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.POST,
- train_url,
- body='{"bogus_response": "yep"}',
- status=200,
- content_type='application/json')
-
- speech_to_text = ibm_watson.SpeechToTextV1(
- username="username", password="password")
-
- speech_to_text.list_acoustic_models()
-
- speech_to_text.create_acoustic_model(
- name="Example model",
- base_model_name="en-US_BroadbandModel",
- description="Example custom language model")
-
- parsed_body = json.loads(_decode_body(responses.calls[1].request.body))
- assert parsed_body['name'] == 'Example model'
-
- speech_to_text.create_acoustic_model(
- name="Example model Two",
- base_model_name="en-US_BroadbandModel")
-
- parsed_body = json.loads(_decode_body(responses.calls[2].request.body))
- assert parsed_body['name'] == 'Example model Two'
- assert parsed_body['base_model_name'] == 'en-US_BroadbandModel'
-
- speech_to_text.train_acoustic_model('customid')
- speech_to_text.get_acoustic_model(customization_id='modelid')
- speech_to_text.delete_acoustic_model(customization_id='modelid')
+import tempfile
+import urllib
+from ibm_watson.speech_to_text_v1 import *
+
+
+_service = SpeechToTextV1(
+ authenticator=NoAuthAuthenticator()
+)
+
+_base_url = 'https://api.us-south.speech-to-text.watson.cloud.ibm.com'
+_service.set_service_url(_base_url)
+
+
+def preprocess_url(operation_path: str):
+ """
+ Returns the request url associated with the specified operation path.
+ This will be base_url concatenated with a quoted version of operation_path.
+ The returned request URL is used to register the mock response so it needs
+ to match the request URL that is formed by the requests library.
+ """
+
+ # Form the request URL from the base URL and operation path.
+ request_url = _base_url + operation_path
+
+ # If the request url does NOT end with a /, then just return it as-is.
+ # Otherwise, return a regular expression that matches one or more trailing /.
+ if not request_url.endswith('/'):
+ return request_url
+ return re.compile(request_url.rstrip('/') + '/+')
+
+
+##############################################################################
+# Start of Service: Models
+##############################################################################
+# region
+
+
+class TestListModels:
+ """
+ Test Class for list_models
+ """
+
+ @responses.activate
+ def test_list_models_all_params(self):
+ """
+ list_models()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/models')
+ mock_response = '{"models": [{"name": "name", "language": "language", "rate": 4, "url": "url", "supported_features": {"custom_language_model": false, "custom_acoustic_model": false, "speaker_labels": true, "low_latency": false}, "description": "description"}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Invoke method
+ response = _service.list_models()
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_models_all_params_with_retries(self):
+ # Enable retries and run test_list_models_all_params.
+ _service.enable_retries()
+ self.test_list_models_all_params()
+
+ # Disable retries and run test_list_models_all_params.
+ _service.disable_retries()
+ self.test_list_models_all_params()
+
+
+class TestGetModel:
+ """
+ Test Class for get_model
+ """
+
+ @responses.activate
+ def test_get_model_all_params(self):
+ """
+ get_model()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/models/ar-MS_BroadbandModel')
+ mock_response = '{"name": "name", "language": "language", "rate": 4, "url": "url", "supported_features": {"custom_language_model": false, "custom_acoustic_model": false, "speaker_labels": true, "low_latency": false}, "description": "description"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ model_id = 'ar-MS_BroadbandModel'
+
+ # Invoke method
+ response = _service.get_model(
+ model_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_get_model_all_params_with_retries(self):
+ # Enable retries and run test_get_model_all_params.
+ _service.enable_retries()
+ self.test_get_model_all_params()
+
+ # Disable retries and run test_get_model_all_params.
+ _service.disable_retries()
+ self.test_get_model_all_params()
+
+ @responses.activate
+ def test_get_model_value_error(self):
+ """
+ test_get_model_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/models/ar-MS_BroadbandModel')
+ mock_response = '{"name": "name", "language": "language", "rate": 4, "url": "url", "supported_features": {"custom_language_model": false, "custom_acoustic_model": false, "speaker_labels": true, "low_latency": false}, "description": "description"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ model_id = 'ar-MS_BroadbandModel'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "model_id": model_id,
+ }
+ for param in req_param_dict.keys():
+        req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.get_model(**req_copy)
+
+ def test_get_model_value_error_with_retries(self):
+ # Enable retries and run test_get_model_value_error.
+ _service.enable_retries()
+ self.test_get_model_value_error()
+
+ # Disable retries and run test_get_model_value_error.
+ _service.disable_retries()
+ self.test_get_model_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Models
+##############################################################################
+
+##############################################################################
+# Start of Service: Synchronous
+##############################################################################
+# region
+
+
+class TestRecognize:
+ """
+ Test Class for recognize
+ """
+
+ @responses.activate
+ def test_recognize_all_params(self):
+ """
+ recognize()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/recognize')
+ mock_response = '{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ audio = io.BytesIO(b'This is a mock file.').getvalue()
+ content_type = 'application/octet-stream'
+ model = 'en-US_BroadbandModel'
+ speech_begin_event = False
+ enrichments = 'testString'
+ language_customization_id = 'testString'
+ acoustic_customization_id = 'testString'
+ base_model_version = 'testString'
+ customization_weight = 72.5
+ inactivity_timeout = 30
+ keywords = ['testString']
+ keywords_threshold = 36.0
+ max_alternatives = 1
+ word_alternatives_threshold = 36.0
+ word_confidence = False
+ timestamps = False
+ profanity_filter = True
+ smart_formatting = False
+ smart_formatting_version = 0
+ speaker_labels = False
+ grammar_name = 'testString'
+ redaction = False
+ audio_metrics = False
+ end_of_phrase_silence_time = 0.8
+ split_transcript_at_phrase_end = False
+ speech_detector_sensitivity = 0.5
+ sad_module = 1
+ background_audio_suppression = 0.0
+ low_latency = False
+ character_insertion_bias = 0.0
+
+ # Invoke method
+ response = _service.recognize(
+ audio,
+ content_type=content_type,
+ model=model,
+ speech_begin_event=speech_begin_event,
+ enrichments=enrichments,
+ language_customization_id=language_customization_id,
+ acoustic_customization_id=acoustic_customization_id,
+ base_model_version=base_model_version,
+ customization_weight=customization_weight,
+ inactivity_timeout=inactivity_timeout,
+ keywords=keywords,
+ keywords_threshold=keywords_threshold,
+ max_alternatives=max_alternatives,
+ word_alternatives_threshold=word_alternatives_threshold,
+ word_confidence=word_confidence,
+ timestamps=timestamps,
+ profanity_filter=profanity_filter,
+ smart_formatting=smart_formatting,
+ smart_formatting_version=smart_formatting_version,
+ speaker_labels=speaker_labels,
+ grammar_name=grammar_name,
+ redaction=redaction,
+ audio_metrics=audio_metrics,
+ end_of_phrase_silence_time=end_of_phrase_silence_time,
+ split_transcript_at_phrase_end=split_transcript_at_phrase_end,
+ speech_detector_sensitivity=speech_detector_sensitivity,
+ sad_module=sad_module,
+ background_audio_suppression=background_audio_suppression,
+ low_latency=low_latency,
+ character_insertion_bias=character_insertion_bias,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'model={}'.format(model) in query_string
+ assert 'speech_begin_event={}'.format('true' if speech_begin_event else 'false') in query_string
+ assert 'enrichments={}'.format(enrichments) in query_string
+ assert 'language_customization_id={}'.format(language_customization_id) in query_string
+ assert 'acoustic_customization_id={}'.format(acoustic_customization_id) in query_string
+ assert 'base_model_version={}'.format(base_model_version) in query_string
+ assert 'customization_weight={}'.format(customization_weight) in query_string
+ assert 'inactivity_timeout={}'.format(inactivity_timeout) in query_string
+ assert 'keywords={}'.format(','.join(keywords)) in query_string
+ assert 'max_alternatives={}'.format(max_alternatives) in query_string
+ assert 'word_confidence={}'.format('true' if word_confidence else 'false') in query_string
+ assert 'timestamps={}'.format('true' if timestamps else 'false') in query_string
+ assert 'profanity_filter={}'.format('true' if profanity_filter else 'false') in query_string
+ assert 'smart_formatting={}'.format('true' if smart_formatting else 'false') in query_string
+ assert 'smart_formatting_version={}'.format(smart_formatting_version) in query_string
+ assert 'speaker_labels={}'.format('true' if speaker_labels else 'false') in query_string
+ assert 'grammar_name={}'.format(grammar_name) in query_string
+ assert 'redaction={}'.format('true' if redaction else 'false') in query_string
+ assert 'audio_metrics={}'.format('true' if audio_metrics else 'false') in query_string
+ assert 'end_of_phrase_silence_time={}'.format(end_of_phrase_silence_time) in query_string
+ assert 'split_transcript_at_phrase_end={}'.format('true' if split_transcript_at_phrase_end else 'false') in query_string
+ assert 'sad_module={}'.format(sad_module) in query_string
+ assert 'low_latency={}'.format('true' if low_latency else 'false') in query_string
+ # Validate body params
+
+ def test_recognize_all_params_with_retries(self):
+ # Enable retries and run test_recognize_all_params.
+ _service.enable_retries()
+ self.test_recognize_all_params()
+
+ # Disable retries and run test_recognize_all_params.
+ _service.disable_retries()
+ self.test_recognize_all_params()
+
+ @responses.activate
+ def test_recognize_required_params(self):
+ """
+ test_recognize_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/recognize')
+ mock_response = '{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ audio = io.BytesIO(b'This is a mock file.').getvalue()
+
+ # Invoke method
+ response = _service.recognize(
+ audio,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+
+ def test_recognize_required_params_with_retries(self):
+ # Enable retries and run test_recognize_required_params.
+ _service.enable_retries()
+ self.test_recognize_required_params()
+
+ # Disable retries and run test_recognize_required_params.
+ _service.disable_retries()
+ self.test_recognize_required_params()
+
+ @responses.activate
+ def test_recognize_value_error(self):
+ """
+ test_recognize_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/recognize')
+ mock_response = '{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ audio = io.BytesIO(b'This is a mock file.').getvalue()
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "audio": audio,
+ }
+ for param in req_param_dict.keys():
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.recognize(**req_copy)
+
+ def test_recognize_value_error_with_retries(self):
+ # Enable retries and run test_recognize_value_error.
+ _service.enable_retries()
+ self.test_recognize_value_error()
+
+ # Disable retries and run test_recognize_value_error.
+ _service.disable_retries()
+ self.test_recognize_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Synchronous
+##############################################################################
+
+##############################################################################
+# Start of Service: Asynchronous
+##############################################################################
+# region
+
+
+class TestRegisterCallback:
+ """
+ Test Class for register_callback
+ """
+
+ @responses.activate
+ def test_register_callback_all_params(self):
+ """
+ register_callback()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/register_callback')
+ mock_response = '{"status": "created", "url": "url"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ callback_url = 'testString'
+ user_secret = 'testString'
+
+ # Invoke method
+ response = _service.register_callback(
+ callback_url,
+ user_secret=user_secret,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'callback_url={}'.format(callback_url) in query_string
+ assert 'user_secret={}'.format(user_secret) in query_string
+
+ def test_register_callback_all_params_with_retries(self):
+ # Enable retries and run test_register_callback_all_params.
+ _service.enable_retries()
+ self.test_register_callback_all_params()
+
+ # Disable retries and run test_register_callback_all_params.
+ _service.disable_retries()
+ self.test_register_callback_all_params()
+
+ @responses.activate
+ def test_register_callback_required_params(self):
+ """
+ test_register_callback_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/register_callback')
+ mock_response = '{"status": "created", "url": "url"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ callback_url = 'testString'
+
+ # Invoke method
+ response = _service.register_callback(
+ callback_url,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'callback_url={}'.format(callback_url) in query_string
+
+ def test_register_callback_required_params_with_retries(self):
+ # Enable retries and run test_register_callback_required_params.
+ _service.enable_retries()
+ self.test_register_callback_required_params()
+
+ # Disable retries and run test_register_callback_required_params.
+ _service.disable_retries()
+ self.test_register_callback_required_params()
+
+ @responses.activate
+ def test_register_callback_value_error(self):
+ """
+ test_register_callback_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/register_callback')
+ mock_response = '{"status": "created", "url": "url"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ callback_url = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "callback_url": callback_url,
+ }
+ for param in req_param_dict.keys():
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.register_callback(**req_copy)
+
+ def test_register_callback_value_error_with_retries(self):
+ # Enable retries and run test_register_callback_value_error.
+ _service.enable_retries()
+ self.test_register_callback_value_error()
+
+ # Disable retries and run test_register_callback_value_error.
+ _service.disable_retries()
+ self.test_register_callback_value_error()
+
+
+class TestUnregisterCallback:
+ """
+ Test Class for unregister_callback
+ """
+
+ @responses.activate
+ def test_unregister_callback_all_params(self):
+ """
+ unregister_callback()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/unregister_callback')
+ responses.add(
+ responses.POST,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ callback_url = 'testString'
+
+ # Invoke method
+ response = _service.unregister_callback(
+ callback_url,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'callback_url={}'.format(callback_url) in query_string
+
+ def test_unregister_callback_all_params_with_retries(self):
+ # Enable retries and run test_unregister_callback_all_params.
+ _service.enable_retries()
+ self.test_unregister_callback_all_params()
+
+ # Disable retries and run test_unregister_callback_all_params.
+ _service.disable_retries()
+ self.test_unregister_callback_all_params()
+
+ @responses.activate
+ def test_unregister_callback_value_error(self):
+ """
+ test_unregister_callback_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/unregister_callback')
+ responses.add(
+ responses.POST,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ callback_url = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "callback_url": callback_url,
+ }
+ for param in req_param_dict.keys():
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.unregister_callback(**req_copy)
+
+ def test_unregister_callback_value_error_with_retries(self):
+ # Enable retries and run test_unregister_callback_value_error.
+ _service.enable_retries()
+ self.test_unregister_callback_value_error()
+
+ # Disable retries and run test_unregister_callback_value_error.
+ _service.disable_retries()
+ self.test_unregister_callback_value_error()
+
+
+class TestCreateJob:
+ """
+ Test Class for create_job
+ """
+
+ @responses.activate
+ def test_create_job_all_params(self):
+ """
+ create_job()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/recognitions')
+ mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}], "warnings": ["warnings"]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ audio = io.BytesIO(b'This is a mock file.').getvalue()
+ content_type = 'application/octet-stream'
+ model = 'en-US_BroadbandModel'
+ callback_url = 'testString'
+ events = 'recognitions.started'
+ user_token = 'testString'
+ results_ttl = 38
+ speech_begin_event = False
+ enrichments = 'testString'
+ language_customization_id = 'testString'
+ acoustic_customization_id = 'testString'
+ base_model_version = 'testString'
+ customization_weight = 72.5
+ inactivity_timeout = 30
+ keywords = ['testString']
+ keywords_threshold = 36.0
+ max_alternatives = 1
+ word_alternatives_threshold = 36.0
+ word_confidence = False
+ timestamps = False
+ profanity_filter = True
+ smart_formatting = False
+ smart_formatting_version = 0
+ speaker_labels = False
+ grammar_name = 'testString'
+ redaction = False
+ processing_metrics = False
+ processing_metrics_interval = 1.0
+ audio_metrics = False
+ end_of_phrase_silence_time = 0.8
+ split_transcript_at_phrase_end = False
+ speech_detector_sensitivity = 0.5
+ sad_module = 1
+ background_audio_suppression = 0.0
+ low_latency = False
+ character_insertion_bias = 0.0
+
+ # Invoke method
+ response = _service.create_job(
+ audio,
+ content_type=content_type,
+ model=model,
+ callback_url=callback_url,
+ events=events,
+ user_token=user_token,
+ results_ttl=results_ttl,
+ speech_begin_event=speech_begin_event,
+ enrichments=enrichments,
+ language_customization_id=language_customization_id,
+ acoustic_customization_id=acoustic_customization_id,
+ base_model_version=base_model_version,
+ customization_weight=customization_weight,
+ inactivity_timeout=inactivity_timeout,
+ keywords=keywords,
+ keywords_threshold=keywords_threshold,
+ max_alternatives=max_alternatives,
+ word_alternatives_threshold=word_alternatives_threshold,
+ word_confidence=word_confidence,
+ timestamps=timestamps,
+ profanity_filter=profanity_filter,
+ smart_formatting=smart_formatting,
+ smart_formatting_version=smart_formatting_version,
+ speaker_labels=speaker_labels,
+ grammar_name=grammar_name,
+ redaction=redaction,
+ processing_metrics=processing_metrics,
+ processing_metrics_interval=processing_metrics_interval,
+ audio_metrics=audio_metrics,
+ end_of_phrase_silence_time=end_of_phrase_silence_time,
+ split_transcript_at_phrase_end=split_transcript_at_phrase_end,
+ speech_detector_sensitivity=speech_detector_sensitivity,
+ sad_module=sad_module,
+ background_audio_suppression=background_audio_suppression,
+ low_latency=low_latency,
+ character_insertion_bias=character_insertion_bias,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'model={}'.format(model) in query_string
+ assert 'callback_url={}'.format(callback_url) in query_string
+ assert 'events={}'.format(events) in query_string
+ assert 'user_token={}'.format(user_token) in query_string
+ assert 'results_ttl={}'.format(results_ttl) in query_string
+ assert 'speech_begin_event={}'.format('true' if speech_begin_event else 'false') in query_string
+ assert 'enrichments={}'.format(enrichments) in query_string
+ assert 'language_customization_id={}'.format(language_customization_id) in query_string
+ assert 'acoustic_customization_id={}'.format(acoustic_customization_id) in query_string
+ assert 'base_model_version={}'.format(base_model_version) in query_string
+ assert 'customization_weight={}'.format(customization_weight) in query_string
+ assert 'inactivity_timeout={}'.format(inactivity_timeout) in query_string
+ assert 'keywords={}'.format(','.join(keywords)) in query_string
+ assert 'max_alternatives={}'.format(max_alternatives) in query_string
+ assert 'word_confidence={}'.format('true' if word_confidence else 'false') in query_string
+ assert 'timestamps={}'.format('true' if timestamps else 'false') in query_string
+ assert 'profanity_filter={}'.format('true' if profanity_filter else 'false') in query_string
+ assert 'smart_formatting={}'.format('true' if smart_formatting else 'false') in query_string
+ assert 'smart_formatting_version={}'.format(smart_formatting_version) in query_string
+ assert 'speaker_labels={}'.format('true' if speaker_labels else 'false') in query_string
+ assert 'grammar_name={}'.format(grammar_name) in query_string
+ assert 'redaction={}'.format('true' if redaction else 'false') in query_string
+ assert 'processing_metrics={}'.format('true' if processing_metrics else 'false') in query_string
+ assert 'audio_metrics={}'.format('true' if audio_metrics else 'false') in query_string
+ assert 'end_of_phrase_silence_time={}'.format(end_of_phrase_silence_time) in query_string
+ assert 'split_transcript_at_phrase_end={}'.format('true' if split_transcript_at_phrase_end else 'false') in query_string
+ assert 'sad_module={}'.format(sad_module) in query_string
+ assert 'low_latency={}'.format('true' if low_latency else 'false') in query_string
+ # Validate body params
+
+ def test_create_job_all_params_with_retries(self):
+ # Enable retries and run test_create_job_all_params.
+ _service.enable_retries()
+ self.test_create_job_all_params()
+
+ # Disable retries and run test_create_job_all_params.
+ _service.disable_retries()
+ self.test_create_job_all_params()
+
+ @responses.activate
+ def test_create_job_required_params(self):
+ """
+ test_create_job_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/recognitions')
+ mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}], "warnings": ["warnings"]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ audio = io.BytesIO(b'This is a mock file.').getvalue()
+
+ # Invoke method
+ response = _service.create_job(
+ audio,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate body params
+
+ def test_create_job_required_params_with_retries(self):
+ # Enable retries and run test_create_job_required_params.
+ _service.enable_retries()
+ self.test_create_job_required_params()
+
+ # Disable retries and run test_create_job_required_params.
+ _service.disable_retries()
+ self.test_create_job_required_params()
+
+ @responses.activate
+ def test_create_job_value_error(self):
+ """
+ test_create_job_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/recognitions')
+ mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}], "warnings": ["warnings"]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
- assert len(responses.calls) == 6
-
-@responses.activate
-def test_upgrade_acoustic_model():
- acoustic_customization_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/acoustic_customizations'
- upgrade_url = "{0}/{1}/upgrade_model".format(acoustic_customization_url, 'customid')
+ # Set up parameter values
+ audio = io.BytesIO(b'This is a mock file.').getvalue()
- responses.add(
- responses.POST,
- upgrade_url,
- body='{"bogus_response": "yep"}',
- status=200,
- content_type='application/json')
-
- speech_to_text = ibm_watson.SpeechToTextV1(
- username="username", password="password")
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "audio": audio,
+ }
+ for param in req_param_dict.keys():
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.create_job(**req_copy)
- speech_to_text.upgrade_acoustic_model(
- 'customid',
- 'model_x',
- force=True)
- assert responses.calls[0].response.json() == {"bogus_response": "yep"}
-
- assert len(responses.calls) == 1
-
-
-def test_custom_corpora():
+ def test_create_job_value_error_with_retries(self):
+ # Enable retries and run test_create_job_value_error.
+ _service.enable_retries()
+ self.test_create_job_value_error()
- corpora_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/customizations/{0}/corpora'
- get_corpora_url = '{0}/{1}'.format(
- corpora_url.format('customid'), 'corpus')
+ # Disable retries and run test_create_job_value_error.
+ _service.disable_retries()
+ self.test_create_job_value_error()
- with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps:
- rsps.add(
+
+class TestCheckJobs:
+ """
+ Test Class for check_jobs
+ """
+
+ @responses.activate
+ def test_check_jobs_all_params(self):
+ """
+ check_jobs()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/recognitions')
+ mock_response = '{"recognitions": [{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}], "warnings": ["warnings"]}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Invoke method
+ response = _service.check_jobs()
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_check_jobs_all_params_with_retries(self):
+ # Enable retries and run test_check_jobs_all_params.
+ _service.enable_retries()
+ self.test_check_jobs_all_params()
+
+ # Disable retries and run test_check_jobs_all_params.
+ _service.disable_retries()
+ self.test_check_jobs_all_params()
+
+
+class TestCheckJob:
+ """
+ Test Class for check_job
+ """
+
+ @responses.activate
+ def test_check_job_all_params(self):
+ """
+ check_job()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/recognitions/testString')
+ mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}], "warnings": ["warnings"]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ id = 'testString'
+
+ # Invoke method
+ response = _service.check_job(
+ id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_check_job_all_params_with_retries(self):
+ # Enable retries and run test_check_job_all_params.
+ _service.enable_retries()
+ self.test_check_job_all_params()
+
+ # Disable retries and run test_check_job_all_params.
+ _service.disable_retries()
+ self.test_check_job_all_params()
+
+ @responses.activate
+ def test_check_job_value_error(self):
+ """
+ test_check_job_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/recognitions/testString')
+ mock_response = '{"id": "id", "status": "waiting", "created": "created", "updated": "updated", "url": "url", "user_token": "user_token", "results": [{"results": [{"final": false, "alternatives": [{"transcript": "transcript", "confidence": 0, "timestamps": ["timestamps"], "word_confidence": ["word_confidence"]}], "keywords_result": {"mapKey": [{"normalized_text": "normalized_text", "start_time": 10, "end_time": 8, "confidence": 0}]}, "word_alternatives": [{"start_time": 10, "end_time": 8, "alternatives": [{"confidence": 0, "word": "word"}]}], "end_of_utterance": "end_of_data"}], "result_index": 12, "speaker_labels": [{"from": 5, "to": 2, "speaker": 7, "confidence": 10, "final": false}], "processing_metrics": {"processed_audio": {"received": 8, "seen_by_engine": 14, "transcription": 13, "speaker_labels": 14}, "wall_clock_since_first_byte_received": 36, "periodic": true}, "audio_metrics": {"sampling_interval": 17, "accumulated": {"final": false, "end_time": 8, "signal_to_noise_ratio": 21, "speech_ratio": 12, "high_frequency_loss": 19, "direct_current_offset": [{"begin": 5, "end": 3, "count": 5}], "clipping_rate": [{"begin": 5, "end": 3, "count": 5}], "speech_level": [{"begin": 5, "end": 3, "count": 5}], "non_speech_level": [{"begin": 5, "end": 3, "count": 5}]}}, "warnings": ["warnings"], "enriched_results": {"transcript": {"text": "text", "timestamp": {"from": 5, "to": 2}}, "status": "status"}}], "warnings": ["warnings"]}'
+ responses.add(
responses.GET,
- corpora_url.format('customid'),
- body='{"get response": "yep"}',
+ url,
+ body=mock_response,
+ content_type='application/json',
status=200,
- content_type='application/json')
+ )
+
+ # Set up parameter values
+ id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "id": id,
+ }
+ for param in req_param_dict.keys():
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.check_job(**req_copy)
+
+ def test_check_job_value_error_with_retries(self):
+ # Enable retries and run test_check_job_value_error.
+ _service.enable_retries()
+ self.test_check_job_value_error()
+
+ # Disable retries and run test_check_job_value_error.
+ _service.disable_retries()
+ self.test_check_job_value_error()
+
+
+class TestDeleteJob:
+ """
+ Test Class for delete_job
+ """
+
+ @responses.activate
+ def test_delete_job_all_params(self):
+ """
+ delete_job()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/recognitions/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=204,
+ )
+
+ # Set up parameter values
+ id = 'testString'
+
+ # Invoke method
+ response = _service.delete_job(
+ id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 204
+
+ def test_delete_job_all_params_with_retries(self):
+ # Enable retries and run test_delete_job_all_params.
+ _service.enable_retries()
+ self.test_delete_job_all_params()
+
+ # Disable retries and run test_delete_job_all_params.
+ _service.disable_retries()
+ self.test_delete_job_all_params()
- rsps.add(
+ @responses.activate
+ def test_delete_job_value_error(self):
+ """
+ test_delete_job_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/recognitions/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=204,
+ )
+
+ # Set up parameter values
+ id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "id": id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_job(**req_copy)
+
+ def test_delete_job_value_error_with_retries(self):
+ # Enable retries and run test_delete_job_value_error.
+ _service.enable_retries()
+ self.test_delete_job_value_error()
+
+ # Disable retries and run test_delete_job_value_error.
+ _service.disable_retries()
+ self.test_delete_job_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Asynchronous
+##############################################################################
+
+##############################################################################
+# Start of Service: CustomLanguageModels
+##############################################################################
+# region
+
+
+class TestCreateLanguageModel:
+ """
+ Test Class for create_language_model
+ """
+
+ @responses.activate
+ def test_create_language_model_all_params(self):
+ """
+ create_language_model()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations')
+ mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ name = 'testString'
+ base_model_name = 'ar-MS_Telephony'
+ dialect = 'testString'
+ description = 'testString'
+
+ # Invoke method
+ response = _service.create_language_model(
+ name,
+ base_model_name,
+ dialect=dialect,
+ description=description,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 201
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['name'] == 'testString'
+ assert req_body['base_model_name'] == 'ar-MS_Telephony'
+ assert req_body['dialect'] == 'testString'
+ assert req_body['description'] == 'testString'
+
+ def test_create_language_model_all_params_with_retries(self):
+ # Enable retries and run test_create_language_model_all_params.
+ _service.enable_retries()
+ self.test_create_language_model_all_params()
+
+ # Disable retries and run test_create_language_model_all_params.
+ _service.disable_retries()
+ self.test_create_language_model_all_params()
+
+ @responses.activate
+ def test_create_language_model_value_error(self):
+ """
+ test_create_language_model_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations')
+ mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}'
+ responses.add(
responses.POST,
- get_corpora_url,
- body='{"get response": "yep"}',
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=201,
+ )
+
+ # Set up parameter values
+ name = 'testString'
+ base_model_name = 'ar-MS_Telephony'
+ dialect = 'testString'
+ description = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "name": name,
+ "base_model_name": base_model_name,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.create_language_model(**req_copy)
+
+ def test_create_language_model_value_error_with_retries(self):
+ # Enable retries and run test_create_language_model_value_error.
+ _service.enable_retries()
+ self.test_create_language_model_value_error()
+
+ # Disable retries and run test_create_language_model_value_error.
+ _service.disable_retries()
+ self.test_create_language_model_value_error()
+
+
+class TestListLanguageModels:
+ """
+ Test Class for list_language_models
+ """
+
+ @responses.activate
+ def test_list_language_models_all_params(self):
+ """
+ list_language_models()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations')
+ mock_response = '{"customizations": [{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ language = 'ar-MS'
+
+ # Invoke method
+ response = _service.list_language_models(
+ language=language,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'language={}'.format(language) in query_string
+
+ def test_list_language_models_all_params_with_retries(self):
+ # Enable retries and run test_list_language_models_all_params.
+ _service.enable_retries()
+ self.test_list_language_models_all_params()
+
+ # Disable retries and run test_list_language_models_all_params.
+ _service.disable_retries()
+ self.test_list_language_models_all_params()
+
+ @responses.activate
+ def test_list_language_models_required_params(self):
+ """
+ test_list_language_models_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations')
+ mock_response = '{"customizations": [{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Invoke method
+ response = _service.list_language_models()
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_language_models_required_params_with_retries(self):
+ # Enable retries and run test_list_language_models_required_params.
+ _service.enable_retries()
+ self.test_list_language_models_required_params()
+
+ # Disable retries and run test_list_language_models_required_params.
+ _service.disable_retries()
+ self.test_list_language_models_required_params()
+
+
+class TestGetLanguageModel:
+ """
+ Test Class for get_language_model
+ """
+
+ @responses.activate
+ def test_get_language_model_all_params(self):
+ """
+ get_language_model()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations/testString')
+ mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
status=200,
- content_type='application/json')
+ )
+
+ # Set up parameter values
+ customization_id = 'testString'
+
+ # Invoke method
+ response = _service.get_language_model(
+ customization_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_get_language_model_all_params_with_retries(self):
+ # Enable retries and run test_get_language_model_all_params.
+ _service.enable_retries()
+ self.test_get_language_model_all_params()
+
+ # Disable retries and run test_get_language_model_all_params.
+ _service.disable_retries()
+ self.test_get_language_model_all_params()
- rsps.add(
+ @responses.activate
+ def test_get_language_model_value_error(self):
+ """
+ test_get_language_model_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations/testString')
+ mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "dialect": "dialect", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "error": "error", "warnings": "warnings"}'
+ responses.add(
responses.GET,
- get_corpora_url,
- body='{"get response": "yep"}',
+ url,
+ body=mock_response,
+ content_type='application/json',
status=200,
- content_type='application/json')
+ )
- rsps.add(
+ # Set up parameter values
+ customization_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "customization_id": customization_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.get_language_model(**req_copy)
+
+ def test_get_language_model_value_error_with_retries(self):
+ # Enable retries and run test_get_language_model_value_error.
+ _service.enable_retries()
+ self.test_get_language_model_value_error()
+
+ # Disable retries and run test_get_language_model_value_error.
+ _service.disable_retries()
+ self.test_get_language_model_value_error()
+
+
+class TestDeleteLanguageModel:
+ """
+ Test Class for delete_language_model
+ """
+
+ @responses.activate
+ def test_delete_language_model_all_params(self):
+ """
+ delete_language_model()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations/testString')
+ responses.add(
+ responses.DELETE,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ customization_id = 'testString'
+
+ # Invoke method
+ response = _service.delete_language_model(
+ customization_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_delete_language_model_all_params_with_retries(self):
+ # Enable retries and run test_delete_language_model_all_params.
+ _service.enable_retries()
+ self.test_delete_language_model_all_params()
+
+ # Disable retries and run test_delete_language_model_all_params.
+ _service.disable_retries()
+ self.test_delete_language_model_all_params()
+
+ @responses.activate
+ def test_delete_language_model_value_error(self):
+ """
+ test_delete_language_model_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations/testString')
+ responses.add(
responses.DELETE,
- get_corpora_url,
- body='{"get response": "yep"}',
- status=200,
- content_type='application/json')
-
- speech_to_text = ibm_watson.SpeechToTextV1(
- username="username", password="password")
-
- speech_to_text.list_corpora(customization_id='customid')
-
- file_path = '../../resources/speech_to_text/corpus-short-1.txt'
- full_path = os.path.join(os.path.dirname(__file__), file_path)
- with open(full_path) as corpus_file:
- speech_to_text.add_corpus(
- customization_id='customid',
- corpus_name="corpus",
- corpus_file=corpus_file)
-
- speech_to_text.get_corpus(
- customization_id='customid', corpus_name='corpus')
-
- speech_to_text.delete_corpus(
- customization_id='customid', corpus_name='corpus')
-
-
-@responses.activate
-def test_custom_words():
- words_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/customizations/{0}/words'
- word_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/customizations/{0}/words/{1}'
-
- responses.add(
- responses.PUT,
- word_url.format('custid', 'IEEE'),
- body='{"get response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.PUT,
- word_url.format('custid', 'wordname'),
- body='{"get response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.DELETE,
- word_url.format('custid', 'IEEE'),
- body='{"get response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.DELETE,
- word_url.format('custid', 'wordname'),
- body='{"get response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.GET,
- word_url.format('custid', 'IEEE'),
- body='{"get response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.GET,
- word_url.format('custid', 'wordname'),
- body='{"get response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.POST,
- words_url.format('custid'),
- body='{"get response": "yep"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.GET,
- words_url.format('custid'),
- body='{"get response": "yep"}',
- status=200,
- content_type='application/json')
-
- speech_to_text = ibm_watson.SpeechToTextV1(
- username="username", password="password")
-
- custom_word = CustomWord(
- word="IEEE", sounds_like=["i triple e"], display_as="IEEE")
-
- speech_to_text.add_word(
- customization_id='custid',
- word_name="IEEE",
- sounds_like=["i triple e"],
- display_as="IEEE")
-
- speech_to_text.delete_word(customization_id='custid', word_name="wordname")
-
- speech_to_text.delete_word(customization_id='custid', word_name='IEEE')
-
- custom_words = [custom_word, custom_word, custom_word]
- speech_to_text.add_words(
- customization_id='custid',
- words=custom_words)
-
- speech_to_text.get_word(customization_id='custid', word_name="IEEE")
-
- speech_to_text.get_word(customization_id='custid', word_name='wordname')
-
- speech_to_text.list_words(customization_id='custid')
- speech_to_text.list_words(customization_id='custid', sort='alphabetical')
-
- speech_to_text.list_words(customization_id='custid', word_type='all')
-
- assert len(responses.calls) == 9
-
-
-@responses.activate
-def test_custom_audio_resources():
- url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/acoustic_customizations/{0}/audio/{1}'
-
- responses.add(
- responses.POST,
- url.format('custid', 'hiee'),
- body='{"post response": "done"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.DELETE,
- url.format('custid', 'hiee'),
- body='{"delete response": "done"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.GET,
- url.format('custid', 'hiee'),
- body='{"get response": "done"}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.GET,
- 'https://stream.watsonplatform.net/speech-to-text/api/v1/acoustic_customizations/custid/audio',
- body='{"get response all": "done"}',
- status=200,
- content_type='application/json')
-
- speech_to_text = ibm_watson.SpeechToTextV1(
- username="username", password="password")
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file:
- speech_to_text.add_audio(
- customization_id='custid',
- audio_name="hiee",
- audio_resource=audio_file,
- content_type="application/json")
- assert responses.calls[0].response.json() == {"post response": "done"}
-
- speech_to_text.delete_audio('custid', 'hiee')
- assert responses.calls[1].response.json() == {"delete response": "done"}
-
- speech_to_text.get_audio('custid', 'hiee')
- assert responses.calls[2].response.json() == {"get response": "done"}
-
- speech_to_text.list_audio('custid')
- assert responses.calls[3].response.json() == {"get response all": "done"}
-
-@responses.activate
-def test_delete_user_data():
- url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/user_data'
- responses.add(
- responses.DELETE,
- url,
- body='{"description": "success" }',
- status=204,
- content_type='application_json')
-
- speech_to_text = ibm_watson.SpeechToTextV1(username="username", password="password")
- response = speech_to_text.delete_user_data('id').get_result()
- assert response is None
- assert len(responses.calls) == 1
-
-@responses.activate
-def test_custom_grammars():
- url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/customizations/{0}/grammars/{1}'
-
- responses.add(
- responses.POST,
- url.format('customization_id', 'grammar_name'),
- body='{}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.DELETE,
- url.format('customization_id', 'grammar_name'),
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.GET,
- url.format('customization_id', 'grammar_name'),
- body='{"status": "analyzed", "name": "test-add-grammar-python", "out_of_vocabulary_words": 0}',
- status=200,
- content_type='application/json')
-
- responses.add(
- responses.GET,
- url='https://stream.watsonplatform.net/speech-to-text/api/v1/customizations/customization_id/grammars',
- body='{"grammars":[{"status": "analyzed", "name": "test-add-grammar-python", "out_of_vocabulary_words": 0}]}',
- status=200,
- content_type='application/json')
-
- speech_to_text = ibm_watson.SpeechToTextV1(
- username="username", password="password")
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/confirm-grammar.xml'), 'rb') as grammar_file:
- speech_to_text.add_grammar(
- "customization_id",
- grammar_name='grammar_name',
- grammar_file=grammar_file,
- content_type='application/srgs+xml',
- allow_overwrite=True)
- assert responses.calls[0].response.json() == {}
-
- speech_to_text.delete_grammar('customization_id', 'grammar_name')
- assert responses.calls[1].response.status_code == 200
-
- speech_to_text.get_grammar('customization_id', 'grammar_name')
- assert responses.calls[2].response.json() == {"status": "analyzed", "name": "test-add-grammar-python", "out_of_vocabulary_words": 0}
-
- speech_to_text.list_grammars('customization_id')
- assert responses.calls[3].response.json() == {"grammars":[{"status": "analyzed", "name": "test-add-grammar-python", "out_of_vocabulary_words": 0}]}
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ customization_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "customization_id": customization_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.delete_language_model(**req_copy)
+
+ def test_delete_language_model_value_error_with_retries(self):
+ # Enable retries and run test_delete_language_model_value_error.
+ _service.enable_retries()
+ self.test_delete_language_model_value_error()
+
+ # Disable retries and run test_delete_language_model_value_error.
+ _service.disable_retries()
+ self.test_delete_language_model_value_error()
+
+
+class TestTrainLanguageModel:
+ """
+ Test Class for train_language_model
+ """
+
+ @responses.activate
+ def test_train_language_model_all_params(self):
+ """
+ train_language_model()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations/testString/train')
+ mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ customization_id = 'testString'
+ word_type_to_add = 'all'
+ customization_weight = 72.5
+ strict = True
+ force = False
+
+ # Invoke method
+ response = _service.train_language_model(
+ customization_id,
+ word_type_to_add=word_type_to_add,
+ customization_weight=customization_weight,
+ strict=strict,
+ force=force,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'word_type_to_add={}'.format(word_type_to_add) in query_string
+ assert 'customization_weight={}'.format(customization_weight) in query_string
+ assert 'strict={}'.format('true' if strict else 'false') in query_string
+ assert 'force={}'.format('true' if force else 'false') in query_string
+
+ def test_train_language_model_all_params_with_retries(self):
+ # Enable retries and run test_train_language_model_all_params.
+ _service.enable_retries()
+ self.test_train_language_model_all_params()
+
+ # Disable retries and run test_train_language_model_all_params.
+ _service.disable_retries()
+ self.test_train_language_model_all_params()
+
+ @responses.activate
+ def test_train_language_model_required_params(self):
+ """
+ test_train_language_model_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations/testString/train')
+ mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ customization_id = 'testString'
+
+ # Invoke method
+ response = _service.train_language_model(
+ customization_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_train_language_model_required_params_with_retries(self):
+ # Enable retries and run test_train_language_model_required_params.
+ _service.enable_retries()
+ self.test_train_language_model_required_params()
+
+ # Disable retries and run test_train_language_model_required_params.
+ _service.disable_retries()
+ self.test_train_language_model_required_params()
+
+ @responses.activate
+ def test_train_language_model_value_error(self):
+ """
+ test_train_language_model_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations/testString/train')
+ mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ customization_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "customization_id": customization_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.train_language_model(**req_copy)
+
+ def test_train_language_model_value_error_with_retries(self):
+ # Enable retries and run test_train_language_model_value_error.
+ _service.enable_retries()
+ self.test_train_language_model_value_error()
+
+ # Disable retries and run test_train_language_model_value_error.
+ _service.disable_retries()
+ self.test_train_language_model_value_error()
+
+
+class TestResetLanguageModel:
+ """
+ Test Class for reset_language_model
+ """
+
+ @responses.activate
+ def test_reset_language_model_all_params(self):
+ """
+ reset_language_model()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations/testString/reset')
+ responses.add(
+ responses.POST,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ customization_id = 'testString'
+
+ # Invoke method
+ response = _service.reset_language_model(
+ customization_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_reset_language_model_all_params_with_retries(self):
+ # Enable retries and run test_reset_language_model_all_params.
+ _service.enable_retries()
+ self.test_reset_language_model_all_params()
+
+ # Disable retries and run test_reset_language_model_all_params.
+ _service.disable_retries()
+ self.test_reset_language_model_all_params()
+
+ @responses.activate
+ def test_reset_language_model_value_error(self):
+ """
+ test_reset_language_model_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations/testString/reset')
+ responses.add(
+ responses.POST,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ customization_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "customization_id": customization_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.reset_language_model(**req_copy)
+
+ def test_reset_language_model_value_error_with_retries(self):
+ # Enable retries and run test_reset_language_model_value_error.
+ _service.enable_retries()
+ self.test_reset_language_model_value_error()
+
+ # Disable retries and run test_reset_language_model_value_error.
+ _service.disable_retries()
+ self.test_reset_language_model_value_error()
+
+
+class TestUpgradeLanguageModel:
+ """
+ Test Class for upgrade_language_model
+ """
+
+ @responses.activate
+ def test_upgrade_language_model_all_params(self):
+ """
+ upgrade_language_model()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations/testString/upgrade_model')
+ responses.add(
+ responses.POST,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ customization_id = 'testString'
+
+ # Invoke method
+ response = _service.upgrade_language_model(
+ customization_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_upgrade_language_model_all_params_with_retries(self):
+ # Enable retries and run test_upgrade_language_model_all_params.
+ _service.enable_retries()
+ self.test_upgrade_language_model_all_params()
+
+ # Disable retries and run test_upgrade_language_model_all_params.
+ _service.disable_retries()
+ self.test_upgrade_language_model_all_params()
+
+ @responses.activate
+ def test_upgrade_language_model_value_error(self):
+ """
+ test_upgrade_language_model_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations/testString/upgrade_model')
+ responses.add(
+ responses.POST,
+ url,
+ status=200,
+ )
+
+ # Set up parameter values
+ customization_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "customization_id": customization_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.upgrade_language_model(**req_copy)
+
+ def test_upgrade_language_model_value_error_with_retries(self):
+ # Enable retries and run test_upgrade_language_model_value_error.
+ _service.enable_retries()
+ self.test_upgrade_language_model_value_error()
+
+ # Disable retries and run test_upgrade_language_model_value_error.
+ _service.disable_retries()
+ self.test_upgrade_language_model_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: CustomLanguageModels
+##############################################################################
+
+##############################################################################
+# Start of Service: CustomCorpora
+##############################################################################
+# region
+
+
+class TestListCorpora:
+ """
+ Test Class for list_corpora
+ """
+
+ @responses.activate
+ def test_list_corpora_all_params(self):
+ """
+ list_corpora()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations/testString/corpora')
+ mock_response = '{"corpora": [{"name": "name", "total_words": 11, "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ customization_id = 'testString'
+
+ # Invoke method
+ response = _service.list_corpora(
+ customization_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_corpora_all_params_with_retries(self):
+ # Enable retries and run test_list_corpora_all_params.
+ _service.enable_retries()
+ self.test_list_corpora_all_params()
+
+ # Disable retries and run test_list_corpora_all_params.
+ _service.disable_retries()
+ self.test_list_corpora_all_params()
+
+ @responses.activate
+ def test_list_corpora_value_error(self):
+ """
+ test_list_corpora_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/customizations/testString/corpora')
+ mock_response = '{"corpora": [{"name": "name", "total_words": 11, "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ customization_id = 'testString'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "customization_id": customization_id,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.list_corpora(**req_copy)
+
+ def test_list_corpora_value_error_with_retries(self):
+ # Enable retries and run test_list_corpora_value_error.
+ _service.enable_retries()
+ self.test_list_corpora_value_error()
+
+ # Disable retries and run test_list_corpora_value_error.
+ _service.disable_retries()
+ self.test_list_corpora_value_error()
+
+
+class TestAddCorpus:
+    """
+    Unit tests for the add_corpus operation.
+    """
+
+    @responses.activate
+    def test_add_corpus_all_params(self):
+        """
+        add_corpus()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/corpora/testString')
+        responses.add(
+            responses.POST,
+            url,
+            status=201,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        corpus_name = 'testString'
+        corpus_file = io.BytesIO(b'This is a mock file.').getvalue()
+        allow_overwrite = False
+
+        # Invoke method
+        response = _service.add_corpus(
+            customization_id,
+            corpus_name,
+            corpus_file,
+            allow_overwrite=allow_overwrite,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)
+        assert 'allow_overwrite={}'.format('true' if allow_overwrite else 'false') in query_string
+
+    def test_add_corpus_all_params_with_retries(self):
+        # Enable retries and run test_add_corpus_all_params.
+        _service.enable_retries()
+        self.test_add_corpus_all_params()
+
+        # Disable retries and run test_add_corpus_all_params.
+        _service.disable_retries()
+        self.test_add_corpus_all_params()
+
+    @responses.activate
+    def test_add_corpus_required_params(self):
+        """
+        test_add_corpus_required_params()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/corpora/testString')
+        responses.add(
+            responses.POST,
+            url,
+            status=201,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        corpus_name = 'testString'
+        corpus_file = io.BytesIO(b'This is a mock file.').getvalue()
+
+        # Invoke method
+        response = _service.add_corpus(
+            customization_id,
+            corpus_name,
+            corpus_file,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+
+    def test_add_corpus_required_params_with_retries(self):
+        # Enable retries and run test_add_corpus_required_params.
+        _service.enable_retries()
+        self.test_add_corpus_required_params()
+
+        # Disable retries and run test_add_corpus_required_params.
+        _service.disable_retries()
+        self.test_add_corpus_required_params()
+
+    @responses.activate
+    def test_add_corpus_value_error(self):
+        """
+        test_add_corpus_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/corpora/testString')
+        responses.add(
+            responses.POST,
+            url,
+            status=201,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        corpus_name = 'testString'
+        corpus_file = io.BytesIO(b'This is a mock file.').getvalue()
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "corpus_name": corpus_name,
+            "corpus_file": corpus_file,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.add_corpus(**req_copy)
+
+    def test_add_corpus_value_error_with_retries(self):
+        # Enable retries and run test_add_corpus_value_error.
+        _service.enable_retries()
+        self.test_add_corpus_value_error()
+
+        # Disable retries and run test_add_corpus_value_error.
+        _service.disable_retries()
+        self.test_add_corpus_value_error()
+
+
+class TestGetCorpus:
+    """
+    Unit tests for the get_corpus operation.
+    """
+
+    @responses.activate
+    def test_get_corpus_all_params(self):
+        """
+        get_corpus()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/corpora/testString')
+        mock_response = '{"name": "name", "total_words": 11, "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        corpus_name = 'testString'
+
+        # Invoke method
+        response = _service.get_corpus(
+            customization_id,
+            corpus_name,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_get_corpus_all_params_with_retries(self):
+        # Enable retries and run test_get_corpus_all_params.
+        _service.enable_retries()
+        self.test_get_corpus_all_params()
+
+        # Disable retries and run test_get_corpus_all_params.
+        _service.disable_retries()
+        self.test_get_corpus_all_params()
+
+    @responses.activate
+    def test_get_corpus_value_error(self):
+        """
+        test_get_corpus_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/corpora/testString')
+        mock_response = '{"name": "name", "total_words": 11, "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        corpus_name = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "corpus_name": corpus_name,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.get_corpus(**req_copy)
+
+    def test_get_corpus_value_error_with_retries(self):
+        # Enable retries and run test_get_corpus_value_error.
+        _service.enable_retries()
+        self.test_get_corpus_value_error()
+
+        # Disable retries and run test_get_corpus_value_error.
+        _service.disable_retries()
+        self.test_get_corpus_value_error()
+
+
+class TestDeleteCorpus:
+    """
+    Unit tests for the delete_corpus operation.
+    """
+
+    @responses.activate
+    def test_delete_corpus_all_params(self):
+        """
+        delete_corpus()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/corpora/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        corpus_name = 'testString'
+
+        # Invoke method
+        response = _service.delete_corpus(
+            customization_id,
+            corpus_name,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_delete_corpus_all_params_with_retries(self):
+        # Enable retries and run test_delete_corpus_all_params.
+        _service.enable_retries()
+        self.test_delete_corpus_all_params()
+
+        # Disable retries and run test_delete_corpus_all_params.
+        _service.disable_retries()
+        self.test_delete_corpus_all_params()
+
+    @responses.activate
+    def test_delete_corpus_value_error(self):
+        """
+        test_delete_corpus_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/corpora/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        corpus_name = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "corpus_name": corpus_name,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.delete_corpus(**req_copy)
+
+    def test_delete_corpus_value_error_with_retries(self):
+        # Enable retries and run test_delete_corpus_value_error.
+        _service.enable_retries()
+        self.test_delete_corpus_value_error()
+
+        # Disable retries and run test_delete_corpus_value_error.
+        _service.disable_retries()
+        self.test_delete_corpus_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: CustomCorpora
+##############################################################################
+
+##############################################################################
+# Start of Service: CustomWords
+##############################################################################
+# region
+
+
+class TestListWords:
+    """
+    Unit tests for the list_words operation.
+    """
+
+    @responses.activate
+    def test_list_words_all_params(self):
+        """
+        list_words()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words')
+        mock_response = '{"words": [{"word": "word", "mapping_only": ["mapping_only"], "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        word_type = 'all'
+        sort = 'alphabetical'
+
+        # Invoke method
+        response = _service.list_words(
+            customization_id,
+            word_type=word_type,
+            sort=sort,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)
+        assert 'word_type={}'.format(word_type) in query_string
+        assert 'sort={}'.format(sort) in query_string
+
+    def test_list_words_all_params_with_retries(self):
+        # Enable retries and run test_list_words_all_params.
+        _service.enable_retries()
+        self.test_list_words_all_params()
+
+        # Disable retries and run test_list_words_all_params.
+        _service.disable_retries()
+        self.test_list_words_all_params()
+
+    @responses.activate
+    def test_list_words_required_params(self):
+        """
+        test_list_words_required_params()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words')
+        mock_response = '{"words": [{"word": "word", "mapping_only": ["mapping_only"], "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+
+        # Invoke method
+        response = _service.list_words(
+            customization_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_list_words_required_params_with_retries(self):
+        # Enable retries and run test_list_words_required_params.
+        _service.enable_retries()
+        self.test_list_words_required_params()
+
+        # Disable retries and run test_list_words_required_params.
+        _service.disable_retries()
+        self.test_list_words_required_params()
+
+    @responses.activate
+    def test_list_words_value_error(self):
+        """
+        test_list_words_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words')
+        mock_response = '{"words": [{"word": "word", "mapping_only": ["mapping_only"], "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.list_words(**req_copy)
+
+    def test_list_words_value_error_with_retries(self):
+        # Enable retries and run test_list_words_value_error.
+        _service.enable_retries()
+        self.test_list_words_value_error()
+
+        # Disable retries and run test_list_words_value_error.
+        _service.disable_retries()
+        self.test_list_words_value_error()
+
+
+class TestAddWords:
+    """
+    Unit tests for the add_words operation.
+    """
+
+    @responses.activate
+    def test_add_words_all_params(self):
+        """
+        add_words()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words')
+        responses.add(
+            responses.POST,
+            url,
+            status=201,
+        )
+
+        # Construct a dict representation of a CustomWord model
+        custom_word_model = {}
+        custom_word_model['word'] = 'testString'
+        custom_word_model['mapping_only'] = ['testString']
+        custom_word_model['sounds_like'] = ['testString']
+        custom_word_model['display_as'] = 'testString'
+
+        # Set up parameter values
+        customization_id = 'testString'
+        words = [custom_word_model]
+
+        # Invoke method
+        response = _service.add_words(
+            customization_id,
+            words,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+        assert req_body['words'] == [custom_word_model]
+
+    def test_add_words_all_params_with_retries(self):
+        # Enable retries and run test_add_words_all_params.
+        _service.enable_retries()
+        self.test_add_words_all_params()
+
+        # Disable retries and run test_add_words_all_params.
+        _service.disable_retries()
+        self.test_add_words_all_params()
+
+    @responses.activate
+    def test_add_words_value_error(self):
+        """
+        test_add_words_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words')
+        responses.add(
+            responses.POST,
+            url,
+            status=201,
+        )
+
+        # Construct a dict representation of a CustomWord model
+        custom_word_model = {}
+        custom_word_model['word'] = 'testString'
+        custom_word_model['mapping_only'] = ['testString']
+        custom_word_model['sounds_like'] = ['testString']
+        custom_word_model['display_as'] = 'testString'
+
+        # Set up parameter values
+        customization_id = 'testString'
+        words = [custom_word_model]
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "words": words,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.add_words(**req_copy)
+
+    def test_add_words_value_error_with_retries(self):
+        # Enable retries and run test_add_words_value_error.
+        _service.enable_retries()
+        self.test_add_words_value_error()
+
+        # Disable retries and run test_add_words_value_error.
+        _service.disable_retries()
+        self.test_add_words_value_error()
+
+
+class TestAddWord:
+    """
+    Unit tests for the add_word operation.
+    """
+
+    @responses.activate
+    def test_add_word_all_params(self):
+        """
+        add_word()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words/testString')
+        responses.add(
+            responses.PUT,
+            url,
+            status=201,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        word_name = 'testString'
+        word = 'testString'
+        mapping_only = ['testString']
+        sounds_like = ['testString']
+        display_as = 'testString'
+
+        # Invoke method
+        response = _service.add_word(
+            customization_id,
+            word_name,
+            word=word,
+            mapping_only=mapping_only,
+            sounds_like=sounds_like,
+            display_as=display_as,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+        assert req_body['word'] == 'testString'
+        assert req_body['mapping_only'] == ['testString']
+        assert req_body['sounds_like'] == ['testString']
+        assert req_body['display_as'] == 'testString'
+
+    def test_add_word_all_params_with_retries(self):
+        # Enable retries and run test_add_word_all_params.
+        _service.enable_retries()
+        self.test_add_word_all_params()
+
+        # Disable retries and run test_add_word_all_params.
+        _service.disable_retries()
+        self.test_add_word_all_params()
+
+    @responses.activate
+    def test_add_word_value_error(self):
+        """
+        test_add_word_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words/testString')
+        responses.add(
+            responses.PUT,
+            url,
+            status=201,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        word_name = 'testString'
+        word = 'testString'
+        mapping_only = ['testString']
+        sounds_like = ['testString']
+        display_as = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "word_name": word_name,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.add_word(**req_copy)
+
+    def test_add_word_value_error_with_retries(self):
+        # Enable retries and run test_add_word_value_error.
+        _service.enable_retries()
+        self.test_add_word_value_error()
+
+        # Disable retries and run test_add_word_value_error.
+        _service.disable_retries()
+        self.test_add_word_value_error()
+
+
+class TestGetWord:
+    """
+    Unit tests for the get_word operation.
+    """
+
+    @responses.activate
+    def test_get_word_all_params(self):
+        """
+        get_word()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words/testString')
+        mock_response = '{"word": "word", "mapping_only": ["mapping_only"], "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        word_name = 'testString'
+
+        # Invoke method
+        response = _service.get_word(
+            customization_id,
+            word_name,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_get_word_all_params_with_retries(self):
+        # Enable retries and run test_get_word_all_params.
+        _service.enable_retries()
+        self.test_get_word_all_params()
+
+        # Disable retries and run test_get_word_all_params.
+        _service.disable_retries()
+        self.test_get_word_all_params()
+
+    @responses.activate
+    def test_get_word_value_error(self):
+        """
+        test_get_word_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words/testString')
+        mock_response = '{"word": "word", "mapping_only": ["mapping_only"], "sounds_like": ["sounds_like"], "display_as": "display_as", "count": 5, "source": ["source"], "error": [{"element": "element"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        word_name = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "word_name": word_name,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.get_word(**req_copy)
+
+    def test_get_word_value_error_with_retries(self):
+        # Enable retries and run test_get_word_value_error.
+        _service.enable_retries()
+        self.test_get_word_value_error()
+
+        # Disable retries and run test_get_word_value_error.
+        _service.disable_retries()
+        self.test_get_word_value_error()
+
+
+class TestDeleteWord:
+    """
+    Unit tests for the delete_word operation.
+    """
+
+    @responses.activate
+    def test_delete_word_all_params(self):
+        """
+        delete_word()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        word_name = 'testString'
+
+        # Invoke method
+        response = _service.delete_word(
+            customization_id,
+            word_name,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_delete_word_all_params_with_retries(self):
+        # Enable retries and run test_delete_word_all_params.
+        _service.enable_retries()
+        self.test_delete_word_all_params()
+
+        # Disable retries and run test_delete_word_all_params.
+        _service.disable_retries()
+        self.test_delete_word_all_params()
+
+    @responses.activate
+    def test_delete_word_value_error(self):
+        """
+        test_delete_word_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        word_name = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "word_name": word_name,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.delete_word(**req_copy)
+
+    def test_delete_word_value_error_with_retries(self):
+        # Enable retries and run test_delete_word_value_error.
+        _service.enable_retries()
+        self.test_delete_word_value_error()
+
+        # Disable retries and run test_delete_word_value_error.
+        _service.disable_retries()
+        self.test_delete_word_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: CustomWords
+##############################################################################
+
+##############################################################################
+# Start of Service: CustomGrammars
+##############################################################################
+# region
+
+
+class TestListGrammars:
+    """
+    Unit tests for the list_grammars operation.
+    """
+
+    @responses.activate
+    def test_list_grammars_all_params(self):
+        """
+        list_grammars()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/grammars')
+        mock_response = '{"grammars": [{"name": "name", "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+
+        # Invoke method
+        response = _service.list_grammars(
+            customization_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_list_grammars_all_params_with_retries(self):
+        # Enable retries and run test_list_grammars_all_params.
+        _service.enable_retries()
+        self.test_list_grammars_all_params()
+
+        # Disable retries and run test_list_grammars_all_params.
+        _service.disable_retries()
+        self.test_list_grammars_all_params()
+
+    @responses.activate
+    def test_list_grammars_value_error(self):
+        """
+        test_list_grammars_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/grammars')
+        mock_response = '{"grammars": [{"name": "name", "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.list_grammars(**req_copy)
+
+    def test_list_grammars_value_error_with_retries(self):
+        # Enable retries and run test_list_grammars_value_error.
+        _service.enable_retries()
+        self.test_list_grammars_value_error()
+
+        # Disable retries and run test_list_grammars_value_error.
+        _service.disable_retries()
+        self.test_list_grammars_value_error()
+
+
+class TestAddGrammar:
+    """
+    Unit tests for the add_grammar operation.
+    """
+
+    @responses.activate
+    def test_add_grammar_all_params(self):
+        """
+        add_grammar()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/grammars/testString')
+        responses.add(
+            responses.POST,
+            url,
+            status=201,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        grammar_name = 'testString'
+        grammar_file = io.BytesIO(b'This is a mock file.').getvalue()
+        content_type = 'application/srgs'
+        allow_overwrite = False
+
+        # Invoke method
+        response = _service.add_grammar(
+            customization_id,
+            grammar_name,
+            grammar_file,
+            content_type,
+            allow_overwrite=allow_overwrite,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)
+        assert 'allow_overwrite={}'.format('true' if allow_overwrite else 'false') in query_string
+        # Validate body params
+
+    def test_add_grammar_all_params_with_retries(self):
+        # Enable retries and run test_add_grammar_all_params.
+        _service.enable_retries()
+        self.test_add_grammar_all_params()
+
+        # Disable retries and run test_add_grammar_all_params.
+        _service.disable_retries()
+        self.test_add_grammar_all_params()
+
+    @responses.activate
+    def test_add_grammar_required_params(self):
+        """
+        test_add_grammar_required_params()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/grammars/testString')
+        responses.add(
+            responses.POST,
+            url,
+            status=201,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        grammar_name = 'testString'
+        grammar_file = io.BytesIO(b'This is a mock file.').getvalue()
+        content_type = 'application/srgs'
+
+        # Invoke method
+        response = _service.add_grammar(
+            customization_id,
+            grammar_name,
+            grammar_file,
+            content_type,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+        # Validate body params
+
+    def test_add_grammar_required_params_with_retries(self):
+        # Enable retries and run test_add_grammar_required_params.
+        _service.enable_retries()
+        self.test_add_grammar_required_params()
+
+        # Disable retries and run test_add_grammar_required_params.
+        _service.disable_retries()
+        self.test_add_grammar_required_params()
+
+    @responses.activate
+    def test_add_grammar_value_error(self):
+        """
+        test_add_grammar_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/grammars/testString')
+        responses.add(
+            responses.POST,
+            url,
+            status=201,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        grammar_name = 'testString'
+        grammar_file = io.BytesIO(b'This is a mock file.').getvalue()
+        content_type = 'application/srgs'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "grammar_name": grammar_name,
+            "grammar_file": grammar_file,
+            "content_type": content_type,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.add_grammar(**req_copy)
+
+    def test_add_grammar_value_error_with_retries(self):
+        # Enable retries and run test_add_grammar_value_error.
+        _service.enable_retries()
+        self.test_add_grammar_value_error()
+
+        # Disable retries and run test_add_grammar_value_error.
+        _service.disable_retries()
+        self.test_add_grammar_value_error()
+
+
+class TestGetGrammar:
+    """
+    Unit tests for the get_grammar operation.
+    """
+
+    @responses.activate
+    def test_get_grammar_all_params(self):
+        """
+        get_grammar()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/grammars/testString')
+        mock_response = '{"name": "name", "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        grammar_name = 'testString'
+
+        # Invoke method
+        response = _service.get_grammar(
+            customization_id,
+            grammar_name,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_get_grammar_all_params_with_retries(self):
+        # Enable retries and run test_get_grammar_all_params.
+        _service.enable_retries()
+        self.test_get_grammar_all_params()
+
+        # Disable retries and run test_get_grammar_all_params.
+        _service.disable_retries()
+        self.test_get_grammar_all_params()
+
+    @responses.activate
+    def test_get_grammar_value_error(self):
+        """
+        test_get_grammar_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/grammars/testString')
+        mock_response = '{"name": "name", "out_of_vocabulary_words": 23, "status": "analyzed", "error": "error"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        grammar_name = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "grammar_name": grammar_name,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.get_grammar(**req_copy)
+
+    def test_get_grammar_value_error_with_retries(self):
+        # Enable retries and run test_get_grammar_value_error.
+        _service.enable_retries()
+        self.test_get_grammar_value_error()
+
+        # Disable retries and run test_get_grammar_value_error.
+        _service.disable_retries()
+        self.test_get_grammar_value_error()
+
+
+class TestDeleteGrammar:
+    """
+    Unit tests for the delete_grammar operation.
+    """
+
+    @responses.activate
+    def test_delete_grammar_all_params(self):
+        """
+        delete_grammar()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/grammars/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        grammar_name = 'testString'
+
+        # Invoke method
+        response = _service.delete_grammar(
+            customization_id,
+            grammar_name,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_delete_grammar_all_params_with_retries(self):
+        # Enable retries and run test_delete_grammar_all_params.
+        _service.enable_retries()
+        self.test_delete_grammar_all_params()
+
+        # Disable retries and run test_delete_grammar_all_params.
+        _service.disable_retries()
+        self.test_delete_grammar_all_params()
+
+    @responses.activate
+    def test_delete_grammar_value_error(self):
+        """
+        test_delete_grammar_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/grammars/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        grammar_name = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "grammar_name": grammar_name,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.delete_grammar(**req_copy)
+
+    def test_delete_grammar_value_error_with_retries(self):
+        # Enable retries and run test_delete_grammar_value_error.
+        _service.enable_retries()
+        self.test_delete_grammar_value_error()
+
+        # Disable retries and run test_delete_grammar_value_error.
+        _service.disable_retries()
+        self.test_delete_grammar_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: CustomGrammars
+##############################################################################
+
+##############################################################################
+# Start of Service: CustomAcousticModels
+##############################################################################
+# region
+
+
+class TestCreateAcousticModel:
+    """
+    Unit tests for the create_acoustic_model operation.
+    """
+
+    @responses.activate
+    def test_create_acoustic_model_all_params(self):
+        """
+        create_acoustic_model()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/acoustic_customizations')
+        mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Set up parameter values
+        name = 'testString'
+        base_model_name = 'ar-MS_BroadbandModel'
+        description = 'testString'
+
+        # Invoke method
+        response = _service.create_acoustic_model(
+            name,
+            base_model_name,
+            description=description,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+        assert req_body['name'] == 'testString'
+        assert req_body['base_model_name'] == 'ar-MS_BroadbandModel'
+        assert req_body['description'] == 'testString'
+
+    def test_create_acoustic_model_all_params_with_retries(self):
+        # Enable retries and run test_create_acoustic_model_all_params.
+        _service.enable_retries()
+        self.test_create_acoustic_model_all_params()
+
+        # Disable retries and run test_create_acoustic_model_all_params.
+        _service.disable_retries()
+        self.test_create_acoustic_model_all_params()
+
+    @responses.activate
+    def test_create_acoustic_model_value_error(self):
+        """
+        test_create_acoustic_model_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/acoustic_customizations')
+        mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Set up parameter values
+        name = 'testString'
+        base_model_name = 'ar-MS_BroadbandModel'
+        description = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "name": name,
+            "base_model_name": base_model_name,
+        }
+        for param in req_param_dict:
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.create_acoustic_model(**req_copy)
+
+    def test_create_acoustic_model_value_error_with_retries(self):
+        # Enable retries and run test_create_acoustic_model_value_error.
+        _service.enable_retries()
+        self.test_create_acoustic_model_value_error()
+
+        # Disable retries and run test_create_acoustic_model_value_error.
+        _service.disable_retries()
+        self.test_create_acoustic_model_value_error()
+
+
class TestListAcousticModels:
    """
    Test Class for list_acoustic_models
    """

    @responses.activate
    def test_list_acoustic_models_all_params(self):
        """
        list_acoustic_models()
        """
        # Register the mocked GET endpoint
        endpoint = preprocess_url('/v1/acoustic_customizations')
        body_json = '{"customizations": [{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}]}'
        responses.add(
            responses.GET,
            endpoint,
            body=body_json,
            content_type='application/json',
            status=200,
        )

        # Parameter values
        language = 'ar-MS'

        # Call the service
        response = _service.list_acoustic_models(
            language=language,
            headers={},
        )

        # Exactly one request was made and it succeeded
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # The query string carried the language filter
        sent_query = urllib.parse.unquote_plus(
            responses.calls[0].request.url.split('?', 1)[1])
        assert 'language={}'.format(language) in sent_query

    def test_list_acoustic_models_all_params_with_retries(self):
        # Run test_list_acoustic_models_all_params with retries enabled,
        # then again with retries disabled.
        _service.enable_retries()
        self.test_list_acoustic_models_all_params()

        _service.disable_retries()
        self.test_list_acoustic_models_all_params()

    @responses.activate
    def test_list_acoustic_models_required_params(self):
        """
        test_list_acoustic_models_required_params()
        """
        # Register the mocked GET endpoint
        endpoint = preprocess_url('/v1/acoustic_customizations')
        body_json = '{"customizations": [{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}]}'
        responses.add(
            responses.GET,
            endpoint,
            body=body_json,
            content_type='application/json',
            status=200,
        )

        # Call the service with no optional arguments
        response = _service.list_acoustic_models()

        # Exactly one request was made and it succeeded
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_list_acoustic_models_required_params_with_retries(self):
        # Run test_list_acoustic_models_required_params with retries enabled,
        # then again with retries disabled.
        _service.enable_retries()
        self.test_list_acoustic_models_required_params()

        _service.disable_retries()
        self.test_list_acoustic_models_required_params()
+
+
class TestGetAcousticModel:
    """
    Test Class for get_acoustic_model
    """

    @responses.activate
    def test_get_acoustic_model_all_params(self):
        """
        get_acoustic_model()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString')
        mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'

        # Invoke method
        response = _service.get_acoustic_model(
            customization_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_acoustic_model_all_params_with_retries(self):
        # Enable retries and run test_get_acoustic_model_all_params.
        _service.enable_retries()
        self.test_get_acoustic_model_all_params()

        # Disable retries and run test_get_acoustic_model_all_params.
        _service.disable_retries()
        self.test_get_acoustic_model_all_params()

    @responses.activate
    def test_get_acoustic_model_value_error(self):
        """
        test_get_acoustic_model_value_error()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString')
        mock_response = '{"customization_id": "customization_id", "created": "created", "updated": "updated", "language": "language", "versions": ["versions"], "owner": "owner", "name": "name", "description": "description", "base_model_name": "base_model_name", "status": "pending", "progress": 8, "warnings": "warnings"}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "customization_id": customization_id,
        }
        for param in req_param_dict:
            # Compare keys by value (!=), not identity (is not): string
            # identity comparison depends on CPython interning and is fragile.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_acoustic_model(**req_copy)

    def test_get_acoustic_model_value_error_with_retries(self):
        # Enable retries and run test_get_acoustic_model_value_error.
        _service.enable_retries()
        self.test_get_acoustic_model_value_error()

        # Disable retries and run test_get_acoustic_model_value_error.
        _service.disable_retries()
        self.test_get_acoustic_model_value_error()
+
+
class TestDeleteAcousticModel:
    """
    Test Class for delete_acoustic_model
    """

    @responses.activate
    def test_delete_acoustic_model_all_params(self):
        """
        delete_acoustic_model()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString')
        responses.add(
            responses.DELETE,
            url,
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'

        # Invoke method
        response = _service.delete_acoustic_model(
            customization_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_delete_acoustic_model_all_params_with_retries(self):
        # Enable retries and run test_delete_acoustic_model_all_params.
        _service.enable_retries()
        self.test_delete_acoustic_model_all_params()

        # Disable retries and run test_delete_acoustic_model_all_params.
        _service.disable_retries()
        self.test_delete_acoustic_model_all_params()

    @responses.activate
    def test_delete_acoustic_model_value_error(self):
        """
        test_delete_acoustic_model_value_error()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString')
        responses.add(
            responses.DELETE,
            url,
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "customization_id": customization_id,
        }
        for param in req_param_dict:
            # Compare keys by value (!=), not identity (is not): string
            # identity comparison depends on CPython interning and is fragile.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_acoustic_model(**req_copy)

    def test_delete_acoustic_model_value_error_with_retries(self):
        # Enable retries and run test_delete_acoustic_model_value_error.
        _service.enable_retries()
        self.test_delete_acoustic_model_value_error()

        # Disable retries and run test_delete_acoustic_model_value_error.
        _service.disable_retries()
        self.test_delete_acoustic_model_value_error()
+
+
class TestTrainAcousticModel:
    """
    Test Class for train_acoustic_model
    """

    @responses.activate
    def test_train_acoustic_model_all_params(self):
        """
        train_acoustic_model()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/train')
        mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'
        custom_language_model_id = 'testString'
        strict = True

        # Invoke method
        response = _service.train_acoustic_model(
            customization_id,
            custom_language_model_id=custom_language_model_id,
            strict=strict,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'custom_language_model_id={}'.format(custom_language_model_id) in query_string
        assert 'strict={}'.format('true' if strict else 'false') in query_string

    def test_train_acoustic_model_all_params_with_retries(self):
        # Enable retries and run test_train_acoustic_model_all_params.
        _service.enable_retries()
        self.test_train_acoustic_model_all_params()

        # Disable retries and run test_train_acoustic_model_all_params.
        _service.disable_retries()
        self.test_train_acoustic_model_all_params()

    @responses.activate
    def test_train_acoustic_model_required_params(self):
        """
        test_train_acoustic_model_required_params()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/train')
        mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'

        # Invoke method
        response = _service.train_acoustic_model(
            customization_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_train_acoustic_model_required_params_with_retries(self):
        # Enable retries and run test_train_acoustic_model_required_params.
        _service.enable_retries()
        self.test_train_acoustic_model_required_params()

        # Disable retries and run test_train_acoustic_model_required_params.
        _service.disable_retries()
        self.test_train_acoustic_model_required_params()

    @responses.activate
    def test_train_acoustic_model_value_error(self):
        """
        test_train_acoustic_model_value_error()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/train')
        mock_response = '{"warnings": [{"code": "invalid_audio_files", "message": "message"}]}'
        responses.add(
            responses.POST,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "customization_id": customization_id,
        }
        for param in req_param_dict:
            # Compare keys by value (!=), not identity (is not): string
            # identity comparison depends on CPython interning and is fragile.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.train_acoustic_model(**req_copy)

    def test_train_acoustic_model_value_error_with_retries(self):
        # Enable retries and run test_train_acoustic_model_value_error.
        _service.enable_retries()
        self.test_train_acoustic_model_value_error()

        # Disable retries and run test_train_acoustic_model_value_error.
        _service.disable_retries()
        self.test_train_acoustic_model_value_error()
+
+
class TestResetAcousticModel:
    """
    Test Class for reset_acoustic_model
    """

    @responses.activate
    def test_reset_acoustic_model_all_params(self):
        """
        reset_acoustic_model()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/reset')
        responses.add(
            responses.POST,
            url,
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'

        # Invoke method
        response = _service.reset_acoustic_model(
            customization_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_reset_acoustic_model_all_params_with_retries(self):
        # Enable retries and run test_reset_acoustic_model_all_params.
        _service.enable_retries()
        self.test_reset_acoustic_model_all_params()

        # Disable retries and run test_reset_acoustic_model_all_params.
        _service.disable_retries()
        self.test_reset_acoustic_model_all_params()

    @responses.activate
    def test_reset_acoustic_model_value_error(self):
        """
        test_reset_acoustic_model_value_error()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/reset')
        responses.add(
            responses.POST,
            url,
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "customization_id": customization_id,
        }
        for param in req_param_dict:
            # Compare keys by value (!=), not identity (is not): string
            # identity comparison depends on CPython interning and is fragile.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.reset_acoustic_model(**req_copy)

    def test_reset_acoustic_model_value_error_with_retries(self):
        # Enable retries and run test_reset_acoustic_model_value_error.
        _service.enable_retries()
        self.test_reset_acoustic_model_value_error()

        # Disable retries and run test_reset_acoustic_model_value_error.
        _service.disable_retries()
        self.test_reset_acoustic_model_value_error()
+
+
class TestUpgradeAcousticModel:
    """
    Test Class for upgrade_acoustic_model
    """

    @responses.activate
    def test_upgrade_acoustic_model_all_params(self):
        """
        upgrade_acoustic_model()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/upgrade_model')
        responses.add(
            responses.POST,
            url,
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'
        custom_language_model_id = 'testString'
        force = False

        # Invoke method
        response = _service.upgrade_acoustic_model(
            customization_id,
            custom_language_model_id=custom_language_model_id,
            force=force,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'custom_language_model_id={}'.format(custom_language_model_id) in query_string
        assert 'force={}'.format('true' if force else 'false') in query_string

    def test_upgrade_acoustic_model_all_params_with_retries(self):
        # Enable retries and run test_upgrade_acoustic_model_all_params.
        _service.enable_retries()
        self.test_upgrade_acoustic_model_all_params()

        # Disable retries and run test_upgrade_acoustic_model_all_params.
        _service.disable_retries()
        self.test_upgrade_acoustic_model_all_params()

    @responses.activate
    def test_upgrade_acoustic_model_required_params(self):
        """
        test_upgrade_acoustic_model_required_params()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/upgrade_model')
        responses.add(
            responses.POST,
            url,
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'

        # Invoke method
        response = _service.upgrade_acoustic_model(
            customization_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_upgrade_acoustic_model_required_params_with_retries(self):
        # Enable retries and run test_upgrade_acoustic_model_required_params.
        _service.enable_retries()
        self.test_upgrade_acoustic_model_required_params()

        # Disable retries and run test_upgrade_acoustic_model_required_params.
        _service.disable_retries()
        self.test_upgrade_acoustic_model_required_params()

    @responses.activate
    def test_upgrade_acoustic_model_value_error(self):
        """
        test_upgrade_acoustic_model_value_error()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/upgrade_model')
        responses.add(
            responses.POST,
            url,
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "customization_id": customization_id,
        }
        for param in req_param_dict:
            # Compare keys by value (!=), not identity (is not): string
            # identity comparison depends on CPython interning and is fragile.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.upgrade_acoustic_model(**req_copy)

    def test_upgrade_acoustic_model_value_error_with_retries(self):
        # Enable retries and run test_upgrade_acoustic_model_value_error.
        _service.enable_retries()
        self.test_upgrade_acoustic_model_value_error()

        # Disable retries and run test_upgrade_acoustic_model_value_error.
        _service.disable_retries()
        self.test_upgrade_acoustic_model_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: CustomAcousticModels
+##############################################################################
+
+##############################################################################
+# Start of Service: CustomAudioResources
+##############################################################################
+# region
+
+
class TestListAudio:
    """
    Test Class for list_audio
    """

    @responses.activate
    def test_list_audio_all_params(self):
        """
        list_audio()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/audio')
        mock_response = '{"total_minutes_of_audio": 22, "audio": [{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'

        # Invoke method
        response = _service.list_audio(
            customization_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_list_audio_all_params_with_retries(self):
        # Enable retries and run test_list_audio_all_params.
        _service.enable_retries()
        self.test_list_audio_all_params()

        # Disable retries and run test_list_audio_all_params.
        _service.disable_retries()
        self.test_list_audio_all_params()

    @responses.activate
    def test_list_audio_value_error(self):
        """
        test_list_audio_value_error()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/audio')
        mock_response = '{"total_minutes_of_audio": 22, "audio": [{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "customization_id": customization_id,
        }
        for param in req_param_dict:
            # Compare keys by value (!=), not identity (is not): string
            # identity comparison depends on CPython interning and is fragile.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.list_audio(**req_copy)

    def test_list_audio_value_error_with_retries(self):
        # Enable retries and run test_list_audio_value_error.
        _service.enable_retries()
        self.test_list_audio_value_error()

        # Disable retries and run test_list_audio_value_error.
        _service.disable_retries()
        self.test_list_audio_value_error()
+
+
class TestAddAudio:
    """
    Test Class for add_audio
    """

    @responses.activate
    def test_add_audio_all_params(self):
        """
        add_audio()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/audio/testString')
        responses.add(
            responses.POST,
            url,
            status=201,
        )

        # Set up parameter values
        customization_id = 'testString'
        audio_name = 'testString'
        audio_resource = io.BytesIO(b'This is a mock file.').getvalue()
        content_type = 'application/zip'
        contained_content_type = 'audio/alaw'
        allow_overwrite = False

        # Invoke method
        response = _service.add_audio(
            customization_id,
            audio_name,
            audio_resource,
            content_type=content_type,
            contained_content_type=contained_content_type,
            allow_overwrite=allow_overwrite,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'allow_overwrite={}'.format('true' if allow_overwrite else 'false') in query_string
        # Validate body params

    def test_add_audio_all_params_with_retries(self):
        # Enable retries and run test_add_audio_all_params.
        _service.enable_retries()
        self.test_add_audio_all_params()

        # Disable retries and run test_add_audio_all_params.
        _service.disable_retries()
        self.test_add_audio_all_params()

    @responses.activate
    def test_add_audio_required_params(self):
        """
        test_add_audio_required_params()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/audio/testString')
        responses.add(
            responses.POST,
            url,
            status=201,
        )

        # Set up parameter values
        customization_id = 'testString'
        audio_name = 'testString'
        audio_resource = io.BytesIO(b'This is a mock file.').getvalue()

        # Invoke method
        response = _service.add_audio(
            customization_id,
            audio_name,
            audio_resource,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 201
        # Validate body params

    def test_add_audio_required_params_with_retries(self):
        # Enable retries and run test_add_audio_required_params.
        _service.enable_retries()
        self.test_add_audio_required_params()

        # Disable retries and run test_add_audio_required_params.
        _service.disable_retries()
        self.test_add_audio_required_params()

    @responses.activate
    def test_add_audio_value_error(self):
        """
        test_add_audio_value_error()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/audio/testString')
        responses.add(
            responses.POST,
            url,
            status=201,
        )

        # Set up parameter values
        customization_id = 'testString'
        audio_name = 'testString'
        audio_resource = io.BytesIO(b'This is a mock file.').getvalue()

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "customization_id": customization_id,
            "audio_name": audio_name,
            "audio_resource": audio_resource,
        }
        for param in req_param_dict:
            # Compare keys by value (!=), not identity (is not): string
            # identity comparison depends on CPython interning and is fragile.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.add_audio(**req_copy)

    def test_add_audio_value_error_with_retries(self):
        # Enable retries and run test_add_audio_value_error.
        _service.enable_retries()
        self.test_add_audio_value_error()

        # Disable retries and run test_add_audio_value_error.
        _service.disable_retries()
        self.test_add_audio_value_error()
+
+
class TestGetAudio:
    """
    Test Class for get_audio
    """

    @responses.activate
    def test_get_audio_all_params(self):
        """
        get_audio()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/audio/testString')
        mock_response = '{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok", "container": {"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}, "audio": [{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'
        audio_name = 'testString'

        # Invoke method
        response = _service.get_audio(
            customization_id,
            audio_name,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_get_audio_all_params_with_retries(self):
        # Enable retries and run test_get_audio_all_params.
        _service.enable_retries()
        self.test_get_audio_all_params()

        # Disable retries and run test_get_audio_all_params.
        _service.disable_retries()
        self.test_get_audio_all_params()

    @responses.activate
    def test_get_audio_value_error(self):
        """
        test_get_audio_value_error()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/audio/testString')
        mock_response = '{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok", "container": {"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}, "audio": [{"duration": 8, "name": "name", "details": {"type": "audio", "codec": "codec", "frequency": 9, "compression": "zip"}, "status": "ok"}]}'
        responses.add(
            responses.GET,
            url,
            body=mock_response,
            content_type='application/json',
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'
        audio_name = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "customization_id": customization_id,
            "audio_name": audio_name,
        }
        for param in req_param_dict:
            # Compare keys by value (!=), not identity (is not): string
            # identity comparison depends on CPython interning and is fragile.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.get_audio(**req_copy)

    def test_get_audio_value_error_with_retries(self):
        # Enable retries and run test_get_audio_value_error.
        _service.enable_retries()
        self.test_get_audio_value_error()

        # Disable retries and run test_get_audio_value_error.
        _service.disable_retries()
        self.test_get_audio_value_error()
+
+
class TestDeleteAudio:
    """
    Test Class for delete_audio
    """

    @responses.activate
    def test_delete_audio_all_params(self):
        """
        delete_audio()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/audio/testString')
        responses.add(
            responses.DELETE,
            url,
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'
        audio_name = 'testString'

        # Invoke method
        response = _service.delete_audio(
            customization_id,
            audio_name,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    def test_delete_audio_all_params_with_retries(self):
        # Enable retries and run test_delete_audio_all_params.
        _service.enable_retries()
        self.test_delete_audio_all_params()

        # Disable retries and run test_delete_audio_all_params.
        _service.disable_retries()
        self.test_delete_audio_all_params()

    @responses.activate
    def test_delete_audio_value_error(self):
        """
        test_delete_audio_value_error()
        """
        # Set up mock
        url = preprocess_url('/v1/acoustic_customizations/testString/audio/testString')
        responses.add(
            responses.DELETE,
            url,
            status=200,
        )

        # Set up parameter values
        customization_id = 'testString'
        audio_name = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "customization_id": customization_id,
            "audio_name": audio_name,
        }
        for param in req_param_dict:
            # Compare keys by value (!=), not identity (is not): string
            # identity comparison depends on CPython interning and is fragile.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_audio(**req_copy)

    def test_delete_audio_value_error_with_retries(self):
        # Enable retries and run test_delete_audio_value_error.
        _service.enable_retries()
        self.test_delete_audio_value_error()

        # Disable retries and run test_delete_audio_value_error.
        _service.disable_retries()
        self.test_delete_audio_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: CustomAudioResources
+##############################################################################
+
+##############################################################################
+# Start of Service: UserData
+##############################################################################
+# region
+
+
class TestDeleteUserData:
    """
    Test Class for delete_user_data
    """

    @responses.activate
    def test_delete_user_data_all_params(self):
        """
        delete_user_data()
        """
        # Set up mock
        url = preprocess_url('/v1/user_data')
        responses.add(
            responses.DELETE,
            url,
            status=200,
        )

        # Set up parameter values
        customer_id = 'testString'

        # Invoke method
        response = _service.delete_user_data(
            customer_id,
            headers={},
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate query params
        query_string = responses.calls[0].request.url.split('?', 1)[1]
        query_string = urllib.parse.unquote_plus(query_string)
        assert 'customer_id={}'.format(customer_id) in query_string

    def test_delete_user_data_all_params_with_retries(self):
        # Enable retries and run test_delete_user_data_all_params.
        _service.enable_retries()
        self.test_delete_user_data_all_params()

        # Disable retries and run test_delete_user_data_all_params.
        _service.disable_retries()
        self.test_delete_user_data_all_params()

    @responses.activate
    def test_delete_user_data_value_error(self):
        """
        test_delete_user_data_value_error()
        """
        # Set up mock
        url = preprocess_url('/v1/user_data')
        responses.add(
            responses.DELETE,
            url,
            status=200,
        )

        # Set up parameter values
        customer_id = 'testString'

        # Pass in all but one required param and check for a ValueError
        req_param_dict = {
            "customer_id": customer_id,
        }
        for param in req_param_dict:
            # Compare keys by value (!=), not identity (is not): string
            # identity comparison depends on CPython interning and is fragile.
            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
            with pytest.raises(ValueError):
                _service.delete_user_data(**req_copy)

    def test_delete_user_data_value_error_with_retries(self):
        # Enable retries and run test_delete_user_data_value_error.
        _service.enable_retries()
        self.test_delete_user_data_value_error()

        # Disable retries and run test_delete_user_data_value_error.
        _service.disable_retries()
        self.test_delete_user_data_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: UserData
+##############################################################################
+
+##############################################################################
+# Start of Service: LanguageIdentification
+##############################################################################
+# region
+
+
+class TestDetectLanguage:
+ """
+ Test Class for detect_language
+ """
+
+ @responses.activate
+ def test_detect_language_all_params(self):
+ """
+ detect_language()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/detect_language')
+ mock_response = '{"results": [{"language_info": [{"confidence": 10, "language": "language", "timestamp": 9}]}], "result_index": 12}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ lid_confidence = 36.0
+ audio = io.BytesIO(b'This is a mock file.').getvalue()
+ content_type = 'application/octet-stream'
+
+ # Invoke method
+ response = _service.detect_language(
+ lid_confidence,
+ audio,
+ content_type=content_type,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ # Validate body params
+
+ def test_detect_language_all_params_with_retries(self):
+ # Enable retries and run test_detect_language_all_params.
+ _service.enable_retries()
+ self.test_detect_language_all_params()
+
+ # Disable retries and run test_detect_language_all_params.
+ _service.disable_retries()
+ self.test_detect_language_all_params()
+
+ @responses.activate
+ def test_detect_language_required_params(self):
+ """
+ test_detect_language_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/detect_language')
+ mock_response = '{"results": [{"language_info": [{"confidence": 10, "language": "language", "timestamp": 9}]}], "result_index": 12}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ lid_confidence = 36.0
+ audio = io.BytesIO(b'This is a mock file.').getvalue()
+
+ # Invoke method
+ response = _service.detect_language(
+ lid_confidence,
+ audio,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ # Validate body params
+
+ def test_detect_language_required_params_with_retries(self):
+ # Enable retries and run test_detect_language_required_params.
+ _service.enable_retries()
+ self.test_detect_language_required_params()
+
+ # Disable retries and run test_detect_language_required_params.
+ _service.disable_retries()
+ self.test_detect_language_required_params()
+
+ @responses.activate
+ def test_detect_language_value_error(self):
+ """
+ test_detect_language_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/detect_language')
+ mock_response = '{"results": [{"language_info": [{"confidence": 10, "language": "language", "timestamp": 9}]}], "result_index": 12}'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ lid_confidence = 36.0
+ audio = io.BytesIO(b'This is a mock file.').getvalue()
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "lid_confidence": lid_confidence,
+ "audio": audio,
+ }
+ for param in req_param_dict.keys():
+ req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.detect_language(**req_copy)
+
+ def test_detect_language_value_error_with_retries(self):
+ # Enable retries and run test_detect_language_value_error.
+ _service.enable_retries()
+ self.test_detect_language_value_error()
+
+ # Disable retries and run test_detect_language_value_error.
+ _service.disable_retries()
+ self.test_detect_language_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: LanguageIdentification
+##############################################################################
+
+
+##############################################################################
+# Start of Model Tests
+##############################################################################
+# region
+
+
+class TestModel_AcousticModel:
+    """
+    Test Class for AcousticModel
+    """
+
+    def test_acoustic_model_serialization(self):
+        """
+        Round-trip an AcousticModel through from_dict()/to_dict() and verify
+        that no data is lost or altered.
+        """
+
+        # Construct a json representation of a AcousticModel model
+        acoustic_model_model_json = {}
+        acoustic_model_model_json['customization_id'] = 'testString'
+        acoustic_model_model_json['created'] = 'testString'
+        acoustic_model_model_json['updated'] = 'testString'
+        acoustic_model_model_json['language'] = 'testString'
+        acoustic_model_model_json['versions'] = ['testString']
+        acoustic_model_model_json['owner'] = 'testString'
+        acoustic_model_model_json['name'] = 'testString'
+        acoustic_model_model_json['description'] = 'testString'
+        acoustic_model_model_json['base_model_name'] = 'testString'
+        acoustic_model_model_json['status'] = 'pending'
+        acoustic_model_model_json['progress'] = 38
+        acoustic_model_model_json['warnings'] = 'testString'
+
+        # Construct a model instance of AcousticModel by calling from_dict on the json representation
+        acoustic_model_model = AcousticModel.from_dict(acoustic_model_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert acoustic_model_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        acoustic_model_model_dict = AcousticModel.from_dict(acoustic_model_model_json).__dict__
+        acoustic_model_model2 = AcousticModel(**acoustic_model_model_dict)
+
+        # Verify the model instances are equivalent
+        assert acoustic_model_model == acoustic_model_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        acoustic_model_model_json2 = acoustic_model_model.to_dict()
+        assert acoustic_model_model_json2 == acoustic_model_model_json
+
+
+class TestModel_AcousticModels:
+    """
+    Test Class for AcousticModels
+    """
+
+    def test_acoustic_models_serialization(self):
+        """
+        Round-trip an AcousticModels container through from_dict()/to_dict()
+        and verify that no data is lost or altered.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        acoustic_model_model = {}  # AcousticModel
+        acoustic_model_model['customization_id'] = 'testString'
+        acoustic_model_model['created'] = 'testString'
+        acoustic_model_model['updated'] = 'testString'
+        acoustic_model_model['language'] = 'testString'
+        acoustic_model_model['versions'] = ['testString']
+        acoustic_model_model['owner'] = 'testString'
+        acoustic_model_model['name'] = 'testString'
+        acoustic_model_model['description'] = 'testString'
+        acoustic_model_model['base_model_name'] = 'testString'
+        acoustic_model_model['status'] = 'pending'
+        acoustic_model_model['progress'] = 38
+        acoustic_model_model['warnings'] = 'testString'
+
+        # Construct a json representation of a AcousticModels model
+        acoustic_models_model_json = {}
+        acoustic_models_model_json['customizations'] = [acoustic_model_model]
+
+        # Construct a model instance of AcousticModels by calling from_dict on the json representation
+        acoustic_models_model = AcousticModels.from_dict(acoustic_models_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert acoustic_models_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        acoustic_models_model_dict = AcousticModels.from_dict(acoustic_models_model_json).__dict__
+        acoustic_models_model2 = AcousticModels(**acoustic_models_model_dict)
+
+        # Verify the model instances are equivalent
+        assert acoustic_models_model == acoustic_models_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        acoustic_models_model_json2 = acoustic_models_model.to_dict()
+        assert acoustic_models_model_json2 == acoustic_models_model_json
+
+
+class TestModel_AudioDetails:
+    """
+    Test Class for AudioDetails
+    """
+
+    def test_audio_details_serialization(self):
+        """
+        Round-trip an AudioDetails model through from_dict()/to_dict() and
+        verify that no data is lost or altered.
+        """
+
+        # Construct a json representation of a AudioDetails model
+        audio_details_model_json = {}
+        audio_details_model_json['type'] = 'audio'
+        audio_details_model_json['codec'] = 'testString'
+        audio_details_model_json['frequency'] = 38
+        audio_details_model_json['compression'] = 'zip'
+
+        # Construct a model instance of AudioDetails by calling from_dict on the json representation
+        audio_details_model = AudioDetails.from_dict(audio_details_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert audio_details_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        audio_details_model_dict = AudioDetails.from_dict(audio_details_model_json).__dict__
+        audio_details_model2 = AudioDetails(**audio_details_model_dict)
+
+        # Verify the model instances are equivalent
+        assert audio_details_model == audio_details_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        audio_details_model_json2 = audio_details_model.to_dict()
+        assert audio_details_model_json2 == audio_details_model_json
+
+
+class TestModel_AudioListing:
+    """
+    Test Class for AudioListing
+    """
+
+    def test_audio_listing_serialization(self):
+        """
+        Round-trip an AudioListing (including nested AudioDetails and
+        AudioResource structures) through from_dict()/to_dict() and verify
+        that no data is lost or altered.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        audio_details_model = {}  # AudioDetails
+        audio_details_model['type'] = 'audio'
+        audio_details_model['codec'] = 'testString'
+        audio_details_model['frequency'] = 38
+        audio_details_model['compression'] = 'zip'
+
+        audio_resource_model = {}  # AudioResource
+        audio_resource_model['duration'] = 38
+        audio_resource_model['name'] = 'testString'
+        audio_resource_model['details'] = audio_details_model
+        audio_resource_model['status'] = 'ok'
+
+        # Construct a json representation of a AudioListing model
+        audio_listing_model_json = {}
+        audio_listing_model_json['duration'] = 38
+        audio_listing_model_json['name'] = 'testString'
+        audio_listing_model_json['details'] = audio_details_model
+        audio_listing_model_json['status'] = 'ok'
+        audio_listing_model_json['container'] = audio_resource_model
+        audio_listing_model_json['audio'] = [audio_resource_model]
+
+        # Construct a model instance of AudioListing by calling from_dict on the json representation
+        audio_listing_model = AudioListing.from_dict(audio_listing_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert audio_listing_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        audio_listing_model_dict = AudioListing.from_dict(audio_listing_model_json).__dict__
+        audio_listing_model2 = AudioListing(**audio_listing_model_dict)
+
+        # Verify the model instances are equivalent
+        assert audio_listing_model == audio_listing_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        audio_listing_model_json2 = audio_listing_model.to_dict()
+        assert audio_listing_model_json2 == audio_listing_model_json
+
+
+class TestModel_AudioMetrics:
+    """
+    Test Class for AudioMetrics
+    """
+
+    def test_audio_metrics_serialization(self):
+        """
+        Round-trip an AudioMetrics model (with nested AudioMetricsDetails and
+        AudioMetricsHistogramBin structures) through from_dict()/to_dict()
+        and verify that no data is lost or altered.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        audio_metrics_histogram_bin_model = {}  # AudioMetricsHistogramBin
+        audio_metrics_histogram_bin_model['begin'] = 36.0
+        audio_metrics_histogram_bin_model['end'] = 36.0
+        audio_metrics_histogram_bin_model['count'] = 38
+
+        audio_metrics_details_model = {}  # AudioMetricsDetails
+        audio_metrics_details_model['final'] = True
+        audio_metrics_details_model['end_time'] = 36.0
+        audio_metrics_details_model['signal_to_noise_ratio'] = 36.0
+        audio_metrics_details_model['speech_ratio'] = 36.0
+        audio_metrics_details_model['high_frequency_loss'] = 36.0
+        audio_metrics_details_model['direct_current_offset'] = [audio_metrics_histogram_bin_model]
+        audio_metrics_details_model['clipping_rate'] = [audio_metrics_histogram_bin_model]
+        audio_metrics_details_model['speech_level'] = [audio_metrics_histogram_bin_model]
+        audio_metrics_details_model['non_speech_level'] = [audio_metrics_histogram_bin_model]
+
+        # Construct a json representation of a AudioMetrics model
+        audio_metrics_model_json = {}
+        audio_metrics_model_json['sampling_interval'] = 36.0
+        audio_metrics_model_json['accumulated'] = audio_metrics_details_model
+
+        # Construct a model instance of AudioMetrics by calling from_dict on the json representation
+        audio_metrics_model = AudioMetrics.from_dict(audio_metrics_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert audio_metrics_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        audio_metrics_model_dict = AudioMetrics.from_dict(audio_metrics_model_json).__dict__
+        audio_metrics_model2 = AudioMetrics(**audio_metrics_model_dict)
+
+        # Verify the model instances are equivalent
+        assert audio_metrics_model == audio_metrics_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        audio_metrics_model_json2 = audio_metrics_model.to_dict()
+        assert audio_metrics_model_json2 == audio_metrics_model_json
+
+
+class TestModel_AudioMetricsDetails:
+    """
+    Test Class for AudioMetricsDetails
+    """
+
+    def test_audio_metrics_details_serialization(self):
+        """
+        Round-trip an AudioMetricsDetails model (with nested histogram bins)
+        through from_dict()/to_dict() and verify that no data is lost or
+        altered.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        audio_metrics_histogram_bin_model = {}  # AudioMetricsHistogramBin
+        audio_metrics_histogram_bin_model['begin'] = 36.0
+        audio_metrics_histogram_bin_model['end'] = 36.0
+        audio_metrics_histogram_bin_model['count'] = 38
+
+        # Construct a json representation of a AudioMetricsDetails model
+        audio_metrics_details_model_json = {}
+        audio_metrics_details_model_json['final'] = True
+        audio_metrics_details_model_json['end_time'] = 36.0
+        audio_metrics_details_model_json['signal_to_noise_ratio'] = 36.0
+        audio_metrics_details_model_json['speech_ratio'] = 36.0
+        audio_metrics_details_model_json['high_frequency_loss'] = 36.0
+        audio_metrics_details_model_json['direct_current_offset'] = [audio_metrics_histogram_bin_model]
+        audio_metrics_details_model_json['clipping_rate'] = [audio_metrics_histogram_bin_model]
+        audio_metrics_details_model_json['speech_level'] = [audio_metrics_histogram_bin_model]
+        audio_metrics_details_model_json['non_speech_level'] = [audio_metrics_histogram_bin_model]
+
+        # Construct a model instance of AudioMetricsDetails by calling from_dict on the json representation
+        audio_metrics_details_model = AudioMetricsDetails.from_dict(audio_metrics_details_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert audio_metrics_details_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        audio_metrics_details_model_dict = AudioMetricsDetails.from_dict(audio_metrics_details_model_json).__dict__
+        audio_metrics_details_model2 = AudioMetricsDetails(**audio_metrics_details_model_dict)
+
+        # Verify the model instances are equivalent
+        assert audio_metrics_details_model == audio_metrics_details_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        audio_metrics_details_model_json2 = audio_metrics_details_model.to_dict()
+        assert audio_metrics_details_model_json2 == audio_metrics_details_model_json
+
+
+class TestModel_AudioMetricsHistogramBin:
+    """
+    Test Class for AudioMetricsHistogramBin
+    """
+
+    def test_audio_metrics_histogram_bin_serialization(self):
+        """
+        Round-trip an AudioMetricsHistogramBin through from_dict()/to_dict()
+        and verify that no data is lost or altered.
+        """
+
+        # Construct a json representation of a AudioMetricsHistogramBin model
+        audio_metrics_histogram_bin_model_json = {}
+        audio_metrics_histogram_bin_model_json['begin'] = 36.0
+        audio_metrics_histogram_bin_model_json['end'] = 36.0
+        audio_metrics_histogram_bin_model_json['count'] = 38
+
+        # Construct a model instance of AudioMetricsHistogramBin by calling from_dict on the json representation
+        audio_metrics_histogram_bin_model = AudioMetricsHistogramBin.from_dict(audio_metrics_histogram_bin_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert audio_metrics_histogram_bin_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        audio_metrics_histogram_bin_model_dict = AudioMetricsHistogramBin.from_dict(audio_metrics_histogram_bin_model_json).__dict__
+        audio_metrics_histogram_bin_model2 = AudioMetricsHistogramBin(**audio_metrics_histogram_bin_model_dict)
+
+        # Verify the model instances are equivalent
+        assert audio_metrics_histogram_bin_model == audio_metrics_histogram_bin_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        audio_metrics_histogram_bin_model_json2 = audio_metrics_histogram_bin_model.to_dict()
+        assert audio_metrics_histogram_bin_model_json2 == audio_metrics_histogram_bin_model_json
+
+
+class TestModel_AudioResource:
+    """
+    Test Class for AudioResource
+    """
+
+    def test_audio_resource_serialization(self):
+        """
+        Round-trip an AudioResource (with nested AudioDetails) through
+        from_dict()/to_dict() and verify that no data is lost or altered.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        audio_details_model = {}  # AudioDetails
+        audio_details_model['type'] = 'audio'
+        audio_details_model['codec'] = 'testString'
+        audio_details_model['frequency'] = 38
+        audio_details_model['compression'] = 'zip'
+
+        # Construct a json representation of a AudioResource model
+        audio_resource_model_json = {}
+        audio_resource_model_json['duration'] = 38
+        audio_resource_model_json['name'] = 'testString'
+        audio_resource_model_json['details'] = audio_details_model
+        audio_resource_model_json['status'] = 'ok'
+
+        # Construct a model instance of AudioResource by calling from_dict on the json representation
+        audio_resource_model = AudioResource.from_dict(audio_resource_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert audio_resource_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        audio_resource_model_dict = AudioResource.from_dict(audio_resource_model_json).__dict__
+        audio_resource_model2 = AudioResource(**audio_resource_model_dict)
+
+        # Verify the model instances are equivalent
+        assert audio_resource_model == audio_resource_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        audio_resource_model_json2 = audio_resource_model.to_dict()
+        assert audio_resource_model_json2 == audio_resource_model_json
+
+
+class TestModel_AudioResources:
+    """
+    Test Class for AudioResources
+    """
+
+    def test_audio_resources_serialization(self):
+        """
+        Round-trip an AudioResources container (with nested AudioResource and
+        AudioDetails structures) through from_dict()/to_dict() and verify
+        that no data is lost or altered.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        audio_details_model = {}  # AudioDetails
+        audio_details_model['type'] = 'audio'
+        audio_details_model['codec'] = 'testString'
+        audio_details_model['frequency'] = 38
+        audio_details_model['compression'] = 'zip'
+
+        audio_resource_model = {}  # AudioResource
+        audio_resource_model['duration'] = 38
+        audio_resource_model['name'] = 'testString'
+        audio_resource_model['details'] = audio_details_model
+        audio_resource_model['status'] = 'ok'
+
+        # Construct a json representation of a AudioResources model
+        audio_resources_model_json = {}
+        audio_resources_model_json['total_minutes_of_audio'] = 72.5
+        audio_resources_model_json['audio'] = [audio_resource_model]
+
+        # Construct a model instance of AudioResources by calling from_dict on the json representation
+        audio_resources_model = AudioResources.from_dict(audio_resources_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert audio_resources_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        audio_resources_model_dict = AudioResources.from_dict(audio_resources_model_json).__dict__
+        audio_resources_model2 = AudioResources(**audio_resources_model_dict)
+
+        # Verify the model instances are equivalent
+        assert audio_resources_model == audio_resources_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        audio_resources_model_json2 = audio_resources_model.to_dict()
+        assert audio_resources_model_json2 == audio_resources_model_json
+
+
+class TestModel_Corpora:
+    """
+    Test Class for Corpora
+    """
+
+    def test_corpora_serialization(self):
+        """
+        Round-trip a Corpora container (with a nested Corpus) through
+        from_dict()/to_dict() and verify that no data is lost or altered.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        corpus_model = {}  # Corpus
+        corpus_model['name'] = 'testString'
+        corpus_model['total_words'] = 38
+        corpus_model['out_of_vocabulary_words'] = 38
+        corpus_model['status'] = 'analyzed'
+        corpus_model['error'] = 'testString'
+
+        # Construct a json representation of a Corpora model
+        corpora_model_json = {}
+        corpora_model_json['corpora'] = [corpus_model]
+
+        # Construct a model instance of Corpora by calling from_dict on the json representation
+        corpora_model = Corpora.from_dict(corpora_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert corpora_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        corpora_model_dict = Corpora.from_dict(corpora_model_json).__dict__
+        corpora_model2 = Corpora(**corpora_model_dict)
+
+        # Verify the model instances are equivalent
+        assert corpora_model == corpora_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        corpora_model_json2 = corpora_model.to_dict()
+        assert corpora_model_json2 == corpora_model_json
+
+
+class TestModel_Corpus:
+    """
+    Test Class for Corpus
+    """
+
+    def test_corpus_serialization(self):
+        """
+        Round-trip a Corpus model through from_dict()/to_dict() and verify
+        that no data is lost or altered.
+        """
+
+        # Construct a json representation of a Corpus model
+        corpus_model_json = {}
+        corpus_model_json['name'] = 'testString'
+        corpus_model_json['total_words'] = 38
+        corpus_model_json['out_of_vocabulary_words'] = 38
+        corpus_model_json['status'] = 'analyzed'
+        corpus_model_json['error'] = 'testString'
+
+        # Construct a model instance of Corpus by calling from_dict on the json representation
+        corpus_model = Corpus.from_dict(corpus_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert corpus_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        corpus_model_dict = Corpus.from_dict(corpus_model_json).__dict__
+        corpus_model2 = Corpus(**corpus_model_dict)
+
+        # Verify the model instances are equivalent
+        assert corpus_model == corpus_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        corpus_model_json2 = corpus_model.to_dict()
+        assert corpus_model_json2 == corpus_model_json
+
+
+class TestModel_CustomWord:
+    """
+    Test Class for CustomWord
+    """
+
+    def test_custom_word_serialization(self):
+        """
+        Round-trip a CustomWord model through from_dict()/to_dict() and
+        verify that no data is lost or altered.
+        """
+
+        # Construct a json representation of a CustomWord model
+        custom_word_model_json = {}
+        custom_word_model_json['word'] = 'testString'
+        custom_word_model_json['mapping_only'] = ['testString']
+        custom_word_model_json['sounds_like'] = ['testString']
+        custom_word_model_json['display_as'] = 'testString'
+
+        # Construct a model instance of CustomWord by calling from_dict on the json representation
+        custom_word_model = CustomWord.from_dict(custom_word_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert custom_word_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        custom_word_model_dict = CustomWord.from_dict(custom_word_model_json).__dict__
+        custom_word_model2 = CustomWord(**custom_word_model_dict)
+
+        # Verify the model instances are equivalent
+        assert custom_word_model == custom_word_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        custom_word_model_json2 = custom_word_model.to_dict()
+        assert custom_word_model_json2 == custom_word_model_json
+
+
+class TestModel_EnrichedResults:
+    """
+    Test Class for EnrichedResults
+    """
+
+    def test_enriched_results_serialization(self):
+        """
+        Round-trip an EnrichedResults model (with nested transcript and
+        timestamp structures) through from_dict()/to_dict() and verify that
+        no data is lost or altered.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        enriched_results_transcript_timestamp_model = {}  # EnrichedResultsTranscriptTimestamp
+        enriched_results_transcript_timestamp_model['from'] = 36.0
+        enriched_results_transcript_timestamp_model['to'] = 36.0
+
+        enriched_results_transcript_model = {}  # EnrichedResultsTranscript
+        enriched_results_transcript_model['text'] = 'testString'
+        enriched_results_transcript_model['timestamp'] = enriched_results_transcript_timestamp_model
+
+        # Construct a json representation of a EnrichedResults model
+        enriched_results_model_json = {}
+        enriched_results_model_json['transcript'] = enriched_results_transcript_model
+        enriched_results_model_json['status'] = 'testString'
+
+        # Construct a model instance of EnrichedResults by calling from_dict on the json representation
+        enriched_results_model = EnrichedResults.from_dict(enriched_results_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert enriched_results_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        enriched_results_model_dict = EnrichedResults.from_dict(enriched_results_model_json).__dict__
+        enriched_results_model2 = EnrichedResults(**enriched_results_model_dict)
+
+        # Verify the model instances are equivalent
+        assert enriched_results_model == enriched_results_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        enriched_results_model_json2 = enriched_results_model.to_dict()
+        assert enriched_results_model_json2 == enriched_results_model_json
+
+
+class TestModel_EnrichedResultsTranscript:
+    """
+    Test Class for EnrichedResultsTranscript
+    """
+
+    def test_enriched_results_transcript_serialization(self):
+        """
+        Round-trip an EnrichedResultsTranscript (with a nested timestamp)
+        through from_dict()/to_dict() and verify that no data is lost or
+        altered.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        enriched_results_transcript_timestamp_model = {}  # EnrichedResultsTranscriptTimestamp
+        enriched_results_transcript_timestamp_model['from'] = 36.0
+        enriched_results_transcript_timestamp_model['to'] = 36.0
+
+        # Construct a json representation of a EnrichedResultsTranscript model
+        enriched_results_transcript_model_json = {}
+        enriched_results_transcript_model_json['text'] = 'testString'
+        enriched_results_transcript_model_json['timestamp'] = enriched_results_transcript_timestamp_model
+
+        # Construct a model instance of EnrichedResultsTranscript by calling from_dict on the json representation
+        enriched_results_transcript_model = EnrichedResultsTranscript.from_dict(enriched_results_transcript_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert enriched_results_transcript_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        enriched_results_transcript_model_dict = EnrichedResultsTranscript.from_dict(enriched_results_transcript_model_json).__dict__
+        enriched_results_transcript_model2 = EnrichedResultsTranscript(**enriched_results_transcript_model_dict)
+
+        # Verify the model instances are equivalent
+        assert enriched_results_transcript_model == enriched_results_transcript_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        enriched_results_transcript_model_json2 = enriched_results_transcript_model.to_dict()
+        assert enriched_results_transcript_model_json2 == enriched_results_transcript_model_json
+
+
+class TestModel_EnrichedResultsTranscriptTimestamp:
+    """
+    Test Class for EnrichedResultsTranscriptTimestamp
+    """
+
+    def test_enriched_results_transcript_timestamp_serialization(self):
+        """
+        Round-trip an EnrichedResultsTranscriptTimestamp through
+        from_dict()/to_dict() and verify that no data is lost or altered.
+        """
+
+        # Construct a json representation of a EnrichedResultsTranscriptTimestamp model
+        enriched_results_transcript_timestamp_model_json = {}
+        enriched_results_transcript_timestamp_model_json['from'] = 36.0
+        enriched_results_transcript_timestamp_model_json['to'] = 36.0
+
+        # Construct a model instance of EnrichedResultsTranscriptTimestamp by calling from_dict on the json representation
+        enriched_results_transcript_timestamp_model = EnrichedResultsTranscriptTimestamp.from_dict(enriched_results_transcript_timestamp_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert enriched_results_transcript_timestamp_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        enriched_results_transcript_timestamp_model_dict = EnrichedResultsTranscriptTimestamp.from_dict(enriched_results_transcript_timestamp_model_json).__dict__
+        enriched_results_transcript_timestamp_model2 = EnrichedResultsTranscriptTimestamp(**enriched_results_transcript_timestamp_model_dict)
+
+        # Verify the model instances are equivalent
+        assert enriched_results_transcript_timestamp_model == enriched_results_transcript_timestamp_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        enriched_results_transcript_timestamp_model_json2 = enriched_results_transcript_timestamp_model.to_dict()
+        assert enriched_results_transcript_timestamp_model_json2 == enriched_results_transcript_timestamp_model_json
+
+
+class TestModel_Grammar:
+    """
+    Test Class for Grammar
+    """
+
+    def test_grammar_serialization(self):
+        """
+        Round-trip a Grammar model through from_dict()/to_dict() and verify
+        that no data is lost or altered.
+        """
+
+        # Construct a json representation of a Grammar model
+        grammar_model_json = {}
+        grammar_model_json['name'] = 'testString'
+        grammar_model_json['out_of_vocabulary_words'] = 38
+        grammar_model_json['status'] = 'analyzed'
+        grammar_model_json['error'] = 'testString'
+
+        # Construct a model instance of Grammar by calling from_dict on the json representation
+        grammar_model = Grammar.from_dict(grammar_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert grammar_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        grammar_model_dict = Grammar.from_dict(grammar_model_json).__dict__
+        grammar_model2 = Grammar(**grammar_model_dict)
+
+        # Verify the model instances are equivalent
+        assert grammar_model == grammar_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        grammar_model_json2 = grammar_model.to_dict()
+        assert grammar_model_json2 == grammar_model_json
+
+
+class TestModel_Grammars:
+    """
+    Test Class for Grammars
+    """
+
+    def test_grammars_serialization(self):
+        """
+        Round-trip a Grammars container (with a nested Grammar) through
+        from_dict()/to_dict() and verify that no data is lost or altered.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        grammar_model = {}  # Grammar
+        grammar_model['name'] = 'testString'
+        grammar_model['out_of_vocabulary_words'] = 38
+        grammar_model['status'] = 'analyzed'
+        grammar_model['error'] = 'testString'
+
+        # Construct a json representation of a Grammars model
+        grammars_model_json = {}
+        grammars_model_json['grammars'] = [grammar_model]
+
+        # Construct a model instance of Grammars by calling from_dict on the json representation
+        grammars_model = Grammars.from_dict(grammars_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert grammars_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        grammars_model_dict = Grammars.from_dict(grammars_model_json).__dict__
+        grammars_model2 = Grammars(**grammars_model_dict)
+
+        # Verify the model instances are equivalent
+        assert grammars_model == grammars_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        grammars_model_json2 = grammars_model.to_dict()
+        assert grammars_model_json2 == grammars_model_json
+
+
+class TestModel_KeywordResult:
+    """
+    Test Class for KeywordResult
+    """
+
+    def test_keyword_result_serialization(self):
+        """
+        Round-trip a KeywordResult model through from_dict()/to_dict() and
+        verify that no data is lost or altered.
+        """
+
+        # Construct a json representation of a KeywordResult model
+        keyword_result_model_json = {}
+        keyword_result_model_json['normalized_text'] = 'testString'
+        keyword_result_model_json['start_time'] = 72.5
+        keyword_result_model_json['end_time'] = 72.5
+        keyword_result_model_json['confidence'] = 0
+
+        # Construct a model instance of KeywordResult by calling from_dict on the json representation
+        keyword_result_model = KeywordResult.from_dict(keyword_result_model_json)
+        # NOTE(review): `!= False` only checks truthiness; `is not None` would be clearer.
+        assert keyword_result_model != False
+
+        # Construct a second instance by passing the first instance's attribute
+        # dict straight to the constructor, then compare the two for equality
+        keyword_result_model_dict = KeywordResult.from_dict(keyword_result_model_json).__dict__
+        keyword_result_model2 = KeywordResult(**keyword_result_model_dict)
+
+        # Verify the model instances are equivalent
+        assert keyword_result_model == keyword_result_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        keyword_result_model_json2 = keyword_result_model.to_dict()
+        assert keyword_result_model_json2 == keyword_result_model_json
+
+
+class TestModel_LanguageDetectionResult:
+ """
+ Test Class for LanguageDetectionResult
+ """
+
+ def test_language_detection_result_serialization(self):
+ """
+ Test serialization/deserialization for LanguageDetectionResult
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ language_info_model = {} # LanguageInfo
+ language_info_model['confidence'] = 36.0
+ language_info_model['language'] = 'testString'
+ language_info_model['timestamp'] = 36.0
+
+ # Construct a json representation of a LanguageDetectionResult model
+ language_detection_result_model_json = {}
+ language_detection_result_model_json['language_info'] = [language_info_model]
+
+ # Construct a model instance of LanguageDetectionResult by calling from_dict on the json representation
+ language_detection_result_model = LanguageDetectionResult.from_dict(language_detection_result_model_json)
+ assert language_detection_result_model != False
+
+ # Construct a model instance of LanguageDetectionResult by calling from_dict on the json representation
+ language_detection_result_model_dict = LanguageDetectionResult.from_dict(language_detection_result_model_json).__dict__
+ language_detection_result_model2 = LanguageDetectionResult(**language_detection_result_model_dict)
+
+ # Verify the model instances are equivalent
+ assert language_detection_result_model == language_detection_result_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ language_detection_result_model_json2 = language_detection_result_model.to_dict()
+ assert language_detection_result_model_json2 == language_detection_result_model_json
+
+
+class TestModel_LanguageDetectionResults:
+ """
+ Test Class for LanguageDetectionResults
+ """
+
+ def test_language_detection_results_serialization(self):
+ """
+ Test serialization/deserialization for LanguageDetectionResults
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ language_info_model = {} # LanguageInfo
+ language_info_model['confidence'] = 36.0
+ language_info_model['language'] = 'testString'
+ language_info_model['timestamp'] = 36.0
+
+ language_detection_result_model = {} # LanguageDetectionResult
+ language_detection_result_model['language_info'] = [language_info_model]
+
+ # Construct a json representation of a LanguageDetectionResults model
+ language_detection_results_model_json = {}
+ language_detection_results_model_json['results'] = [language_detection_result_model]
+ language_detection_results_model_json['result_index'] = 38
+
+ # Construct a model instance of LanguageDetectionResults by calling from_dict on the json representation
+ language_detection_results_model = LanguageDetectionResults.from_dict(language_detection_results_model_json)
+ assert language_detection_results_model != False
+
+ # Construct a model instance of LanguageDetectionResults by calling from_dict on the json representation
+ language_detection_results_model_dict = LanguageDetectionResults.from_dict(language_detection_results_model_json).__dict__
+ language_detection_results_model2 = LanguageDetectionResults(**language_detection_results_model_dict)
+
+ # Verify the model instances are equivalent
+ assert language_detection_results_model == language_detection_results_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ language_detection_results_model_json2 = language_detection_results_model.to_dict()
+ assert language_detection_results_model_json2 == language_detection_results_model_json
+
+
+class TestModel_LanguageInfo:
+ """
+ Test Class for LanguageInfo
+ """
+
+ def test_language_info_serialization(self):
+ """
+ Test serialization/deserialization for LanguageInfo
+ """
+
+ # Construct a json representation of a LanguageInfo model
+ language_info_model_json = {}
+ language_info_model_json['confidence'] = 36.0
+ language_info_model_json['language'] = 'testString'
+ language_info_model_json['timestamp'] = 36.0
+
+ # Construct a model instance of LanguageInfo by calling from_dict on the json representation
+ language_info_model = LanguageInfo.from_dict(language_info_model_json)
+ assert language_info_model != False
+
+ # Construct a model instance of LanguageInfo by calling from_dict on the json representation
+ language_info_model_dict = LanguageInfo.from_dict(language_info_model_json).__dict__
+ language_info_model2 = LanguageInfo(**language_info_model_dict)
+
+ # Verify the model instances are equivalent
+ assert language_info_model == language_info_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ language_info_model_json2 = language_info_model.to_dict()
+ assert language_info_model_json2 == language_info_model_json
+
+
+class TestModel_LanguageModel:
+ """
+ Test Class for LanguageModel
+ """
+
+ def test_language_model_serialization(self):
+ """
+ Test serialization/deserialization for LanguageModel
+ """
+
+ # Construct a json representation of a LanguageModel model
+ language_model_model_json = {}
+ language_model_model_json['customization_id'] = 'testString'
+ language_model_model_json['created'] = 'testString'
+ language_model_model_json['updated'] = 'testString'
+ language_model_model_json['language'] = 'testString'
+ language_model_model_json['dialect'] = 'testString'
+ language_model_model_json['versions'] = ['testString']
+ language_model_model_json['owner'] = 'testString'
+ language_model_model_json['name'] = 'testString'
+ language_model_model_json['description'] = 'testString'
+ language_model_model_json['base_model_name'] = 'testString'
+ language_model_model_json['status'] = 'pending'
+ language_model_model_json['progress'] = 38
+ language_model_model_json['error'] = 'testString'
+ language_model_model_json['warnings'] = 'testString'
+
+ # Construct a model instance of LanguageModel by calling from_dict on the json representation
+ language_model_model = LanguageModel.from_dict(language_model_model_json)
+ assert language_model_model != False
+
+ # Construct a model instance of LanguageModel by calling from_dict on the json representation
+ language_model_model_dict = LanguageModel.from_dict(language_model_model_json).__dict__
+ language_model_model2 = LanguageModel(**language_model_model_dict)
+
+ # Verify the model instances are equivalent
+ assert language_model_model == language_model_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ language_model_model_json2 = language_model_model.to_dict()
+ assert language_model_model_json2 == language_model_model_json
+
+
+class TestModel_LanguageModels:
+ """
+ Test Class for LanguageModels
+ """
+
+ def test_language_models_serialization(self):
+ """
+ Test serialization/deserialization for LanguageModels
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ language_model_model = {} # LanguageModel
+ language_model_model['customization_id'] = 'testString'
+ language_model_model['created'] = 'testString'
+ language_model_model['updated'] = 'testString'
+ language_model_model['language'] = 'testString'
+ language_model_model['dialect'] = 'testString'
+ language_model_model['versions'] = ['testString']
+ language_model_model['owner'] = 'testString'
+ language_model_model['name'] = 'testString'
+ language_model_model['description'] = 'testString'
+ language_model_model['base_model_name'] = 'testString'
+ language_model_model['status'] = 'pending'
+ language_model_model['progress'] = 38
+ language_model_model['error'] = 'testString'
+ language_model_model['warnings'] = 'testString'
+
+ # Construct a json representation of a LanguageModels model
+ language_models_model_json = {}
+ language_models_model_json['customizations'] = [language_model_model]
+
+ # Construct a model instance of LanguageModels by calling from_dict on the json representation
+ language_models_model = LanguageModels.from_dict(language_models_model_json)
+ assert language_models_model != False
+
+ # Construct a model instance of LanguageModels by calling from_dict on the json representation
+ language_models_model_dict = LanguageModels.from_dict(language_models_model_json).__dict__
+ language_models_model2 = LanguageModels(**language_models_model_dict)
+
+ # Verify the model instances are equivalent
+ assert language_models_model == language_models_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ language_models_model_json2 = language_models_model.to_dict()
+ assert language_models_model_json2 == language_models_model_json
+
+
+class TestModel_ProcessedAudio:
+ """
+ Test Class for ProcessedAudio
+ """
+
+ def test_processed_audio_serialization(self):
+ """
+ Test serialization/deserialization for ProcessedAudio
+ """
+
+ # Construct a json representation of a ProcessedAudio model
+ processed_audio_model_json = {}
+ processed_audio_model_json['received'] = 36.0
+ processed_audio_model_json['seen_by_engine'] = 36.0
+ processed_audio_model_json['transcription'] = 36.0
+ processed_audio_model_json['speaker_labels'] = 36.0
+
+ # Construct a model instance of ProcessedAudio by calling from_dict on the json representation
+ processed_audio_model = ProcessedAudio.from_dict(processed_audio_model_json)
+ assert processed_audio_model != False
+
+ # Construct a model instance of ProcessedAudio by calling from_dict on the json representation
+ processed_audio_model_dict = ProcessedAudio.from_dict(processed_audio_model_json).__dict__
+ processed_audio_model2 = ProcessedAudio(**processed_audio_model_dict)
+
+ # Verify the model instances are equivalent
+ assert processed_audio_model == processed_audio_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ processed_audio_model_json2 = processed_audio_model.to_dict()
+ assert processed_audio_model_json2 == processed_audio_model_json
+
+
+class TestModel_ProcessingMetrics:
+ """
+ Test Class for ProcessingMetrics
+ """
+
+ def test_processing_metrics_serialization(self):
+ """
+ Test serialization/deserialization for ProcessingMetrics
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ processed_audio_model = {} # ProcessedAudio
+ processed_audio_model['received'] = 36.0
+ processed_audio_model['seen_by_engine'] = 36.0
+ processed_audio_model['transcription'] = 36.0
+ processed_audio_model['speaker_labels'] = 36.0
+
+ # Construct a json representation of a ProcessingMetrics model
+ processing_metrics_model_json = {}
+ processing_metrics_model_json['processed_audio'] = processed_audio_model
+ processing_metrics_model_json['wall_clock_since_first_byte_received'] = 36.0
+ processing_metrics_model_json['periodic'] = True
+
+ # Construct a model instance of ProcessingMetrics by calling from_dict on the json representation
+ processing_metrics_model = ProcessingMetrics.from_dict(processing_metrics_model_json)
+ assert processing_metrics_model != False
+
+ # Construct a model instance of ProcessingMetrics by calling from_dict on the json representation
+ processing_metrics_model_dict = ProcessingMetrics.from_dict(processing_metrics_model_json).__dict__
+ processing_metrics_model2 = ProcessingMetrics(**processing_metrics_model_dict)
+
+ # Verify the model instances are equivalent
+ assert processing_metrics_model == processing_metrics_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ processing_metrics_model_json2 = processing_metrics_model.to_dict()
+ assert processing_metrics_model_json2 == processing_metrics_model_json
+
+
+class TestModel_RecognitionJob:
+ """
+ Test Class for RecognitionJob
+ """
+
+ def test_recognition_job_serialization(self):
+ """
+ Test serialization/deserialization for RecognitionJob
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ speech_recognition_alternative_model = {} # SpeechRecognitionAlternative
+ speech_recognition_alternative_model['transcript'] = 'testString'
+ speech_recognition_alternative_model['confidence'] = 0
+ speech_recognition_alternative_model['timestamps'] = ['testString']
+ speech_recognition_alternative_model['word_confidence'] = ['testString']
+
+ keyword_result_model = {} # KeywordResult
+ keyword_result_model['normalized_text'] = 'testString'
+ keyword_result_model['start_time'] = 72.5
+ keyword_result_model['end_time'] = 72.5
+ keyword_result_model['confidence'] = 0
+
+ word_alternative_result_model = {} # WordAlternativeResult
+ word_alternative_result_model['confidence'] = 0
+ word_alternative_result_model['word'] = 'testString'
+
+ word_alternative_results_model = {} # WordAlternativeResults
+ word_alternative_results_model['start_time'] = 72.5
+ word_alternative_results_model['end_time'] = 72.5
+ word_alternative_results_model['alternatives'] = [word_alternative_result_model]
+
+ speech_recognition_result_model = {} # SpeechRecognitionResult
+ speech_recognition_result_model['final'] = True
+ speech_recognition_result_model['alternatives'] = [speech_recognition_alternative_model]
+ speech_recognition_result_model['keywords_result'] = {'key1': [keyword_result_model]}
+ speech_recognition_result_model['word_alternatives'] = [word_alternative_results_model]
+ speech_recognition_result_model['end_of_utterance'] = 'end_of_data'
+
+ speaker_labels_result_model = {} # SpeakerLabelsResult
+ speaker_labels_result_model['from'] = 36.0
+ speaker_labels_result_model['to'] = 36.0
+ speaker_labels_result_model['speaker'] = 38
+ speaker_labels_result_model['confidence'] = 36.0
+ speaker_labels_result_model['final'] = True
+
+ processed_audio_model = {} # ProcessedAudio
+ processed_audio_model['received'] = 36.0
+ processed_audio_model['seen_by_engine'] = 36.0
+ processed_audio_model['transcription'] = 36.0
+ processed_audio_model['speaker_labels'] = 36.0
+
+ processing_metrics_model = {} # ProcessingMetrics
+ processing_metrics_model['processed_audio'] = processed_audio_model
+ processing_metrics_model['wall_clock_since_first_byte_received'] = 36.0
+ processing_metrics_model['periodic'] = True
+
+ audio_metrics_histogram_bin_model = {} # AudioMetricsHistogramBin
+ audio_metrics_histogram_bin_model['begin'] = 36.0
+ audio_metrics_histogram_bin_model['end'] = 36.0
+ audio_metrics_histogram_bin_model['count'] = 38
+
+ audio_metrics_details_model = {} # AudioMetricsDetails
+ audio_metrics_details_model['final'] = True
+ audio_metrics_details_model['end_time'] = 36.0
+ audio_metrics_details_model['signal_to_noise_ratio'] = 36.0
+ audio_metrics_details_model['speech_ratio'] = 36.0
+ audio_metrics_details_model['high_frequency_loss'] = 36.0
+ audio_metrics_details_model['direct_current_offset'] = [audio_metrics_histogram_bin_model]
+ audio_metrics_details_model['clipping_rate'] = [audio_metrics_histogram_bin_model]
+ audio_metrics_details_model['speech_level'] = [audio_metrics_histogram_bin_model]
+ audio_metrics_details_model['non_speech_level'] = [audio_metrics_histogram_bin_model]
+
+ audio_metrics_model = {} # AudioMetrics
+ audio_metrics_model['sampling_interval'] = 36.0
+ audio_metrics_model['accumulated'] = audio_metrics_details_model
+
+ enriched_results_transcript_timestamp_model = {} # EnrichedResultsTranscriptTimestamp
+ enriched_results_transcript_timestamp_model['from'] = 36.0
+ enriched_results_transcript_timestamp_model['to'] = 36.0
+
+ enriched_results_transcript_model = {} # EnrichedResultsTranscript
+ enriched_results_transcript_model['text'] = 'testString'
+ enriched_results_transcript_model['timestamp'] = enriched_results_transcript_timestamp_model
+
+ enriched_results_model = {} # EnrichedResults
+ enriched_results_model['transcript'] = enriched_results_transcript_model
+ enriched_results_model['status'] = 'testString'
+
+ speech_recognition_results_model = {} # SpeechRecognitionResults
+ speech_recognition_results_model['results'] = [speech_recognition_result_model]
+ speech_recognition_results_model['result_index'] = 38
+ speech_recognition_results_model['speaker_labels'] = [speaker_labels_result_model]
+ speech_recognition_results_model['processing_metrics'] = processing_metrics_model
+ speech_recognition_results_model['audio_metrics'] = audio_metrics_model
+ speech_recognition_results_model['warnings'] = ['testString']
+ speech_recognition_results_model['enriched_results'] = enriched_results_model
+
+ # Construct a json representation of a RecognitionJob model
+ recognition_job_model_json = {}
+ recognition_job_model_json['id'] = 'testString'
+ recognition_job_model_json['status'] = 'waiting'
+ recognition_job_model_json['created'] = 'testString'
+ recognition_job_model_json['updated'] = 'testString'
+ recognition_job_model_json['url'] = 'testString'
+ recognition_job_model_json['user_token'] = 'testString'
+ recognition_job_model_json['results'] = [speech_recognition_results_model]
+ recognition_job_model_json['warnings'] = ['testString']
+
+ # Construct a model instance of RecognitionJob by calling from_dict on the json representation
+ recognition_job_model = RecognitionJob.from_dict(recognition_job_model_json)
+ assert recognition_job_model != False
+
+ # Construct a model instance of RecognitionJob by calling from_dict on the json representation
+ recognition_job_model_dict = RecognitionJob.from_dict(recognition_job_model_json).__dict__
+ recognition_job_model2 = RecognitionJob(**recognition_job_model_dict)
+
+ # Verify the model instances are equivalent
+ assert recognition_job_model == recognition_job_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ recognition_job_model_json2 = recognition_job_model.to_dict()
+ assert recognition_job_model_json2 == recognition_job_model_json
+
+
+class TestModel_RecognitionJobs:
+ """
+ Test Class for RecognitionJobs
+ """
+
+ def test_recognition_jobs_serialization(self):
+ """
+ Test serialization/deserialization for RecognitionJobs
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ speech_recognition_alternative_model = {} # SpeechRecognitionAlternative
+ speech_recognition_alternative_model['transcript'] = 'testString'
+ speech_recognition_alternative_model['confidence'] = 0
+ speech_recognition_alternative_model['timestamps'] = ['testString']
+ speech_recognition_alternative_model['word_confidence'] = ['testString']
+
+ keyword_result_model = {} # KeywordResult
+ keyword_result_model['normalized_text'] = 'testString'
+ keyword_result_model['start_time'] = 72.5
+ keyword_result_model['end_time'] = 72.5
+ keyword_result_model['confidence'] = 0
+
+ word_alternative_result_model = {} # WordAlternativeResult
+ word_alternative_result_model['confidence'] = 0
+ word_alternative_result_model['word'] = 'testString'
+
+ word_alternative_results_model = {} # WordAlternativeResults
+ word_alternative_results_model['start_time'] = 72.5
+ word_alternative_results_model['end_time'] = 72.5
+ word_alternative_results_model['alternatives'] = [word_alternative_result_model]
+
+ speech_recognition_result_model = {} # SpeechRecognitionResult
+ speech_recognition_result_model['final'] = True
+ speech_recognition_result_model['alternatives'] = [speech_recognition_alternative_model]
+ speech_recognition_result_model['keywords_result'] = {'key1': [keyword_result_model]}
+ speech_recognition_result_model['word_alternatives'] = [word_alternative_results_model]
+ speech_recognition_result_model['end_of_utterance'] = 'end_of_data'
+
+ speaker_labels_result_model = {} # SpeakerLabelsResult
+ speaker_labels_result_model['from'] = 36.0
+ speaker_labels_result_model['to'] = 36.0
+ speaker_labels_result_model['speaker'] = 38
+ speaker_labels_result_model['confidence'] = 36.0
+ speaker_labels_result_model['final'] = True
+
+ processed_audio_model = {} # ProcessedAudio
+ processed_audio_model['received'] = 36.0
+ processed_audio_model['seen_by_engine'] = 36.0
+ processed_audio_model['transcription'] = 36.0
+ processed_audio_model['speaker_labels'] = 36.0
+
+ processing_metrics_model = {} # ProcessingMetrics
+ processing_metrics_model['processed_audio'] = processed_audio_model
+ processing_metrics_model['wall_clock_since_first_byte_received'] = 36.0
+ processing_metrics_model['periodic'] = True
+
+ audio_metrics_histogram_bin_model = {} # AudioMetricsHistogramBin
+ audio_metrics_histogram_bin_model['begin'] = 36.0
+ audio_metrics_histogram_bin_model['end'] = 36.0
+ audio_metrics_histogram_bin_model['count'] = 38
+
+ audio_metrics_details_model = {} # AudioMetricsDetails
+ audio_metrics_details_model['final'] = True
+ audio_metrics_details_model['end_time'] = 36.0
+ audio_metrics_details_model['signal_to_noise_ratio'] = 36.0
+ audio_metrics_details_model['speech_ratio'] = 36.0
+ audio_metrics_details_model['high_frequency_loss'] = 36.0
+ audio_metrics_details_model['direct_current_offset'] = [audio_metrics_histogram_bin_model]
+ audio_metrics_details_model['clipping_rate'] = [audio_metrics_histogram_bin_model]
+ audio_metrics_details_model['speech_level'] = [audio_metrics_histogram_bin_model]
+ audio_metrics_details_model['non_speech_level'] = [audio_metrics_histogram_bin_model]
+
+ audio_metrics_model = {} # AudioMetrics
+ audio_metrics_model['sampling_interval'] = 36.0
+ audio_metrics_model['accumulated'] = audio_metrics_details_model
+
+ enriched_results_transcript_timestamp_model = {} # EnrichedResultsTranscriptTimestamp
+ enriched_results_transcript_timestamp_model['from'] = 36.0
+ enriched_results_transcript_timestamp_model['to'] = 36.0
+
+ enriched_results_transcript_model = {} # EnrichedResultsTranscript
+ enriched_results_transcript_model['text'] = 'testString'
+ enriched_results_transcript_model['timestamp'] = enriched_results_transcript_timestamp_model
+
+ enriched_results_model = {} # EnrichedResults
+ enriched_results_model['transcript'] = enriched_results_transcript_model
+ enriched_results_model['status'] = 'testString'
+
+ speech_recognition_results_model = {} # SpeechRecognitionResults
+ speech_recognition_results_model['results'] = [speech_recognition_result_model]
+ speech_recognition_results_model['result_index'] = 38
+ speech_recognition_results_model['speaker_labels'] = [speaker_labels_result_model]
+ speech_recognition_results_model['processing_metrics'] = processing_metrics_model
+ speech_recognition_results_model['audio_metrics'] = audio_metrics_model
+ speech_recognition_results_model['warnings'] = ['testString']
+ speech_recognition_results_model['enriched_results'] = enriched_results_model
+
+ recognition_job_model = {} # RecognitionJob
+ recognition_job_model['id'] = 'testString'
+ recognition_job_model['status'] = 'waiting'
+ recognition_job_model['created'] = 'testString'
+ recognition_job_model['updated'] = 'testString'
+ recognition_job_model['url'] = 'testString'
+ recognition_job_model['user_token'] = 'testString'
+ recognition_job_model['results'] = [speech_recognition_results_model]
+ recognition_job_model['warnings'] = ['testString']
+
+ # Construct a json representation of a RecognitionJobs model
+ recognition_jobs_model_json = {}
+ recognition_jobs_model_json['recognitions'] = [recognition_job_model]
+
+ # Construct a model instance of RecognitionJobs by calling from_dict on the json representation
+ recognition_jobs_model = RecognitionJobs.from_dict(recognition_jobs_model_json)
+ assert recognition_jobs_model != False
+
+ # Construct a model instance of RecognitionJobs by calling from_dict on the json representation
+ recognition_jobs_model_dict = RecognitionJobs.from_dict(recognition_jobs_model_json).__dict__
+ recognition_jobs_model2 = RecognitionJobs(**recognition_jobs_model_dict)
+
+ # Verify the model instances are equivalent
+ assert recognition_jobs_model == recognition_jobs_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ recognition_jobs_model_json2 = recognition_jobs_model.to_dict()
+ assert recognition_jobs_model_json2 == recognition_jobs_model_json
+
+
+class TestModel_RegisterStatus:
+ """
+ Test Class for RegisterStatus
+ """
+
+ def test_register_status_serialization(self):
+ """
+ Test serialization/deserialization for RegisterStatus
+ """
+
+ # Construct a json representation of a RegisterStatus model
+ register_status_model_json = {}
+ register_status_model_json['status'] = 'created'
+ register_status_model_json['url'] = 'testString'
+
+ # Construct a model instance of RegisterStatus by calling from_dict on the json representation
+ register_status_model = RegisterStatus.from_dict(register_status_model_json)
+ assert register_status_model != False
+
+ # Construct a model instance of RegisterStatus by calling from_dict on the json representation
+ register_status_model_dict = RegisterStatus.from_dict(register_status_model_json).__dict__
+ register_status_model2 = RegisterStatus(**register_status_model_dict)
+
+ # Verify the model instances are equivalent
+ assert register_status_model == register_status_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ register_status_model_json2 = register_status_model.to_dict()
+ assert register_status_model_json2 == register_status_model_json
+
+
+class TestModel_SpeakerLabelsResult:
+ """
+ Test Class for SpeakerLabelsResult
+ """
+
+ def test_speaker_labels_result_serialization(self):
+ """
+ Test serialization/deserialization for SpeakerLabelsResult
+ """
+
+ # Construct a json representation of a SpeakerLabelsResult model
+ speaker_labels_result_model_json = {}
+ speaker_labels_result_model_json['from'] = 36.0
+ speaker_labels_result_model_json['to'] = 36.0
+ speaker_labels_result_model_json['speaker'] = 38
+ speaker_labels_result_model_json['confidence'] = 36.0
+ speaker_labels_result_model_json['final'] = True
+
+ # Construct a model instance of SpeakerLabelsResult by calling from_dict on the json representation
+ speaker_labels_result_model = SpeakerLabelsResult.from_dict(speaker_labels_result_model_json)
+ assert speaker_labels_result_model != False
+
+ # Construct a model instance of SpeakerLabelsResult by calling from_dict on the json representation
+ speaker_labels_result_model_dict = SpeakerLabelsResult.from_dict(speaker_labels_result_model_json).__dict__
+ speaker_labels_result_model2 = SpeakerLabelsResult(**speaker_labels_result_model_dict)
+
+ # Verify the model instances are equivalent
+ assert speaker_labels_result_model == speaker_labels_result_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ speaker_labels_result_model_json2 = speaker_labels_result_model.to_dict()
+ assert speaker_labels_result_model_json2 == speaker_labels_result_model_json
+
+
+class TestModel_SpeechModel:
+ """
+ Test Class for SpeechModel
+ """
+
+ def test_speech_model_serialization(self):
+ """
+ Test serialization/deserialization for SpeechModel
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ supported_features_model = {} # SupportedFeatures
+ supported_features_model['custom_language_model'] = True
+ supported_features_model['custom_acoustic_model'] = True
+ supported_features_model['speaker_labels'] = True
+ supported_features_model['low_latency'] = True
+
+ # Construct a json representation of a SpeechModel model
+ speech_model_model_json = {}
+ speech_model_model_json['name'] = 'testString'
+ speech_model_model_json['language'] = 'testString'
+ speech_model_model_json['rate'] = 38
+ speech_model_model_json['url'] = 'testString'
+ speech_model_model_json['supported_features'] = supported_features_model
+ speech_model_model_json['description'] = 'testString'
+
+ # Construct a model instance of SpeechModel by calling from_dict on the json representation
+ speech_model_model = SpeechModel.from_dict(speech_model_model_json)
+ assert speech_model_model != False
+
+ # Construct a model instance of SpeechModel by calling from_dict on the json representation
+ speech_model_model_dict = SpeechModel.from_dict(speech_model_model_json).__dict__
+ speech_model_model2 = SpeechModel(**speech_model_model_dict)
+
+ # Verify the model instances are equivalent
+ assert speech_model_model == speech_model_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ speech_model_model_json2 = speech_model_model.to_dict()
+ assert speech_model_model_json2 == speech_model_model_json
+
+
+class TestModel_SpeechModels:
+ """
+ Test Class for SpeechModels
+ """
+
+ def test_speech_models_serialization(self):
+ """
+ Test serialization/deserialization for SpeechModels
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ supported_features_model = {} # SupportedFeatures
+ supported_features_model['custom_language_model'] = True
+ supported_features_model['custom_acoustic_model'] = True
+ supported_features_model['speaker_labels'] = True
+ supported_features_model['low_latency'] = True
+
+ speech_model_model = {} # SpeechModel
+ speech_model_model['name'] = 'testString'
+ speech_model_model['language'] = 'testString'
+ speech_model_model['rate'] = 38
+ speech_model_model['url'] = 'testString'
+ speech_model_model['supported_features'] = supported_features_model
+ speech_model_model['description'] = 'testString'
+
+ # Construct a json representation of a SpeechModels model
+ speech_models_model_json = {}
+ speech_models_model_json['models'] = [speech_model_model]
+
+ # Construct a model instance of SpeechModels by calling from_dict on the json representation
+ speech_models_model = SpeechModels.from_dict(speech_models_model_json)
+ assert speech_models_model != False
+
+ # Construct a model instance of SpeechModels by calling from_dict on the json representation
+ speech_models_model_dict = SpeechModels.from_dict(speech_models_model_json).__dict__
+ speech_models_model2 = SpeechModels(**speech_models_model_dict)
+
+ # Verify the model instances are equivalent
+ assert speech_models_model == speech_models_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ speech_models_model_json2 = speech_models_model.to_dict()
+ assert speech_models_model_json2 == speech_models_model_json
+
+
+class TestModel_SpeechRecognitionAlternative:
+ """
+ Test Class for SpeechRecognitionAlternative
+ """
+
+ def test_speech_recognition_alternative_serialization(self):
+ """
+ Test serialization/deserialization for SpeechRecognitionAlternative
+ """
+
+ # Construct a json representation of a SpeechRecognitionAlternative model
+ speech_recognition_alternative_model_json = {}
+ speech_recognition_alternative_model_json['transcript'] = 'testString'
+ speech_recognition_alternative_model_json['confidence'] = 0
+ speech_recognition_alternative_model_json['timestamps'] = ['testString']
+ speech_recognition_alternative_model_json['word_confidence'] = ['testString']
+
+ # Construct a model instance of SpeechRecognitionAlternative by calling from_dict on the json representation
+ speech_recognition_alternative_model = SpeechRecognitionAlternative.from_dict(speech_recognition_alternative_model_json)
+ assert speech_recognition_alternative_model != False
+
+ # Construct a model instance of SpeechRecognitionAlternative by calling from_dict on the json representation
+ speech_recognition_alternative_model_dict = SpeechRecognitionAlternative.from_dict(speech_recognition_alternative_model_json).__dict__
+ speech_recognition_alternative_model2 = SpeechRecognitionAlternative(**speech_recognition_alternative_model_dict)
+
+ # Verify the model instances are equivalent
+ assert speech_recognition_alternative_model == speech_recognition_alternative_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ speech_recognition_alternative_model_json2 = speech_recognition_alternative_model.to_dict()
+ assert speech_recognition_alternative_model_json2 == speech_recognition_alternative_model_json
+
+
+class TestModel_SpeechRecognitionResult:
+ """
+ Test Class for SpeechRecognitionResult
+ """
+
+ def test_speech_recognition_result_serialization(self):
+ """
+ Test serialization/deserialization for SpeechRecognitionResult
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ speech_recognition_alternative_model = {} # SpeechRecognitionAlternative
+ speech_recognition_alternative_model['transcript'] = 'testString'
+ speech_recognition_alternative_model['confidence'] = 0
+ speech_recognition_alternative_model['timestamps'] = ['testString']
+ speech_recognition_alternative_model['word_confidence'] = ['testString']
+
+ keyword_result_model = {} # KeywordResult
+ keyword_result_model['normalized_text'] = 'testString'
+ keyword_result_model['start_time'] = 72.5
+ keyword_result_model['end_time'] = 72.5
+ keyword_result_model['confidence'] = 0
+
+ word_alternative_result_model = {} # WordAlternativeResult
+ word_alternative_result_model['confidence'] = 0
+ word_alternative_result_model['word'] = 'testString'
+
+ word_alternative_results_model = {} # WordAlternativeResults
+ word_alternative_results_model['start_time'] = 72.5
+ word_alternative_results_model['end_time'] = 72.5
+ word_alternative_results_model['alternatives'] = [word_alternative_result_model]
+
+ # Construct a json representation of a SpeechRecognitionResult model
+ speech_recognition_result_model_json = {}
+ speech_recognition_result_model_json['final'] = True
+ speech_recognition_result_model_json['alternatives'] = [speech_recognition_alternative_model]
+ speech_recognition_result_model_json['keywords_result'] = {'key1': [keyword_result_model]}
+ speech_recognition_result_model_json['word_alternatives'] = [word_alternative_results_model]
+ speech_recognition_result_model_json['end_of_utterance'] = 'end_of_data'
+
+ # Construct a model instance of SpeechRecognitionResult by calling from_dict on the json representation
+ speech_recognition_result_model = SpeechRecognitionResult.from_dict(speech_recognition_result_model_json)
+ assert speech_recognition_result_model != False
+
+ # Construct a model instance of SpeechRecognitionResult by calling from_dict on the json representation
+ speech_recognition_result_model_dict = SpeechRecognitionResult.from_dict(speech_recognition_result_model_json).__dict__
+ speech_recognition_result_model2 = SpeechRecognitionResult(**speech_recognition_result_model_dict)
+
+ # Verify the model instances are equivalent
+ assert speech_recognition_result_model == speech_recognition_result_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ speech_recognition_result_model_json2 = speech_recognition_result_model.to_dict()
+ assert speech_recognition_result_model_json2 == speech_recognition_result_model_json
+
+
+class TestModel_SpeechRecognitionResults:
+ """
+ Test Class for SpeechRecognitionResults
+ """
+
+ def test_speech_recognition_results_serialization(self):
+ """
+ Test serialization/deserialization for SpeechRecognitionResults
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ speech_recognition_alternative_model = {} # SpeechRecognitionAlternative
+ speech_recognition_alternative_model['transcript'] = 'testString'
+ speech_recognition_alternative_model['confidence'] = 0
+ speech_recognition_alternative_model['timestamps'] = ['testString']
+ speech_recognition_alternative_model['word_confidence'] = ['testString']
+
+ keyword_result_model = {} # KeywordResult
+ keyword_result_model['normalized_text'] = 'testString'
+ keyword_result_model['start_time'] = 72.5
+ keyword_result_model['end_time'] = 72.5
+ keyword_result_model['confidence'] = 0
+
+ word_alternative_result_model = {} # WordAlternativeResult
+ word_alternative_result_model['confidence'] = 0
+ word_alternative_result_model['word'] = 'testString'
+
+ word_alternative_results_model = {} # WordAlternativeResults
+ word_alternative_results_model['start_time'] = 72.5
+ word_alternative_results_model['end_time'] = 72.5
+ word_alternative_results_model['alternatives'] = [word_alternative_result_model]
+
+ speech_recognition_result_model = {} # SpeechRecognitionResult
+ speech_recognition_result_model['final'] = True
+ speech_recognition_result_model['alternatives'] = [speech_recognition_alternative_model]
+ speech_recognition_result_model['keywords_result'] = {'key1': [keyword_result_model]}
+ speech_recognition_result_model['word_alternatives'] = [word_alternative_results_model]
+ speech_recognition_result_model['end_of_utterance'] = 'end_of_data'
+
+ speaker_labels_result_model = {} # SpeakerLabelsResult
+ speaker_labels_result_model['from'] = 36.0
+ speaker_labels_result_model['to'] = 36.0
+ speaker_labels_result_model['speaker'] = 38
+ speaker_labels_result_model['confidence'] = 36.0
+ speaker_labels_result_model['final'] = True
+
+ processed_audio_model = {} # ProcessedAudio
+ processed_audio_model['received'] = 36.0
+ processed_audio_model['seen_by_engine'] = 36.0
+ processed_audio_model['transcription'] = 36.0
+ processed_audio_model['speaker_labels'] = 36.0
+
+ processing_metrics_model = {} # ProcessingMetrics
+ processing_metrics_model['processed_audio'] = processed_audio_model
+ processing_metrics_model['wall_clock_since_first_byte_received'] = 36.0
+ processing_metrics_model['periodic'] = True
+
+ audio_metrics_histogram_bin_model = {} # AudioMetricsHistogramBin
+ audio_metrics_histogram_bin_model['begin'] = 36.0
+ audio_metrics_histogram_bin_model['end'] = 36.0
+ audio_metrics_histogram_bin_model['count'] = 38
+
+ audio_metrics_details_model = {} # AudioMetricsDetails
+ audio_metrics_details_model['final'] = True
+ audio_metrics_details_model['end_time'] = 36.0
+ audio_metrics_details_model['signal_to_noise_ratio'] = 36.0
+ audio_metrics_details_model['speech_ratio'] = 36.0
+ audio_metrics_details_model['high_frequency_loss'] = 36.0
+ audio_metrics_details_model['direct_current_offset'] = [audio_metrics_histogram_bin_model]
+ audio_metrics_details_model['clipping_rate'] = [audio_metrics_histogram_bin_model]
+ audio_metrics_details_model['speech_level'] = [audio_metrics_histogram_bin_model]
+ audio_metrics_details_model['non_speech_level'] = [audio_metrics_histogram_bin_model]
+
+ audio_metrics_model = {} # AudioMetrics
+ audio_metrics_model['sampling_interval'] = 36.0
+ audio_metrics_model['accumulated'] = audio_metrics_details_model
+
+ enriched_results_transcript_timestamp_model = {} # EnrichedResultsTranscriptTimestamp
+ enriched_results_transcript_timestamp_model['from'] = 36.0
+ enriched_results_transcript_timestamp_model['to'] = 36.0
+
+ enriched_results_transcript_model = {} # EnrichedResultsTranscript
+ enriched_results_transcript_model['text'] = 'testString'
+ enriched_results_transcript_model['timestamp'] = enriched_results_transcript_timestamp_model
+
+ enriched_results_model = {} # EnrichedResults
+ enriched_results_model['transcript'] = enriched_results_transcript_model
+ enriched_results_model['status'] = 'testString'
+
+ # Construct a json representation of a SpeechRecognitionResults model
+ speech_recognition_results_model_json = {}
+ speech_recognition_results_model_json['results'] = [speech_recognition_result_model]
+ speech_recognition_results_model_json['result_index'] = 38
+ speech_recognition_results_model_json['speaker_labels'] = [speaker_labels_result_model]
+ speech_recognition_results_model_json['processing_metrics'] = processing_metrics_model
+ speech_recognition_results_model_json['audio_metrics'] = audio_metrics_model
+ speech_recognition_results_model_json['warnings'] = ['testString']
+ speech_recognition_results_model_json['enriched_results'] = enriched_results_model
+
+ # Construct a model instance of SpeechRecognitionResults by calling from_dict on the json representation
+ speech_recognition_results_model = SpeechRecognitionResults.from_dict(speech_recognition_results_model_json)
+ assert speech_recognition_results_model != False
+
+ # Construct a model instance of SpeechRecognitionResults by calling from_dict on the json representation
+ speech_recognition_results_model_dict = SpeechRecognitionResults.from_dict(speech_recognition_results_model_json).__dict__
+ speech_recognition_results_model2 = SpeechRecognitionResults(**speech_recognition_results_model_dict)
+
+ # Verify the model instances are equivalent
+ assert speech_recognition_results_model == speech_recognition_results_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ speech_recognition_results_model_json2 = speech_recognition_results_model.to_dict()
+ assert speech_recognition_results_model_json2 == speech_recognition_results_model_json
+
+
+class TestModel_SupportedFeatures:
+ """
+ Test Class for SupportedFeatures
+ """
+
+ def test_supported_features_serialization(self):
+ """
+ Test serialization/deserialization for SupportedFeatures
+ """
+
+ # Construct a json representation of a SupportedFeatures model
+ supported_features_model_json = {}
+ supported_features_model_json['custom_language_model'] = True
+ supported_features_model_json['custom_acoustic_model'] = True
+ supported_features_model_json['speaker_labels'] = True
+ supported_features_model_json['low_latency'] = True
+
+ # Construct a model instance of SupportedFeatures by calling from_dict on the json representation
+ supported_features_model = SupportedFeatures.from_dict(supported_features_model_json)
+ assert supported_features_model != False
+
+ # Construct a model instance of SupportedFeatures by calling from_dict on the json representation
+ supported_features_model_dict = SupportedFeatures.from_dict(supported_features_model_json).__dict__
+ supported_features_model2 = SupportedFeatures(**supported_features_model_dict)
+
+ # Verify the model instances are equivalent
+ assert supported_features_model == supported_features_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ supported_features_model_json2 = supported_features_model.to_dict()
+ assert supported_features_model_json2 == supported_features_model_json
+
+
+class TestModel_TrainingResponse:
+ """
+ Test Class for TrainingResponse
+ """
+
+ def test_training_response_serialization(self):
+ """
+ Test serialization/deserialization for TrainingResponse
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ training_warning_model = {} # TrainingWarning
+ training_warning_model['code'] = 'invalid_audio_files'
+ training_warning_model['message'] = 'testString'
+
+ # Construct a json representation of a TrainingResponse model
+ training_response_model_json = {}
+ training_response_model_json['warnings'] = [training_warning_model]
+
+ # Construct a model instance of TrainingResponse by calling from_dict on the json representation
+ training_response_model = TrainingResponse.from_dict(training_response_model_json)
+ assert training_response_model != False
+
+ # Construct a model instance of TrainingResponse by calling from_dict on the json representation
+ training_response_model_dict = TrainingResponse.from_dict(training_response_model_json).__dict__
+ training_response_model2 = TrainingResponse(**training_response_model_dict)
+
+ # Verify the model instances are equivalent
+ assert training_response_model == training_response_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ training_response_model_json2 = training_response_model.to_dict()
+ assert training_response_model_json2 == training_response_model_json
+
+
+class TestModel_TrainingWarning:
+ """
+ Test Class for TrainingWarning
+ """
+
+ def test_training_warning_serialization(self):
+ """
+ Test serialization/deserialization for TrainingWarning
+ """
+
+ # Construct a json representation of a TrainingWarning model
+ training_warning_model_json = {}
+ training_warning_model_json['code'] = 'invalid_audio_files'
+ training_warning_model_json['message'] = 'testString'
+
+ # Construct a model instance of TrainingWarning by calling from_dict on the json representation
+ training_warning_model = TrainingWarning.from_dict(training_warning_model_json)
+ assert training_warning_model != False
+
+ # Construct a model instance of TrainingWarning by calling from_dict on the json representation
+ training_warning_model_dict = TrainingWarning.from_dict(training_warning_model_json).__dict__
+ training_warning_model2 = TrainingWarning(**training_warning_model_dict)
+
+ # Verify the model instances are equivalent
+ assert training_warning_model == training_warning_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ training_warning_model_json2 = training_warning_model.to_dict()
+ assert training_warning_model_json2 == training_warning_model_json
+
+
+class TestModel_Word:
+ """
+ Test Class for Word
+ """
+
+ def test_word_serialization(self):
+ """
+ Test serialization/deserialization for Word
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ word_error_model = {} # WordError
+ word_error_model['element'] = 'testString'
+
+ # Construct a json representation of a Word model
+ word_model_json = {}
+ word_model_json['word'] = 'testString'
+ word_model_json['mapping_only'] = ['testString']
+ word_model_json['sounds_like'] = ['testString']
+ word_model_json['display_as'] = 'testString'
+ word_model_json['count'] = 38
+ word_model_json['source'] = ['testString']
+ word_model_json['error'] = [word_error_model]
+
+ # Construct a model instance of Word by calling from_dict on the json representation
+ word_model = Word.from_dict(word_model_json)
+ assert word_model != False
+
+ # Construct a model instance of Word by calling from_dict on the json representation
+ word_model_dict = Word.from_dict(word_model_json).__dict__
+ word_model2 = Word(**word_model_dict)
+
+ # Verify the model instances are equivalent
+ assert word_model == word_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ word_model_json2 = word_model.to_dict()
+ assert word_model_json2 == word_model_json
+
+
+class TestModel_WordAlternativeResult:
+ """
+ Test Class for WordAlternativeResult
+ """
+
+ def test_word_alternative_result_serialization(self):
+ """
+ Test serialization/deserialization for WordAlternativeResult
+ """
+
+ # Construct a json representation of a WordAlternativeResult model
+ word_alternative_result_model_json = {}
+ word_alternative_result_model_json['confidence'] = 0
+ word_alternative_result_model_json['word'] = 'testString'
+
+ # Construct a model instance of WordAlternativeResult by calling from_dict on the json representation
+ word_alternative_result_model = WordAlternativeResult.from_dict(word_alternative_result_model_json)
+ assert word_alternative_result_model != False
+
+ # Construct a model instance of WordAlternativeResult by calling from_dict on the json representation
+ word_alternative_result_model_dict = WordAlternativeResult.from_dict(word_alternative_result_model_json).__dict__
+ word_alternative_result_model2 = WordAlternativeResult(**word_alternative_result_model_dict)
+
+ # Verify the model instances are equivalent
+ assert word_alternative_result_model == word_alternative_result_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ word_alternative_result_model_json2 = word_alternative_result_model.to_dict()
+ assert word_alternative_result_model_json2 == word_alternative_result_model_json
+
+
+class TestModel_WordAlternativeResults:
+ """
+ Test Class for WordAlternativeResults
+ """
+
+ def test_word_alternative_results_serialization(self):
+ """
+ Test serialization/deserialization for WordAlternativeResults
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ word_alternative_result_model = {} # WordAlternativeResult
+ word_alternative_result_model['confidence'] = 0
+ word_alternative_result_model['word'] = 'testString'
+
+ # Construct a json representation of a WordAlternativeResults model
+ word_alternative_results_model_json = {}
+ word_alternative_results_model_json['start_time'] = 72.5
+ word_alternative_results_model_json['end_time'] = 72.5
+ word_alternative_results_model_json['alternatives'] = [word_alternative_result_model]
+
+ # Construct a model instance of WordAlternativeResults by calling from_dict on the json representation
+ word_alternative_results_model = WordAlternativeResults.from_dict(word_alternative_results_model_json)
+ assert word_alternative_results_model != False
+
+ # Construct a model instance of WordAlternativeResults by calling from_dict on the json representation
+ word_alternative_results_model_dict = WordAlternativeResults.from_dict(word_alternative_results_model_json).__dict__
+ word_alternative_results_model2 = WordAlternativeResults(**word_alternative_results_model_dict)
+
+ # Verify the model instances are equivalent
+ assert word_alternative_results_model == word_alternative_results_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ word_alternative_results_model_json2 = word_alternative_results_model.to_dict()
+ assert word_alternative_results_model_json2 == word_alternative_results_model_json
+
+
+class TestModel_WordError:
+ """
+ Test Class for WordError
+ """
+
+ def test_word_error_serialization(self):
+ """
+ Test serialization/deserialization for WordError
+ """
+
+ # Construct a json representation of a WordError model
+ word_error_model_json = {}
+ word_error_model_json['element'] = 'testString'
+
+ # Construct a model instance of WordError by calling from_dict on the json representation
+ word_error_model = WordError.from_dict(word_error_model_json)
+ assert word_error_model != False
+
+ # Construct a model instance of WordError by calling from_dict on the json representation
+ word_error_model_dict = WordError.from_dict(word_error_model_json).__dict__
+ word_error_model2 = WordError(**word_error_model_dict)
+
+ # Verify the model instances are equivalent
+ assert word_error_model == word_error_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ word_error_model_json2 = word_error_model.to_dict()
+ assert word_error_model_json2 == word_error_model_json
+
+
+class TestModel_Words:
+ """
+ Test Class for Words
+ """
+
+ def test_words_serialization(self):
+ """
+ Test serialization/deserialization for Words
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ word_error_model = {} # WordError
+ word_error_model['element'] = 'testString'
+
+ word_model = {} # Word
+ word_model['word'] = 'testString'
+ word_model['mapping_only'] = ['testString']
+ word_model['sounds_like'] = ['testString']
+ word_model['display_as'] = 'testString'
+ word_model['count'] = 38
+ word_model['source'] = ['testString']
+ word_model['error'] = [word_error_model]
+
+ # Construct a json representation of a Words model
+ words_model_json = {}
+ words_model_json['words'] = [word_model]
+
+ # Construct a model instance of Words by calling from_dict on the json representation
+ words_model = Words.from_dict(words_model_json)
+ assert words_model != False
+
+ # Construct a model instance of Words by calling from_dict on the json representation
+ words_model_dict = Words.from_dict(words_model_json).__dict__
+ words_model2 = Words(**words_model_dict)
+
+ # Verify the model instances are equivalent
+ assert words_model == words_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ words_model_json2 = words_model.to_dict()
+ assert words_model_json2 == words_model_json
+
+
+# endregion
+##############################################################################
+# End of Model Tests
+##############################################################################
diff --git a/test/unit/test_text_to_speech_v1.py b/test/unit/test_text_to_speech_v1.py
index c18f5e629..e151e4503 100644
--- a/test/unit/test_text_to_speech_v1.py
+++ b/test/unit/test_text_to_speech_v1.py
@@ -1,245 +1,2889 @@
-# coding=utf-8
-import responses
-import ibm_watson
+# -*- coding: utf-8 -*-
+# (C) Copyright IBM Corp. 2015, 2024.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Unit Tests for TextToSpeechV1
+"""
+
+from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
+import inspect
+import io
import json
+import pytest
+import re
+import requests
+import responses
+import tempfile
+import urllib
+from ibm_watson.text_to_speech_v1 import *
+
+
+_service = TextToSpeechV1(
+ authenticator=NoAuthAuthenticator()
+)
+
+_base_url = 'https://api.us-south.text-to-speech.watson.cloud.ibm.com'
+_service.set_service_url(_base_url)
+
+
+def preprocess_url(operation_path: str):
+ """
+ Returns the request url associated with the specified operation path.
+ This will be base_url concatenated with a quoted version of operation_path.
+ The returned request URL is used to register the mock response so it needs
+ to match the request URL that is formed by the requests library.
+ """
+
+ # Form the request URL from the base URL and operation path.
+ request_url = _base_url + operation_path
+
+ # If the request url does NOT end with a /, then just return it as-is.
+ # Otherwise, return a regular expression that matches one or more trailing /.
+ if not request_url.endswith('/'):
+ return request_url
+ return re.compile(request_url.rstrip('/') + '/+')
+
+
+##############################################################################
+# Start of Service: Voices
+##############################################################################
+# region
+
+
+class TestListVoices:
+ """
+ Test Class for list_voices
+ """
+
+ @responses.activate
+ def test_list_voices_all_params(self):
+ """
+ list_voices()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/voices')
+ mock_response = '{"voices": [{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}}]}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Invoke method
+ response = _service.list_voices()
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_list_voices_all_params_with_retries(self):
+ # Enable retries and run test_list_voices_all_params.
+ _service.enable_retries()
+ self.test_list_voices_all_params()
+
+ # Disable retries and run test_list_voices_all_params.
+ _service.disable_retries()
+ self.test_list_voices_all_params()
+
+
+class TestGetVoice:
+ """
+ Test Class for get_voice
+ """
+
+ @responses.activate
+ def test_get_voice_all_params(self):
+ """
+ get_voice()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/voices/de-DE_BirgitV3Voice')
+ mock_response = '{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ voice = 'de-DE_BirgitV3Voice'
+ customization_id = 'testString'
+
+ # Invoke method
+ response = _service.get_voice(
+ voice,
+ customization_id=customization_id,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'customization_id={}'.format(customization_id) in query_string
+
+ def test_get_voice_all_params_with_retries(self):
+ # Enable retries and run test_get_voice_all_params.
+ _service.enable_retries()
+ self.test_get_voice_all_params()
+
+ # Disable retries and run test_get_voice_all_params.
+ _service.disable_retries()
+ self.test_get_voice_all_params()
+
+ @responses.activate
+ def test_get_voice_required_params(self):
+ """
+ test_get_voice_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/voices/de-DE_BirgitV3Voice')
+ mock_response = '{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ voice = 'de-DE_BirgitV3Voice'
+
+ # Invoke method
+ response = _service.get_voice(
+ voice,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+
+ def test_get_voice_required_params_with_retries(self):
+ # Enable retries and run test_get_voice_required_params.
+ _service.enable_retries()
+ self.test_get_voice_required_params()
+
+ # Disable retries and run test_get_voice_required_params.
+ _service.disable_retries()
+ self.test_get_voice_required_params()
+
+ @responses.activate
+ def test_get_voice_value_error(self):
+ """
+ test_get_voice_value_error()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/voices/de-DE_BirgitV3Voice')
+ mock_response = '{"url": "url", "gender": "gender", "name": "name", "language": "language", "description": "description", "customizable": true, "supported_features": {"custom_pronunciation": true, "voice_transformation": true}, "customization": {"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}}'
+ responses.add(
+ responses.GET,
+ url,
+ body=mock_response,
+ content_type='application/json',
+ status=200,
+ )
+
+ # Set up parameter values
+ voice = 'de-DE_BirgitV3Voice'
+
+ # Pass in all but one required param and check for a ValueError
+ req_param_dict = {
+ "voice": voice,
+ }
+ for param in req_param_dict.keys():
+            req_copy = {key: val if key != param else None for (key, val) in req_param_dict.items()}
+ with pytest.raises(ValueError):
+ _service.get_voice(**req_copy)
+
+ def test_get_voice_value_error_with_retries(self):
+ # Enable retries and run test_get_voice_value_error.
+ _service.enable_retries()
+ self.test_get_voice_value_error()
+
+ # Disable retries and run test_get_voice_value_error.
+ _service.disable_retries()
+ self.test_get_voice_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Voices
+##############################################################################
+
+##############################################################################
+# Start of Service: Synthesis
+##############################################################################
+# region
+
+
+class TestSynthesize:
+ """
+ Test Class for synthesize
+ """
+
+ @responses.activate
+ def test_synthesize_all_params(self):
+ """
+ synthesize()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/synthesize')
+ mock_response = 'This is a mock binary response.'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='audio/alaw',
+ status=200,
+ )
+
+ # Set up parameter values
+ text = 'testString'
+ accept = 'audio/ogg;codecs=opus'
+ voice = 'en-US_MichaelV3Voice'
+ customization_id = 'testString'
+ spell_out_mode = 'default'
+ rate_percentage = 0
+ pitch_percentage = 0
+
+ # Invoke method
+ response = _service.synthesize(
+ text,
+ accept=accept,
+ voice=voice,
+ customization_id=customization_id,
+ spell_out_mode=spell_out_mode,
+ rate_percentage=rate_percentage,
+ pitch_percentage=pitch_percentage,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate query params
+ query_string = responses.calls[0].request.url.split('?', 1)[1]
+ query_string = urllib.parse.unquote_plus(query_string)
+ assert 'voice={}'.format(voice) in query_string
+ assert 'customization_id={}'.format(customization_id) in query_string
+ assert 'spell_out_mode={}'.format(spell_out_mode) in query_string
+ assert 'rate_percentage={}'.format(rate_percentage) in query_string
+ assert 'pitch_percentage={}'.format(pitch_percentage) in query_string
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['text'] == 'testString'
+
+ def test_synthesize_all_params_with_retries(self):
+ # Enable retries and run test_synthesize_all_params.
+ _service.enable_retries()
+ self.test_synthesize_all_params()
+
+ # Disable retries and run test_synthesize_all_params.
+ _service.disable_retries()
+ self.test_synthesize_all_params()
+
+ @responses.activate
+ def test_synthesize_required_params(self):
+ """
+ test_synthesize_required_params()
+ """
+ # Set up mock
+ url = preprocess_url('/v1/synthesize')
+ mock_response = 'This is a mock binary response.'
+ responses.add(
+ responses.POST,
+ url,
+ body=mock_response,
+ content_type='audio/alaw',
+ status=200,
+ )
+
+ # Set up parameter values
+ text = 'testString'
+
+ # Invoke method
+ response = _service.synthesize(
+ text,
+ headers={},
+ )
+
+ # Check for correct operation
+ assert len(responses.calls) == 1
+ assert response.status_code == 200
+ # Validate body params
+ req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
+ assert req_body['text'] == 'testString'
+
+    def test_synthesize_required_params_with_retries(self):
+        # Enable retries and run test_synthesize_required_params.
+        _service.enable_retries()
+        self.test_synthesize_required_params()  # retry wrapper must not change request/response handling
+
+        # Disable retries and run test_synthesize_required_params.
+        _service.disable_retries()
+        self.test_synthesize_required_params()
+
+    @responses.activate
+    def test_synthesize_value_error(self):
+        """
+        test_synthesize_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/synthesize')
+        mock_response = 'This is a mock binary response.'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='audio/alaw',
+            status=200,
+        )
+
+        # Set up parameter values
+        text = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "text": text,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}  # 'is' is safe: key and param are the same str objects from req_param_dict
+            with pytest.raises(ValueError):
+                _service.synthesize(**req_copy)
+
+    def test_synthesize_value_error_with_retries(self):
+        # Enable retries and run test_synthesize_value_error.
+        _service.enable_retries()
+        self.test_synthesize_value_error()  # missing-param validation happens before any request, retries or not
+
+        # Disable retries and run test_synthesize_value_error.
+        _service.disable_retries()
+        self.test_synthesize_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Synthesis
+##############################################################################
+
+##############################################################################
+# Start of Service: Pronunciation
+##############################################################################
+# region
+
+
+class TestGetPronunciation:
+    """
+    Test Class for get_pronunciation
+    """
+
+    @responses.activate
+    def test_get_pronunciation_all_params(self):
+        """
+        get_pronunciation()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/pronunciation')
+        mock_response = '{"pronunciation": "pronunciation"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        text = 'testString'
+        voice = 'en-US_MichaelV3Voice'
+        format = 'ipa'
+        customization_id = 'testString'
+
+        # Invoke method
+        response = _service.get_pronunciation(
+            text,
+            voice=voice,
+            format=format,
+            customization_id=customization_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)  # decode %XX and '+' so the plain-text substring checks below work
+        assert 'text={}'.format(text) in query_string
+        assert 'voice={}'.format(voice) in query_string
+        assert 'format={}'.format(format) in query_string
+        assert 'customization_id={}'.format(customization_id) in query_string
+
+    def test_get_pronunciation_all_params_with_retries(self):
+        # Enable retries and run test_get_pronunciation_all_params.
+        _service.enable_retries()
+        self.test_get_pronunciation_all_params()
+
+        # Disable retries and run test_get_pronunciation_all_params.
+        _service.disable_retries()
+        self.test_get_pronunciation_all_params()
+
+    @responses.activate
+    def test_get_pronunciation_required_params(self):
+        """
+        test_get_pronunciation_required_params()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/pronunciation')
+        mock_response = '{"pronunciation": "pronunciation"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        text = 'testString'
+
+        # Invoke method
+        response = _service.get_pronunciation(
+            text,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)
+        assert 'text={}'.format(text) in query_string
+
+    def test_get_pronunciation_required_params_with_retries(self):
+        # Enable retries and run test_get_pronunciation_required_params.
+        _service.enable_retries()
+        self.test_get_pronunciation_required_params()
+
+        # Disable retries and run test_get_pronunciation_required_params.
+        _service.disable_retries()
+        self.test_get_pronunciation_required_params()
+
+    @responses.activate
+    def test_get_pronunciation_value_error(self):
+        """
+        test_get_pronunciation_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/pronunciation')
+        mock_response = '{"pronunciation": "pronunciation"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        text = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "text": text,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}  # null out one required param per iteration
+            with pytest.raises(ValueError):
+                _service.get_pronunciation(**req_copy)
+
+    def test_get_pronunciation_value_error_with_retries(self):
+        # Enable retries and run test_get_pronunciation_value_error.
+        _service.enable_retries()
+        self.test_get_pronunciation_value_error()
+
+        # Disable retries and run test_get_pronunciation_value_error.
+        _service.disable_retries()
+        self.test_get_pronunciation_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: Pronunciation
+##############################################################################
+
+##############################################################################
+# Start of Service: CustomModels
+##############################################################################
+# region
+
+
+class TestCreateCustomModel:
+    """
+    Test Class for create_custom_model
+    """
+
+    @responses.activate
+    def test_create_custom_model_all_params(self):
+        """
+        create_custom_model()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations')
+        mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Set up parameter values
+        name = 'testString'
+        language = 'en-US'
+        description = 'testString'
+
+        # Invoke method
+        response = _service.create_custom_model(
+            name,
+            language=language,
+            description=description,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))  # request body is bytes; decode before parsing
+        assert req_body['name'] == 'testString'
+        assert req_body['language'] == 'en-US'
+        assert req_body['description'] == 'testString'
+
+    def test_create_custom_model_all_params_with_retries(self):
+        # Enable retries and run test_create_custom_model_all_params.
+        _service.enable_retries()
+        self.test_create_custom_model_all_params()
+
+        # Disable retries and run test_create_custom_model_all_params.
+        _service.disable_retries()
+        self.test_create_custom_model_all_params()
+
+    @responses.activate
+    def test_create_custom_model_value_error(self):
+        """
+        test_create_custom_model_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations')
+        mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Set up parameter values
+        name = 'testString'
+        language = 'en-US'
+        description = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "name": name,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}  # null out one required param per iteration
+            with pytest.raises(ValueError):
+                _service.create_custom_model(**req_copy)
+
+    def test_create_custom_model_value_error_with_retries(self):
+        # Enable retries and run test_create_custom_model_value_error.
+        _service.enable_retries()
+        self.test_create_custom_model_value_error()
+
+        # Disable retries and run test_create_custom_model_value_error.
+        _service.disable_retries()
+        self.test_create_custom_model_value_error()
+
+
+class TestListCustomModels:
+    """
+    Test Class for list_custom_models
+    """
+
+    @responses.activate
+    def test_list_custom_models_all_params(self):
+        """
+        list_custom_models()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations')
+        mock_response = '{"customizations": [{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        language = 'de-DE'
+
+        # Invoke method
+        response = _service.list_custom_models(
+            language=language,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)  # decode %XX and '+' so the substring check below works
+        assert 'language={}'.format(language) in query_string
+
+    def test_list_custom_models_all_params_with_retries(self):
+        # Enable retries and run test_list_custom_models_all_params.
+        _service.enable_retries()
+        self.test_list_custom_models_all_params()
+
+        # Disable retries and run test_list_custom_models_all_params.
+        _service.disable_retries()
+        self.test_list_custom_models_all_params()
+
+    @responses.activate
+    def test_list_custom_models_required_params(self):
+        """
+        test_list_custom_models_required_params()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations')
+        mock_response = '{"customizations": [{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Invoke method
+        response = _service.list_custom_models()  # list_custom_models has no required params
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_list_custom_models_required_params_with_retries(self):
+        # Enable retries and run test_list_custom_models_required_params.
+        _service.enable_retries()
+        self.test_list_custom_models_required_params()
+
+        # Disable retries and run test_list_custom_models_required_params.
+        _service.disable_retries()
+        self.test_list_custom_models_required_params()
+
+
+class TestUpdateCustomModel:
+    """
+    Test Class for update_custom_model
+    """
+
+    @responses.activate
+    def test_update_custom_model_all_params(self):
+        """
+        update_custom_model()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString')
+        responses.add(
+            responses.POST,
+            url,
+            status=200,
+        )
+
+        # Construct a dict representation of a Word model
+        word_model = {}
+        word_model['word'] = 'testString'
+        word_model['translation'] = 'testString'
+        word_model['part_of_speech'] = 'Dosi'
+
+        # Set up parameter values
+        customization_id = 'testString'
+        name = 'testString'
+        description = 'testString'
+        words = [word_model]
+
+        # Invoke method
+        response = _service.update_custom_model(
+            customization_id,
+            name=name,
+            description=description,
+            words=words,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))  # request body is bytes; decode before parsing
+        assert req_body['name'] == 'testString'
+        assert req_body['description'] == 'testString'
+        assert req_body['words'] == [word_model]
+
+    def test_update_custom_model_all_params_with_retries(self):
+        # Enable retries and run test_update_custom_model_all_params.
+        _service.enable_retries()
+        self.test_update_custom_model_all_params()
+
+        # Disable retries and run test_update_custom_model_all_params.
+        _service.disable_retries()
+        self.test_update_custom_model_all_params()
+
+    @responses.activate
+    def test_update_custom_model_value_error(self):
+        """
+        test_update_custom_model_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString')
+        responses.add(
+            responses.POST,
+            url,
+            status=200,
+        )
+
+        # Construct a dict representation of a Word model
+        word_model = {}
+        word_model['word'] = 'testString'
+        word_model['translation'] = 'testString'
+        word_model['part_of_speech'] = 'Dosi'
+
+        # Set up parameter values
+        customization_id = 'testString'
+        name = 'testString'
+        description = 'testString'
+        words = [word_model]
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}  # null out one required param per iteration
+            with pytest.raises(ValueError):
+                _service.update_custom_model(**req_copy)
+
+    def test_update_custom_model_value_error_with_retries(self):
+        # Enable retries and run test_update_custom_model_value_error.
+        _service.enable_retries()
+        self.test_update_custom_model_value_error()
+
+        # Disable retries and run test_update_custom_model_value_error.
+        _service.disable_retries()
+        self.test_update_custom_model_value_error()
+
+
+class TestGetCustomModel:
+    """
+    Test Class for get_custom_model
+    """
+
+    @responses.activate
+    def test_get_custom_model_all_params(self):
+        """
+        get_custom_model()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString')
+        mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+
+        # Invoke method
+        response = _service.get_custom_model(
+            customization_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_get_custom_model_all_params_with_retries(self):
+        # Enable retries and run test_get_custom_model_all_params.
+        _service.enable_retries()
+        self.test_get_custom_model_all_params()
+
+        # Disable retries and run test_get_custom_model_all_params.
+        _service.disable_retries()
+        self.test_get_custom_model_all_params()
+
+    @responses.activate
+    def test_get_custom_model_value_error(self):
+        """
+        test_get_custom_model_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString')
+        mock_response = '{"customization_id": "customization_id", "name": "name", "language": "language", "owner": "owner", "created": "created", "last_modified": "last_modified", "description": "description", "words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}], "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}  # null out one required param per iteration
+            with pytest.raises(ValueError):
+                _service.get_custom_model(**req_copy)
+
+    def test_get_custom_model_value_error_with_retries(self):
+        # Enable retries and run test_get_custom_model_value_error.
+        _service.enable_retries()
+        self.test_get_custom_model_value_error()
+
+        # Disable retries and run test_get_custom_model_value_error.
+        _service.disable_retries()
+        self.test_get_custom_model_value_error()
+
+
+class TestDeleteCustomModel:
+    """
+    Test Class for delete_custom_model
+    """
+
+    @responses.activate
+    def test_delete_custom_model_all_params(self):
+        """
+        delete_custom_model()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=204,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+
+        # Invoke method
+        response = _service.delete_custom_model(
+            customization_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 204  # DELETE returns no content
+
+    def test_delete_custom_model_all_params_with_retries(self):
+        # Enable retries and run test_delete_custom_model_all_params.
+        _service.enable_retries()
+        self.test_delete_custom_model_all_params()
+
+        # Disable retries and run test_delete_custom_model_all_params.
+        _service.disable_retries()
+        self.test_delete_custom_model_all_params()
+
+    @responses.activate
+    def test_delete_custom_model_value_error(self):
+        """
+        test_delete_custom_model_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=204,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}  # null out one required param per iteration
+            with pytest.raises(ValueError):
+                _service.delete_custom_model(**req_copy)
+
+    def test_delete_custom_model_value_error_with_retries(self):
+        # Enable retries and run test_delete_custom_model_value_error.
+        _service.enable_retries()
+        self.test_delete_custom_model_value_error()
+
+        # Disable retries and run test_delete_custom_model_value_error.
+        _service.disable_retries()
+        self.test_delete_custom_model_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: CustomModels
+##############################################################################
+
+##############################################################################
+# Start of Service: CustomWords
+##############################################################################
+# region
+
+
+class TestAddWords:
+    """
+    Test Class for add_words
+    """
+
+    @responses.activate
+    def test_add_words_all_params(self):
+        """
+        add_words()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words')
+        responses.add(
+            responses.POST,
+            url,
+            status=200,
+        )
+
+        # Construct a dict representation of a Word model
+        word_model = {}
+        word_model['word'] = 'testString'
+        word_model['translation'] = 'testString'
+        word_model['part_of_speech'] = 'Dosi'
+
+        # Set up parameter values
+        customization_id = 'testString'
+        words = [word_model]
+
+        # Invoke method
+        response = _service.add_words(
+            customization_id,
+            words,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))  # request body is bytes; decode before parsing
+        assert req_body['words'] == [word_model]
+
+    def test_add_words_all_params_with_retries(self):
+        # Enable retries and run test_add_words_all_params.
+        _service.enable_retries()
+        self.test_add_words_all_params()
+
+        # Disable retries and run test_add_words_all_params.
+        _service.disable_retries()
+        self.test_add_words_all_params()
+
+    @responses.activate
+    def test_add_words_value_error(self):
+        """
+        test_add_words_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words')
+        responses.add(
+            responses.POST,
+            url,
+            status=200,
+        )
+
+        # Construct a dict representation of a Word model
+        word_model = {}
+        word_model['word'] = 'testString'
+        word_model['translation'] = 'testString'
+        word_model['part_of_speech'] = 'Dosi'
+
+        # Set up parameter values
+        customization_id = 'testString'
+        words = [word_model]
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "words": words,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}  # null out one required param per iteration
+            with pytest.raises(ValueError):
+                _service.add_words(**req_copy)
+
+    def test_add_words_value_error_with_retries(self):
+        # Enable retries and run test_add_words_value_error.
+        _service.enable_retries()
+        self.test_add_words_value_error()
+
+        # Disable retries and run test_add_words_value_error.
+        _service.disable_retries()
+        self.test_add_words_value_error()
+
+
+class TestListWords:
+    """
+    Test Class for list_words
+    """
+
+    @responses.activate
+    def test_list_words_all_params(self):
+        """
+        list_words()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words')
+        mock_response = '{"words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+
+        # Invoke method
+        response = _service.list_words(
+            customization_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_list_words_all_params_with_retries(self):
+        # Enable retries and run test_list_words_all_params.
+        _service.enable_retries()
+        self.test_list_words_all_params()
+
+        # Disable retries and run test_list_words_all_params.
+        _service.disable_retries()
+        self.test_list_words_all_params()
+
+    @responses.activate
+    def test_list_words_value_error(self):
+        """
+        test_list_words_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words')
+        mock_response = '{"words": [{"word": "word", "translation": "translation", "part_of_speech": "Dosi"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}  # null out one required param per iteration
+            with pytest.raises(ValueError):
+                _service.list_words(**req_copy)
+
+    def test_list_words_value_error_with_retries(self):
+        # Enable retries and run test_list_words_value_error.
+        _service.enable_retries()
+        self.test_list_words_value_error()
+
+        # Disable retries and run test_list_words_value_error.
+        _service.disable_retries()
+        self.test_list_words_value_error()
+
+
+class TestAddWord:
+    """
+    Test Class for add_word
+    """
+
+    @responses.activate
+    def test_add_word_all_params(self):
+        """
+        add_word()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words/testString')
+        responses.add(
+            responses.PUT,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        word = 'testString'
+        translation = 'testString'
+        part_of_speech = 'Dosi'
+
+        # Invoke method
+        response = _service.add_word(
+            customization_id,
+            word,
+            translation,
+            part_of_speech=part_of_speech,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate body params
+        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))  # request body is bytes; decode before parsing
+        assert req_body['translation'] == 'testString'
+        assert req_body['part_of_speech'] == 'Dosi'
+
+    def test_add_word_all_params_with_retries(self):
+        # Enable retries and run test_add_word_all_params.
+        _service.enable_retries()
+        self.test_add_word_all_params()
+
+        # Disable retries and run test_add_word_all_params.
+        _service.disable_retries()
+        self.test_add_word_all_params()
+
+    @responses.activate
+    def test_add_word_value_error(self):
+        """
+        test_add_word_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words/testString')
+        responses.add(
+            responses.PUT,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        word = 'testString'
+        translation = 'testString'
+        part_of_speech = 'Dosi'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "word": word,
+            "translation": translation,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}  # null out one required param per iteration
+            with pytest.raises(ValueError):
+                _service.add_word(**req_copy)
+
+    def test_add_word_value_error_with_retries(self):
+        # Enable retries and run test_add_word_value_error.
+        _service.enable_retries()
+        self.test_add_word_value_error()
+
+        # Disable retries and run test_add_word_value_error.
+        _service.disable_retries()
+        self.test_add_word_value_error()
+
+
+class TestGetWord:
+    """
+    Test Class for get_word
+    """
+
+    @responses.activate
+    def test_get_word_all_params(self):
+        """
+        get_word()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words/testString')
+        mock_response = '{"translation": "translation", "part_of_speech": "Dosi"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        word = 'testString'
+
+        # Invoke method
+        response = _service.get_word(
+            customization_id,
+            word,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_get_word_all_params_with_retries(self):
+        # Enable retries and run test_get_word_all_params.
+        _service.enable_retries()
+        self.test_get_word_all_params()
+
+        # Disable retries and run test_get_word_all_params.
+        _service.disable_retries()
+        self.test_get_word_all_params()
+
+    @responses.activate
+    def test_get_word_value_error(self):
+        """
+        test_get_word_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words/testString')
+        mock_response = '{"translation": "translation", "part_of_speech": "Dosi"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        word = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "word": word,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}  # null out one required param per iteration
+            with pytest.raises(ValueError):
+                _service.get_word(**req_copy)
+
+    def test_get_word_value_error_with_retries(self):
+        # Enable retries and run test_get_word_value_error.
+        _service.enable_retries()
+        self.test_get_word_value_error()
+
+        # Disable retries and run test_get_word_value_error.
+        _service.disable_retries()
+        self.test_get_word_value_error()
+
+
+class TestDeleteWord:
+    """
+    Test Class for delete_word
+    """
+
+    @responses.activate
+    def test_delete_word_all_params(self):
+        """
+        delete_word()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=204,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        word = 'testString'
+
+        # Invoke method
+        response = _service.delete_word(
+            customization_id,
+            word,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 204  # DELETE returns no content
+
+    def test_delete_word_all_params_with_retries(self):
+        # Enable retries and run test_delete_word_all_params.
+        _service.enable_retries()
+        self.test_delete_word_all_params()
+
+        # Disable retries and run test_delete_word_all_params.
+        _service.disable_retries()
+        self.test_delete_word_all_params()
+
+    @responses.activate
+    def test_delete_word_value_error(self):
+        """
+        test_delete_word_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/words/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=204,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        word = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "word": word,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}  # null out one required param per iteration
+            with pytest.raises(ValueError):
+                _service.delete_word(**req_copy)
+
+    def test_delete_word_value_error_with_retries(self):
+        # Enable retries and run test_delete_word_value_error.
+        _service.enable_retries()
+        self.test_delete_word_value_error()
+
+        # Disable retries and run test_delete_word_value_error.
+        _service.disable_retries()
+        self.test_delete_word_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: CustomWords
+##############################################################################
+
+##############################################################################
+# Start of Service: CustomPrompts
+##############################################################################
+# region
+
+
+class TestListCustomPrompts:
+    """
+    Test Class for list_custom_prompts
+    """
+
+    @responses.activate
+    def test_list_custom_prompts_all_params(self):
+        """
+        list_custom_prompts()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/prompts')
+        mock_response = '{"prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+
+        # Invoke method
+        response = _service.list_custom_prompts(
+            customization_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_list_custom_prompts_all_params_with_retries(self):
+        # Enable retries and run test_list_custom_prompts_all_params.
+        _service.enable_retries()
+        self.test_list_custom_prompts_all_params()
+
+        # Disable retries and run test_list_custom_prompts_all_params.
+        _service.disable_retries()
+        self.test_list_custom_prompts_all_params()
+
+    @responses.activate
+    def test_list_custom_prompts_value_error(self):
+        """
+        test_list_custom_prompts_value_error()
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/prompts')
+        mock_response = '{"prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}  # null out one required param per iteration
+            with pytest.raises(ValueError):
+                _service.list_custom_prompts(**req_copy)
+
+    def test_list_custom_prompts_value_error_with_retries(self):
+        # Enable retries and run test_list_custom_prompts_value_error.
+        _service.enable_retries()
+        self.test_list_custom_prompts_value_error()
+
+        # Disable retries and run test_list_custom_prompts_value_error.
+        _service.disable_retries()
+        self.test_list_custom_prompts_value_error()
+
+
+class TestAddCustomPrompt:
+    """
+    Unit tests for the add_custom_prompt operation.
+    """
+
+    @responses.activate
+    def test_add_custom_prompt_all_params(self):
+        """
+        Verify add_custom_prompt() succeeds when all parameters are supplied.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/prompts/testString')
+        mock_response = '{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Construct a dict representation of a PromptMetadata model
+        prompt_metadata_model = {}
+        prompt_metadata_model['prompt_text'] = 'testString'
+        prompt_metadata_model['speaker_id'] = 'testString'
+
+        # Set up parameter values
+        customization_id = 'testString'
+        prompt_id = 'testString'
+        metadata = prompt_metadata_model
+        file = io.BytesIO(b'This is a mock file.').getvalue()
+
+        # Invoke method
+        response = _service.add_custom_prompt(
+            customization_id,
+            prompt_id,
+            metadata,
+            file,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+
+    def test_add_custom_prompt_all_params_with_retries(self):
+        # Enable retries and run test_add_custom_prompt_all_params.
+        _service.enable_retries()
+        self.test_add_custom_prompt_all_params()
+
+        # Disable retries and run test_add_custom_prompt_all_params.
+        _service.disable_retries()
+        self.test_add_custom_prompt_all_params()
+
+    @responses.activate
+    def test_add_custom_prompt_value_error(self):
+        """
+        Verify add_custom_prompt() raises ValueError when a required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/prompts/testString')
+        mock_response = '{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Construct a dict representation of a PromptMetadata model
+        prompt_metadata_model = {}
+        prompt_metadata_model['prompt_text'] = 'testString'
+        prompt_metadata_model['speaker_id'] = 'testString'
+
+        # Set up parameter values
+        customization_id = 'testString'
+        prompt_id = 'testString'
+        metadata = prompt_metadata_model
+        file = io.BytesIO(b'This is a mock file.').getvalue()
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "prompt_id": prompt_id,
+            "metadata": metadata,
+            "file": file,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.add_custom_prompt(**req_copy)
+
+    def test_add_custom_prompt_value_error_with_retries(self):
+        # Enable retries and run test_add_custom_prompt_value_error.
+        _service.enable_retries()
+        self.test_add_custom_prompt_value_error()
+
+        # Disable retries and run test_add_custom_prompt_value_error.
+        _service.disable_retries()
+        self.test_add_custom_prompt_value_error()
+
+
+class TestGetCustomPrompt:
+    """
+    Unit tests for the get_custom_prompt operation.
+    """
+
+    @responses.activate
+    def test_get_custom_prompt_all_params(self):
+        """
+        Verify get_custom_prompt() succeeds when all parameters are supplied.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/prompts/testString')
+        mock_response = '{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        prompt_id = 'testString'
+
+        # Invoke method
+        response = _service.get_custom_prompt(
+            customization_id,
+            prompt_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_get_custom_prompt_all_params_with_retries(self):
+        # Enable retries and run test_get_custom_prompt_all_params.
+        _service.enable_retries()
+        self.test_get_custom_prompt_all_params()
+
+        # Disable retries and run test_get_custom_prompt_all_params.
+        _service.disable_retries()
+        self.test_get_custom_prompt_all_params()
+
+    @responses.activate
+    def test_get_custom_prompt_value_error(self):
+        """
+        Verify get_custom_prompt() raises ValueError when a required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/prompts/testString')
+        mock_response = '{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error", "speaker_id": "speaker_id"}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        prompt_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "prompt_id": prompt_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.get_custom_prompt(**req_copy)
+
+    def test_get_custom_prompt_value_error_with_retries(self):
+        # Enable retries and run test_get_custom_prompt_value_error.
+        _service.enable_retries()
+        self.test_get_custom_prompt_value_error()
+
+        # Disable retries and run test_get_custom_prompt_value_error.
+        _service.disable_retries()
+        self.test_get_custom_prompt_value_error()
+
+
+class TestDeleteCustomPrompt:
+    """
+    Unit tests for the delete_custom_prompt operation.
+    """
+
+    @responses.activate
+    def test_delete_custom_prompt_all_params(self):
+        """
+        Verify delete_custom_prompt() succeeds when all parameters are supplied.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/prompts/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=204,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        prompt_id = 'testString'
+
+        # Invoke method
+        response = _service.delete_custom_prompt(
+            customization_id,
+            prompt_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 204
+
+    def test_delete_custom_prompt_all_params_with_retries(self):
+        # Enable retries and run test_delete_custom_prompt_all_params.
+        _service.enable_retries()
+        self.test_delete_custom_prompt_all_params()
+
+        # Disable retries and run test_delete_custom_prompt_all_params.
+        _service.disable_retries()
+        self.test_delete_custom_prompt_all_params()
+
+    @responses.activate
+    def test_delete_custom_prompt_value_error(self):
+        """
+        Verify delete_custom_prompt() raises ValueError when a required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/customizations/testString/prompts/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=204,
+        )
+
+        # Set up parameter values
+        customization_id = 'testString'
+        prompt_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customization_id": customization_id,
+            "prompt_id": prompt_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.delete_custom_prompt(**req_copy)
+
+    def test_delete_custom_prompt_value_error_with_retries(self):
+        # Enable retries and run test_delete_custom_prompt_value_error.
+        _service.enable_retries()
+        self.test_delete_custom_prompt_value_error()
+
+        # Disable retries and run test_delete_custom_prompt_value_error.
+        _service.disable_retries()
+        self.test_delete_custom_prompt_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: CustomPrompts
+##############################################################################
+
+##############################################################################
+# Start of Service: SpeakerModels
+##############################################################################
+# region
+
+
+class TestListSpeakerModels:
+    """
+    Unit tests for the list_speaker_models operation.
+    """
+
+    @responses.activate
+    def test_list_speaker_models_all_params(self):
+        """
+        Verify list_speaker_models() succeeds against a mocked GET /v1/speakers.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/speakers')
+        mock_response = '{"speakers": [{"speaker_id": "speaker_id", "name": "name"}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Invoke method
+        response = _service.list_speaker_models()
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_list_speaker_models_all_params_with_retries(self):
+        # Enable retries and run test_list_speaker_models_all_params.
+        _service.enable_retries()
+        self.test_list_speaker_models_all_params()
+
+        # Disable retries and run test_list_speaker_models_all_params.
+        _service.disable_retries()
+        self.test_list_speaker_models_all_params()
+
+
+class TestCreateSpeakerModel:
+    """
+    Unit tests for the create_speaker_model operation.
+    """
+
+    @responses.activate
+    def test_create_speaker_model_all_params(self):
+        """
+        Verify create_speaker_model() sends the expected query string and body.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/speakers')
+        mock_response = '{"speaker_id": "speaker_id"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Set up parameter values
+        speaker_name = 'testString'
+        audio = io.BytesIO(b'This is a mock file.').getvalue()
+
+        # Invoke method
+        response = _service.create_speaker_model(
+            speaker_name,
+            audio,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 201
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)
+        assert 'speaker_name={}'.format(speaker_name) in query_string
+        # Validate body params
+        assert responses.calls[0].request.body == audio
+
+    def test_create_speaker_model_all_params_with_retries(self):
+        # Enable retries and run test_create_speaker_model_all_params.
+        _service.enable_retries()
+        self.test_create_speaker_model_all_params()
+
+        # Disable retries and run test_create_speaker_model_all_params.
+        _service.disable_retries()
+        self.test_create_speaker_model_all_params()
+
+    @responses.activate
+    def test_create_speaker_model_value_error(self):
+        """
+        Verify create_speaker_model() raises ValueError when a required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/speakers')
+        mock_response = '{"speaker_id": "speaker_id"}'
+        responses.add(
+            responses.POST,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=201,
+        )
+
+        # Set up parameter values
+        speaker_name = 'testString'
+        audio = io.BytesIO(b'This is a mock file.').getvalue()
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "speaker_name": speaker_name,
+            "audio": audio,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.create_speaker_model(**req_copy)
+
+    def test_create_speaker_model_value_error_with_retries(self):
+        # Enable retries and run test_create_speaker_model_value_error.
+        _service.enable_retries()
+        self.test_create_speaker_model_value_error()
+
+        # Disable retries and run test_create_speaker_model_value_error.
+        _service.disable_retries()
+        self.test_create_speaker_model_value_error()
+
+
+class TestGetSpeakerModel:
+    """
+    Unit tests for the get_speaker_model operation.
+    """
+
+    @responses.activate
+    def test_get_speaker_model_all_params(self):
+        """
+        Verify get_speaker_model() succeeds when all parameters are supplied.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/speakers/testString')
+        mock_response = '{"customizations": [{"customization_id": "customization_id", "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error"}]}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        speaker_id = 'testString'
+
+        # Invoke method
+        response = _service.get_speaker_model(
+            speaker_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+
+    def test_get_speaker_model_all_params_with_retries(self):
+        # Enable retries and run test_get_speaker_model_all_params.
+        _service.enable_retries()
+        self.test_get_speaker_model_all_params()
+
+        # Disable retries and run test_get_speaker_model_all_params.
+        _service.disable_retries()
+        self.test_get_speaker_model_all_params()
+
+    @responses.activate
+    def test_get_speaker_model_value_error(self):
+        """
+        Verify get_speaker_model() raises ValueError when a required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/speakers/testString')
+        mock_response = '{"customizations": [{"customization_id": "customization_id", "prompts": [{"prompt": "prompt", "prompt_id": "prompt_id", "status": "status", "error": "error"}]}]}'
+        responses.add(
+            responses.GET,
+            url,
+            body=mock_response,
+            content_type='application/json',
+            status=200,
+        )
+
+        # Set up parameter values
+        speaker_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "speaker_id": speaker_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.get_speaker_model(**req_copy)
+
+    def test_get_speaker_model_value_error_with_retries(self):
+        # Enable retries and run test_get_speaker_model_value_error.
+        _service.enable_retries()
+        self.test_get_speaker_model_value_error()
+
+        # Disable retries and run test_get_speaker_model_value_error.
+        _service.disable_retries()
+        self.test_get_speaker_model_value_error()
+
+
+class TestDeleteSpeakerModel:
+    """
+    Unit tests for the delete_speaker_model operation.
+    """
+
+    @responses.activate
+    def test_delete_speaker_model_all_params(self):
+        """
+        Verify delete_speaker_model() succeeds when all parameters are supplied.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/speakers/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=204,
+        )
+
+        # Set up parameter values
+        speaker_id = 'testString'
+
+        # Invoke method
+        response = _service.delete_speaker_model(
+            speaker_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 204
+
+    def test_delete_speaker_model_all_params_with_retries(self):
+        # Enable retries and run test_delete_speaker_model_all_params.
+        _service.enable_retries()
+        self.test_delete_speaker_model_all_params()
+
+        # Disable retries and run test_delete_speaker_model_all_params.
+        _service.disable_retries()
+        self.test_delete_speaker_model_all_params()
+
+    @responses.activate
+    def test_delete_speaker_model_value_error(self):
+        """
+        Verify delete_speaker_model() raises ValueError when a required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/speakers/testString')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=204,
+        )
+
+        # Set up parameter values
+        speaker_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "speaker_id": speaker_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.delete_speaker_model(**req_copy)
+
+    def test_delete_speaker_model_value_error_with_retries(self):
+        # Enable retries and run test_delete_speaker_model_value_error.
+        _service.enable_retries()
+        self.test_delete_speaker_model_value_error()
+
+        # Disable retries and run test_delete_speaker_model_value_error.
+        _service.disable_retries()
+        self.test_delete_speaker_model_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: SpeakerModels
+##############################################################################
+
+##############################################################################
+# Start of Service: UserData
+##############################################################################
+# region
+
+
+class TestDeleteUserData:
+    """
+    Unit tests for the delete_user_data operation.
+    """
+
+    @responses.activate
+    def test_delete_user_data_all_params(self):
+        """
+        Verify delete_user_data() sends customer_id in the query string.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/user_data')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        customer_id = 'testString'
+
+        # Invoke method
+        response = _service.delete_user_data(
+            customer_id,
+            headers={},
+        )
+
+        # Check for correct operation
+        assert len(responses.calls) == 1
+        assert response.status_code == 200
+        # Validate query params
+        query_string = responses.calls[0].request.url.split('?', 1)[1]
+        query_string = urllib.parse.unquote_plus(query_string)
+        assert 'customer_id={}'.format(customer_id) in query_string
+
+    def test_delete_user_data_all_params_with_retries(self):
+        # Enable retries and run test_delete_user_data_all_params.
+        _service.enable_retries()
+        self.test_delete_user_data_all_params()
+
+        # Disable retries and run test_delete_user_data_all_params.
+        _service.disable_retries()
+        self.test_delete_user_data_all_params()
+
+    @responses.activate
+    def test_delete_user_data_value_error(self):
+        """
+        Verify delete_user_data() raises ValueError when a required parameter is None.
+        """
+        # Set up mock
+        url = preprocess_url('/v1/user_data')
+        responses.add(
+            responses.DELETE,
+            url,
+            status=200,
+        )
+
+        # Set up parameter values
+        customer_id = 'testString'
+
+        # Pass in all but one required param and check for a ValueError
+        req_param_dict = {
+            "customer_id": customer_id,
+        }
+        for param in req_param_dict.keys():
+            req_copy = {key: val if key is not param else None for (key, val) in req_param_dict.items()}
+            with pytest.raises(ValueError):
+                _service.delete_user_data(**req_copy)
+
+    def test_delete_user_data_value_error_with_retries(self):
+        # Enable retries and run test_delete_user_data_value_error.
+        _service.enable_retries()
+        self.test_delete_user_data_value_error()
+
+        # Disable retries and run test_delete_user_data_value_error.
+        _service.disable_retries()
+        self.test_delete_user_data_value_error()
+
+
+# endregion
+##############################################################################
+# End of Service: UserData
+##############################################################################
+
+
+##############################################################################
+# Start of Model Tests
+##############################################################################
+# region
+
+
+class TestModel_CustomModel:
+    """
+    Unit tests for the CustomModel model class.
+    """
+
+    def test_custom_model_serialization(self):
+        """
+        Round-trip test: dict -> CustomModel -> dict preserves all data.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        word_model = {}  # Word
+        word_model['word'] = 'testString'
+        word_model['translation'] = 'testString'
+        word_model['part_of_speech'] = 'Dosi'
+
+        prompt_model = {}  # Prompt
+        prompt_model['prompt'] = 'testString'
+        prompt_model['prompt_id'] = 'testString'
+        prompt_model['status'] = 'testString'
+        prompt_model['error'] = 'testString'
+        prompt_model['speaker_id'] = 'testString'
+
+        # Construct a json representation of a CustomModel model
+        custom_model_model_json = {}
+        custom_model_model_json['customization_id'] = 'testString'
+        custom_model_model_json['name'] = 'testString'
+        custom_model_model_json['language'] = 'testString'
+        custom_model_model_json['owner'] = 'testString'
+        custom_model_model_json['created'] = 'testString'
+        custom_model_model_json['last_modified'] = 'testString'
+        custom_model_model_json['description'] = 'testString'
+        custom_model_model_json['words'] = [word_model]
+        custom_model_model_json['prompts'] = [prompt_model]
+
+        # Construct a model instance of CustomModel by calling from_dict on the json representation
+        custom_model_model = CustomModel.from_dict(custom_model_model_json)
+        assert custom_model_model != False
+
+        # Construct a model instance of CustomModel by calling from_dict on the json representation
+        custom_model_model_dict = CustomModel.from_dict(custom_model_model_json).__dict__
+        custom_model_model2 = CustomModel(**custom_model_model_dict)
+
+        # Verify the model instances are equivalent
+        assert custom_model_model == custom_model_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        custom_model_model_json2 = custom_model_model.to_dict()
+        assert custom_model_model_json2 == custom_model_model_json
+
+
+class TestModel_CustomModels:
+    """
+    Unit tests for the CustomModels model class.
+    """
+
+    def test_custom_models_serialization(self):
+        """
+        Round-trip test: dict -> CustomModels -> dict preserves all data.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        word_model = {}  # Word
+        word_model['word'] = 'testString'
+        word_model['translation'] = 'testString'
+        word_model['part_of_speech'] = 'Dosi'
+
+        prompt_model = {}  # Prompt
+        prompt_model['prompt'] = 'testString'
+        prompt_model['prompt_id'] = 'testString'
+        prompt_model['status'] = 'testString'
+        prompt_model['error'] = 'testString'
+        prompt_model['speaker_id'] = 'testString'
+
+        custom_model_model = {}  # CustomModel
+        custom_model_model['customization_id'] = 'testString'
+        custom_model_model['name'] = 'testString'
+        custom_model_model['language'] = 'testString'
+        custom_model_model['owner'] = 'testString'
+        custom_model_model['created'] = 'testString'
+        custom_model_model['last_modified'] = 'testString'
+        custom_model_model['description'] = 'testString'
+        custom_model_model['words'] = [word_model]
+        custom_model_model['prompts'] = [prompt_model]
+
+        # Construct a json representation of a CustomModels model
+        custom_models_model_json = {}
+        custom_models_model_json['customizations'] = [custom_model_model]
+
+        # Construct a model instance of CustomModels by calling from_dict on the json representation
+        custom_models_model = CustomModels.from_dict(custom_models_model_json)
+        assert custom_models_model != False
+
+        # Construct a model instance of CustomModels by calling from_dict on the json representation
+        custom_models_model_dict = CustomModels.from_dict(custom_models_model_json).__dict__
+        custom_models_model2 = CustomModels(**custom_models_model_dict)
+
+        # Verify the model instances are equivalent
+        assert custom_models_model == custom_models_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        custom_models_model_json2 = custom_models_model.to_dict()
+        assert custom_models_model_json2 == custom_models_model_json
+
+
+class TestModel_Prompt:
+    """
+    Unit tests for the Prompt model class.
+    """
+
+    def test_prompt_serialization(self):
+        """
+        Round-trip test: dict -> Prompt -> dict preserves all data.
+        """
+
+        # Construct a json representation of a Prompt model
+        prompt_model_json = {}
+        prompt_model_json['prompt'] = 'testString'
+        prompt_model_json['prompt_id'] = 'testString'
+        prompt_model_json['status'] = 'testString'
+        prompt_model_json['error'] = 'testString'
+        prompt_model_json['speaker_id'] = 'testString'
+
+        # Construct a model instance of Prompt by calling from_dict on the json representation
+        prompt_model = Prompt.from_dict(prompt_model_json)
+        assert prompt_model != False
+
+        # Construct a model instance of Prompt by calling from_dict on the json representation
+        prompt_model_dict = Prompt.from_dict(prompt_model_json).__dict__
+        prompt_model2 = Prompt(**prompt_model_dict)
+
+        # Verify the model instances are equivalent
+        assert prompt_model == prompt_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        prompt_model_json2 = prompt_model.to_dict()
+        assert prompt_model_json2 == prompt_model_json
+
+
+class TestModel_PromptMetadata:
+    """
+    Unit tests for the PromptMetadata model class.
+    """
+
+    def test_prompt_metadata_serialization(self):
+        """
+        Round-trip test: dict -> PromptMetadata -> dict preserves all data.
+        """
+
+        # Construct a json representation of a PromptMetadata model
+        prompt_metadata_model_json = {}
+        prompt_metadata_model_json['prompt_text'] = 'testString'
+        prompt_metadata_model_json['speaker_id'] = 'testString'
+
+        # Construct a model instance of PromptMetadata by calling from_dict on the json representation
+        prompt_metadata_model = PromptMetadata.from_dict(prompt_metadata_model_json)
+        assert prompt_metadata_model != False
+
+        # Construct a model instance of PromptMetadata by calling from_dict on the json representation
+        prompt_metadata_model_dict = PromptMetadata.from_dict(prompt_metadata_model_json).__dict__
+        prompt_metadata_model2 = PromptMetadata(**prompt_metadata_model_dict)
+
+        # Verify the model instances are equivalent
+        assert prompt_metadata_model == prompt_metadata_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        prompt_metadata_model_json2 = prompt_metadata_model.to_dict()
+        assert prompt_metadata_model_json2 == prompt_metadata_model_json
+
+
+class TestModel_Prompts:
+    """
+    Unit tests for the Prompts model class.
+    """
+
+    def test_prompts_serialization(self):
+        """
+        Round-trip test: dict -> Prompts -> dict preserves all data.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        prompt_model = {}  # Prompt
+        prompt_model['prompt'] = 'testString'
+        prompt_model['prompt_id'] = 'testString'
+        prompt_model['status'] = 'testString'
+        prompt_model['error'] = 'testString'
+        prompt_model['speaker_id'] = 'testString'
+
+        # Construct a json representation of a Prompts model
+        prompts_model_json = {}
+        prompts_model_json['prompts'] = [prompt_model]
+
+        # Construct a model instance of Prompts by calling from_dict on the json representation
+        prompts_model = Prompts.from_dict(prompts_model_json)
+        assert prompts_model != False
+
+        # Construct a model instance of Prompts by calling from_dict on the json representation
+        prompts_model_dict = Prompts.from_dict(prompts_model_json).__dict__
+        prompts_model2 = Prompts(**prompts_model_dict)
+
+        # Verify the model instances are equivalent
+        assert prompts_model == prompts_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        prompts_model_json2 = prompts_model.to_dict()
+        assert prompts_model_json2 == prompts_model_json
+
+
+class TestModel_Pronunciation:
+    """
+    Unit tests for the Pronunciation model class.
+    """
+
+    def test_pronunciation_serialization(self):
+        """
+        Round-trip test: dict -> Pronunciation -> dict preserves all data.
+        """
+
+        # Construct a json representation of a Pronunciation model
+        pronunciation_model_json = {}
+        pronunciation_model_json['pronunciation'] = 'testString'
+
+        # Construct a model instance of Pronunciation by calling from_dict on the json representation
+        pronunciation_model = Pronunciation.from_dict(pronunciation_model_json)
+        assert pronunciation_model != False
+
+        # Construct a model instance of Pronunciation by calling from_dict on the json representation
+        pronunciation_model_dict = Pronunciation.from_dict(pronunciation_model_json).__dict__
+        pronunciation_model2 = Pronunciation(**pronunciation_model_dict)
+
+        # Verify the model instances are equivalent
+        assert pronunciation_model == pronunciation_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        pronunciation_model_json2 = pronunciation_model.to_dict()
+        assert pronunciation_model_json2 == pronunciation_model_json
+
+
+class TestModel_Speaker:
+    """
+    Unit tests for the Speaker model class.
+    """
+
+    def test_speaker_serialization(self):
+        """
+        Round-trip test: dict -> Speaker -> dict preserves all data.
+        """
+
+        # Construct a json representation of a Speaker model
+        speaker_model_json = {}
+        speaker_model_json['speaker_id'] = 'testString'
+        speaker_model_json['name'] = 'testString'
+
+        # Construct a model instance of Speaker by calling from_dict on the json representation
+        speaker_model = Speaker.from_dict(speaker_model_json)
+        assert speaker_model != False
+
+        # Construct a model instance of Speaker by calling from_dict on the json representation
+        speaker_model_dict = Speaker.from_dict(speaker_model_json).__dict__
+        speaker_model2 = Speaker(**speaker_model_dict)
+
+        # Verify the model instances are equivalent
+        assert speaker_model == speaker_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        speaker_model_json2 = speaker_model.to_dict()
+        assert speaker_model_json2 == speaker_model_json
+
+
+class TestModel_SpeakerCustomModel:
+    """
+    Unit tests for the SpeakerCustomModel model class.
+    """
+
+    def test_speaker_custom_model_serialization(self):
+        """
+        Round-trip test: dict -> SpeakerCustomModel -> dict preserves all data.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        speaker_prompt_model = {}  # SpeakerPrompt
+        speaker_prompt_model['prompt'] = 'testString'
+        speaker_prompt_model['prompt_id'] = 'testString'
+        speaker_prompt_model['status'] = 'testString'
+        speaker_prompt_model['error'] = 'testString'
+
+        # Construct a json representation of a SpeakerCustomModel model
+        speaker_custom_model_model_json = {}
+        speaker_custom_model_model_json['customization_id'] = 'testString'
+        speaker_custom_model_model_json['prompts'] = [speaker_prompt_model]
+
+        # Construct a model instance of SpeakerCustomModel by calling from_dict on the json representation
+        speaker_custom_model_model = SpeakerCustomModel.from_dict(speaker_custom_model_model_json)
+        assert speaker_custom_model_model != False
+
+        # Construct a model instance of SpeakerCustomModel by calling from_dict on the json representation
+        speaker_custom_model_model_dict = SpeakerCustomModel.from_dict(speaker_custom_model_model_json).__dict__
+        speaker_custom_model_model2 = SpeakerCustomModel(**speaker_custom_model_model_dict)
+
+        # Verify the model instances are equivalent
+        assert speaker_custom_model_model == speaker_custom_model_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        speaker_custom_model_model_json2 = speaker_custom_model_model.to_dict()
+        assert speaker_custom_model_model_json2 == speaker_custom_model_model_json
+
+
+class TestModel_SpeakerCustomModels:
+    """
+    Unit tests for the SpeakerCustomModels model class.
+    """
+
+    def test_speaker_custom_models_serialization(self):
+        """
+        Round-trip test: dict -> SpeakerCustomModels -> dict preserves all data.
+        """
+
+        # Construct dict forms of any model objects needed in order to build this model.
+
+        speaker_prompt_model = {}  # SpeakerPrompt
+        speaker_prompt_model['prompt'] = 'testString'
+        speaker_prompt_model['prompt_id'] = 'testString'
+        speaker_prompt_model['status'] = 'testString'
+        speaker_prompt_model['error'] = 'testString'
+
+        speaker_custom_model_model = {}  # SpeakerCustomModel
+        speaker_custom_model_model['customization_id'] = 'testString'
+        speaker_custom_model_model['prompts'] = [speaker_prompt_model]
+
+        # Construct a json representation of a SpeakerCustomModels model
+        speaker_custom_models_model_json = {}
+        speaker_custom_models_model_json['customizations'] = [speaker_custom_model_model]
+
+        # Construct a model instance of SpeakerCustomModels by calling from_dict on the json representation
+        speaker_custom_models_model = SpeakerCustomModels.from_dict(speaker_custom_models_model_json)
+        assert speaker_custom_models_model != False
+
+        # Construct a model instance of SpeakerCustomModels by calling from_dict on the json representation
+        speaker_custom_models_model_dict = SpeakerCustomModels.from_dict(speaker_custom_models_model_json).__dict__
+        speaker_custom_models_model2 = SpeakerCustomModels(**speaker_custom_models_model_dict)
+
+        # Verify the model instances are equivalent
+        assert speaker_custom_models_model == speaker_custom_models_model2
+
+        # Convert model instance back to dict and verify no loss of data
+        speaker_custom_models_model_json2 = speaker_custom_models_model.to_dict()
+        assert speaker_custom_models_model_json2 == speaker_custom_models_model_json
+
+
+class TestModel_SpeakerModel:
+ """
+ Test Class for SpeakerModel
+ """
+
+ def test_speaker_model_serialization(self):
+ """
+ Test serialization/deserialization for SpeakerModel
+ """
+
+ # Construct a json representation of a SpeakerModel model
+ speaker_model_model_json = {}
+ speaker_model_model_json['speaker_id'] = 'testString'
+
+ # Construct a model instance of SpeakerModel by calling from_dict on the json representation
+ speaker_model_model = SpeakerModel.from_dict(speaker_model_model_json)
+ assert speaker_model_model != False
+
+ # Construct a model instance of SpeakerModel by calling from_dict on the json representation
+ speaker_model_model_dict = SpeakerModel.from_dict(speaker_model_model_json).__dict__
+ speaker_model_model2 = SpeakerModel(**speaker_model_model_dict)
+
+ # Verify the model instances are equivalent
+ assert speaker_model_model == speaker_model_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ speaker_model_model_json2 = speaker_model_model.to_dict()
+ assert speaker_model_model_json2 == speaker_model_model_json
+
+
+class TestModel_SpeakerPrompt:
+ """
+ Test Class for SpeakerPrompt
+ """
+
+ def test_speaker_prompt_serialization(self):
+ """
+ Test serialization/deserialization for SpeakerPrompt
+ """
+
+ # Construct a json representation of a SpeakerPrompt model
+ speaker_prompt_model_json = {}
+ speaker_prompt_model_json['prompt'] = 'testString'
+ speaker_prompt_model_json['prompt_id'] = 'testString'
+ speaker_prompt_model_json['status'] = 'testString'
+ speaker_prompt_model_json['error'] = 'testString'
+
+ # Construct a model instance of SpeakerPrompt by calling from_dict on the json representation
+ speaker_prompt_model = SpeakerPrompt.from_dict(speaker_prompt_model_json)
+ assert speaker_prompt_model != False
+
+ # Construct a model instance of SpeakerPrompt by calling from_dict on the json representation
+ speaker_prompt_model_dict = SpeakerPrompt.from_dict(speaker_prompt_model_json).__dict__
+ speaker_prompt_model2 = SpeakerPrompt(**speaker_prompt_model_dict)
+
+ # Verify the model instances are equivalent
+ assert speaker_prompt_model == speaker_prompt_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ speaker_prompt_model_json2 = speaker_prompt_model.to_dict()
+ assert speaker_prompt_model_json2 == speaker_prompt_model_json
+
+
+class TestModel_Speakers:
+ """
+ Test Class for Speakers
+ """
+
+ def test_speakers_serialization(self):
+ """
+ Test serialization/deserialization for Speakers
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ speaker_model = {} # Speaker
+ speaker_model['speaker_id'] = 'testString'
+ speaker_model['name'] = 'testString'
+
+ # Construct a json representation of a Speakers model
+ speakers_model_json = {}
+ speakers_model_json['speakers'] = [speaker_model]
+
+ # Construct a model instance of Speakers by calling from_dict on the json representation
+ speakers_model = Speakers.from_dict(speakers_model_json)
+ assert speakers_model != False
+
+ # Construct a model instance of Speakers by calling from_dict on the json representation
+ speakers_model_dict = Speakers.from_dict(speakers_model_json).__dict__
+ speakers_model2 = Speakers(**speakers_model_dict)
+
+ # Verify the model instances are equivalent
+ assert speakers_model == speakers_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ speakers_model_json2 = speakers_model.to_dict()
+ assert speakers_model_json2 == speakers_model_json
+
+
+class TestModel_SupportedFeatures:
+ """
+ Test Class for SupportedFeatures
+ """
+
+ def test_supported_features_serialization(self):
+ """
+ Test serialization/deserialization for SupportedFeatures
+ """
+
+ # Construct a json representation of a SupportedFeatures model
+ supported_features_model_json = {}
+ supported_features_model_json['custom_pronunciation'] = True
+ supported_features_model_json['voice_transformation'] = True
+
+ # Construct a model instance of SupportedFeatures by calling from_dict on the json representation
+ supported_features_model = SupportedFeatures.from_dict(supported_features_model_json)
+ assert supported_features_model != False
+
+ # Construct a model instance of SupportedFeatures by calling from_dict on the json representation
+ supported_features_model_dict = SupportedFeatures.from_dict(supported_features_model_json).__dict__
+ supported_features_model2 = SupportedFeatures(**supported_features_model_dict)
+
+ # Verify the model instances are equivalent
+ assert supported_features_model == supported_features_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ supported_features_model_json2 = supported_features_model.to_dict()
+ assert supported_features_model_json2 == supported_features_model_json
+
+
+class TestModel_Translation:
+ """
+ Test Class for Translation
+ """
+
+ def test_translation_serialization(self):
+ """
+ Test serialization/deserialization for Translation
+ """
+
+ # Construct a json representation of a Translation model
+ translation_model_json = {}
+ translation_model_json['translation'] = 'testString'
+ translation_model_json['part_of_speech'] = 'Dosi'
+
+ # Construct a model instance of Translation by calling from_dict on the json representation
+ translation_model = Translation.from_dict(translation_model_json)
+ assert translation_model != False
+
+ # Construct a model instance of Translation by calling from_dict on the json representation
+ translation_model_dict = Translation.from_dict(translation_model_json).__dict__
+ translation_model2 = Translation(**translation_model_dict)
+
+ # Verify the model instances are equivalent
+ assert translation_model == translation_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ translation_model_json2 = translation_model.to_dict()
+ assert translation_model_json2 == translation_model_json
+
+
+class TestModel_Voice:
+ """
+ Test Class for Voice
+ """
+
+ def test_voice_serialization(self):
+ """
+ Test serialization/deserialization for Voice
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ supported_features_model = {} # SupportedFeatures
+ supported_features_model['custom_pronunciation'] = True
+ supported_features_model['voice_transformation'] = True
+
+ word_model = {} # Word
+ word_model['word'] = 'testString'
+ word_model['translation'] = 'testString'
+ word_model['part_of_speech'] = 'Dosi'
+
+ prompt_model = {} # Prompt
+ prompt_model['prompt'] = 'testString'
+ prompt_model['prompt_id'] = 'testString'
+ prompt_model['status'] = 'testString'
+ prompt_model['error'] = 'testString'
+ prompt_model['speaker_id'] = 'testString'
+
+ custom_model_model = {} # CustomModel
+ custom_model_model['customization_id'] = 'testString'
+ custom_model_model['name'] = 'testString'
+ custom_model_model['language'] = 'testString'
+ custom_model_model['owner'] = 'testString'
+ custom_model_model['created'] = 'testString'
+ custom_model_model['last_modified'] = 'testString'
+ custom_model_model['description'] = 'testString'
+ custom_model_model['words'] = [word_model]
+ custom_model_model['prompts'] = [prompt_model]
+
+ # Construct a json representation of a Voice model
+ voice_model_json = {}
+ voice_model_json['url'] = 'testString'
+ voice_model_json['gender'] = 'testString'
+ voice_model_json['name'] = 'testString'
+ voice_model_json['language'] = 'testString'
+ voice_model_json['description'] = 'testString'
+ voice_model_json['customizable'] = True
+ voice_model_json['supported_features'] = supported_features_model
+ voice_model_json['customization'] = custom_model_model
+
+ # Construct a model instance of Voice by calling from_dict on the json representation
+ voice_model = Voice.from_dict(voice_model_json)
+ assert voice_model != False
+
+ # Construct a model instance of Voice by calling from_dict on the json representation
+ voice_model_dict = Voice.from_dict(voice_model_json).__dict__
+ voice_model2 = Voice(**voice_model_dict)
+
+ # Verify the model instances are equivalent
+ assert voice_model == voice_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ voice_model_json2 = voice_model.to_dict()
+ assert voice_model_json2 == voice_model_json
+
+
+class TestModel_Voices:
+ """
+ Test Class for Voices
+ """
+
+ def test_voices_serialization(self):
+ """
+ Test serialization/deserialization for Voices
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ supported_features_model = {} # SupportedFeatures
+ supported_features_model['custom_pronunciation'] = True
+ supported_features_model['voice_transformation'] = True
+
+ word_model = {} # Word
+ word_model['word'] = 'testString'
+ word_model['translation'] = 'testString'
+ word_model['part_of_speech'] = 'Dosi'
+
+ prompt_model = {} # Prompt
+ prompt_model['prompt'] = 'testString'
+ prompt_model['prompt_id'] = 'testString'
+ prompt_model['status'] = 'testString'
+ prompt_model['error'] = 'testString'
+ prompt_model['speaker_id'] = 'testString'
+
+ custom_model_model = {} # CustomModel
+ custom_model_model['customization_id'] = 'testString'
+ custom_model_model['name'] = 'testString'
+ custom_model_model['language'] = 'testString'
+ custom_model_model['owner'] = 'testString'
+ custom_model_model['created'] = 'testString'
+ custom_model_model['last_modified'] = 'testString'
+ custom_model_model['description'] = 'testString'
+ custom_model_model['words'] = [word_model]
+ custom_model_model['prompts'] = [prompt_model]
+
+ voice_model = {} # Voice
+ voice_model['url'] = 'testString'
+ voice_model['gender'] = 'testString'
+ voice_model['name'] = 'testString'
+ voice_model['language'] = 'testString'
+ voice_model['description'] = 'testString'
+ voice_model['customizable'] = True
+ voice_model['supported_features'] = supported_features_model
+ voice_model['customization'] = custom_model_model
+
+ # Construct a json representation of a Voices model
+ voices_model_json = {}
+ voices_model_json['voices'] = [voice_model]
+
+ # Construct a model instance of Voices by calling from_dict on the json representation
+ voices_model = Voices.from_dict(voices_model_json)
+ assert voices_model != False
+
+ # Construct a model instance of Voices by calling from_dict on the json representation
+ voices_model_dict = Voices.from_dict(voices_model_json).__dict__
+ voices_model2 = Voices(**voices_model_dict)
+
+ # Verify the model instances are equivalent
+ assert voices_model == voices_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ voices_model_json2 = voices_model.to_dict()
+ assert voices_model_json2 == voices_model_json
+
+
+class TestModel_Word:
+ """
+ Test Class for Word
+ """
+
+ def test_word_serialization(self):
+ """
+ Test serialization/deserialization for Word
+ """
+
+ # Construct a json representation of a Word model
+ word_model_json = {}
+ word_model_json['word'] = 'testString'
+ word_model_json['translation'] = 'testString'
+ word_model_json['part_of_speech'] = 'Dosi'
+
+ # Construct a model instance of Word by calling from_dict on the json representation
+ word_model = Word.from_dict(word_model_json)
+ assert word_model != False
+
+ # Construct a model instance of Word by calling from_dict on the json representation
+ word_model_dict = Word.from_dict(word_model_json).__dict__
+ word_model2 = Word(**word_model_dict)
+
+ # Verify the model instances are equivalent
+ assert word_model == word_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ word_model_json2 = word_model.to_dict()
+ assert word_model_json2 == word_model_json
+
+
+class TestModel_Words:
+ """
+ Test Class for Words
+ """
+
+ def test_words_serialization(self):
+ """
+ Test serialization/deserialization for Words
+ """
+
+ # Construct dict forms of any model objects needed in order to build this model.
+
+ word_model = {} # Word
+ word_model['word'] = 'testString'
+ word_model['translation'] = 'testString'
+ word_model['part_of_speech'] = 'Dosi'
+
+ # Construct a json representation of a Words model
+ words_model_json = {}
+ words_model_json['words'] = [word_model]
+
+ # Construct a model instance of Words by calling from_dict on the json representation
+ words_model = Words.from_dict(words_model_json)
+ assert words_model != False
+
+ # Construct a model instance of Words by calling from_dict on the json representation
+ words_model_dict = Words.from_dict(words_model_json).__dict__
+ words_model2 = Words(**words_model_dict)
+
+ # Verify the model instances are equivalent
+ assert words_model == words_model2
+
+ # Convert model instance back to dict and verify no loss of data
+ words_model_json2 = words_model.to_dict()
+ assert words_model_json2 == words_model_json
-@responses.activate
-def test_success():
- voices_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/voices'
- voices_response = {
- "voices": [{
- "url":
- "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEnUsLisa",
- "gender":
- "female",
- "name":
- "VoiceEnUsLisa",
- "language":
- "en-US"
- }, {
- "url":
- "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEsEsEnrique",
- "gender":
- "male",
- "name":
- "VoiceEsEsEnrique",
- "language":
- "es-ES"
- }, {
- "url":
- "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEnUsMichael",
- "gender":
- "male",
- "name":
- "VoiceEnUsMichael",
- "language":
- "en-US"
- }, {
- "url":
- "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEnUsAllison",
- "gender":
- "female",
- "name":
- "VoiceEnUsAllison",
- "language":
- "en-US"
- }]
- }
- voice_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/voices/en-us_AllisonVoice'
- voice_response = {
- "url":
- "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/en-US_AllisonVoice",
- "name":
- "en-US_AllisonVoice",
- "language":
- "en-US",
- "customizable":
- True,
- "gender":
- "female",
- "description":
- "Allison: American English female voice.",
- "supported_features": {
- "custom_pronunciation": True,
- "voice_transformation": True
- }
- }
- synthesize_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/synthesize'
- synthesize_response_body = ''
-
- responses.add(
- responses.GET,
- voices_url,
- body=json.dumps(voices_response),
- status=200,
- content_type='application/json')
- responses.add(
- responses.GET,
- voice_url,
- body=json.dumps(voice_response),
- status=200,
- content_type='application/json')
- responses.add(
- responses.POST,
- synthesize_url,
- body=synthesize_response_body,
- status=200,
- content_type='application/json',
- match_querystring=True)
-
- text_to_speech = ibm_watson.TextToSpeechV1(
- username="username", password="password")
-
- text_to_speech.list_voices()
- assert responses.calls[0].request.url == voices_url
- assert responses.calls[0].response.text == json.dumps(voices_response)
-
- text_to_speech.get_voice('en-us_AllisonVoice')
- assert responses.calls[1].request.url == voice_url
- assert responses.calls[1].response.text == json.dumps(voice_response)
-
- text_to_speech.synthesize('hello')
- assert responses.calls[2].request.url == synthesize_url
- assert responses.calls[2].response.text == synthesize_response_body
-
- assert len(responses.calls) == 3
-
-
-@responses.activate
-def test_get_pronunciation():
-
- responses.add(
- responses.GET,
- 'https://stream.watsonplatform.net/text-to-speech/api/v1/pronunciation',
- body='{"pronunciation": "pronunciation info" }',
- status=200,
- content_type='application_json')
-
- text_to_speech = ibm_watson.TextToSpeechV1(
- username="username", password="password")
-
- text_to_speech.get_pronunciation(text="this is some text")
- text_to_speech.get_pronunciation(text="yo", voice="VoiceEnUsLisa")
- text_to_speech.get_pronunciation(
- text="yo", voice="VoiceEnUsLisa", format='ipa')
-
- assert len(responses.calls) == 3
-
-
-@responses.activate
-def test_custom_voice_models():
- responses.add(
- responses.GET,
- 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations',
- body='{"customizations": "yep" }',
- status=200,
- content_type='application_json')
- responses.add(
- responses.POST,
- 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations',
- body='{"customizations": "yep" }',
- status=200,
- content_type='application_json')
- responses.add(
- responses.GET,
- 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations/custid',
- body='{"customization": "yep, just one" }',
- status=200,
- content_type='application_json')
- responses.add(
- responses.POST,
- 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations/custid',
- body='{"customizations": "yep" }',
- status=200,
- content_type='application_json')
- responses.add(
- responses.DELETE,
- 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations/custid',
- body='{"customizations": "yep" }',
- status=200,
- content_type='application_json')
-
- text_to_speech = ibm_watson.TextToSpeechV1(
- username="username", password="password")
- text_to_speech.list_voice_models()
- text_to_speech.list_voice_models(language="en-US")
- assert len(responses.calls) == 2
-
- text_to_speech.create_voice_model(name="name", description="description")
- text_to_speech.get_voice_model(customization_id='custid')
- text_to_speech.update_voice_model(
- customization_id="custid", name="name", description="description")
- text_to_speech.delete_voice_model(customization_id="custid")
-
- assert len(responses.calls) == 6
-
-
-@responses.activate
-def test_custom_words():
- base_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations'
- responses.add(
- responses.GET,
- "{0}/{1}/words".format(base_url, "custid"),
- body='{"customizations": "yep" }',
- status=200,
- content_type='application_json')
- responses.add(
- responses.POST,
- "{0}/{1}/words".format(base_url, "custid"),
- body='{"customizations": "yep" }',
- status=200,
- content_type='application_json')
- responses.add(
- responses.GET,
- "{0}/{1}/words/{2}".format(base_url, "custid", "word"),
- body='{"customization": "yep, just one" }',
- status=200,
- content_type='application_json')
- responses.add(
- responses.POST,
- "{0}/{1}/words/{2}".format(base_url, "custid", "word"),
- body='{"customizations": "yep" }',
- status=200,
- content_type='application_json')
- responses.add(
- responses.PUT,
- "{0}/{1}/words/{2}".format(base_url, "custid", "word"),
- body='{"customizations": "yep" }',
- status=200,
- content_type='application_json')
- responses.add(
- responses.DELETE,
- "{0}/{1}/words/{2}".format(base_url, "custid", "word"),
- body='{"customizations": "yep" }',
- status=200,
- content_type='application_json')
-
- text_to_speech = ibm_watson.TextToSpeechV1(
- username="username", password="password")
-
- text_to_speech.list_words(customization_id="custid")
- text_to_speech.add_words(
- customization_id="custid", words=[{"word": "one", "translation": "one"}, {"word": "two", "translation": "two"}])
- text_to_speech.get_word(customization_id="custid", word="word")
- text_to_speech.add_word(
- customization_id='custid', word="word", translation="I'm translated")
- text_to_speech.delete_word(customization_id="custid", word="word")
-
- assert len(responses.calls) == 5
-
-@responses.activate
-
-def test_delete_user_data():
- url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/user_data'
- responses.add(
- responses.DELETE,
- url,
- body='{"description": "success" }',
- status=204,
- content_type='application_json')
-
- text_to_speech = ibm_watson.TextToSpeechV1(username="username", password="password")
- response = text_to_speech.delete_user_data('id').get_result()
- assert response is None
- assert len(responses.calls) == 1
+# endregion
+##############################################################################
+# End of Model Tests
+##############################################################################
diff --git a/test/unit/test_tone_analyzer_v3.py b/test/unit/test_tone_analyzer_v3.py
deleted file mode 100755
index d0ae45ff2..000000000
--- a/test/unit/test_tone_analyzer_v3.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# coding: utf-8
-import responses
-import ibm_watson
-from ibm_watson import ApiException
-import os
-import json
-
-
-@responses.activate
-# Simple test, just calling tone() with some text
-def test_tone():
- tone_url = 'https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone'
- tone_args = '?version=2016-05-19'
- tone_response = None
- with open(os.path.join(os.path.dirname(__file__), '../../resources/tone-v3-expect1.json')) as response_json:
- tone_response = response_json.read()
-
- responses.add(responses.POST, tone_url,
- body=tone_response, status=200,
- content_type='application/json')
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/personality.txt')) as tone_text:
- tone_analyzer = ibm_watson.ToneAnalyzerV3("2016-05-19",
- username="username",
- password="password")
- tone_analyzer.tone(tone_text.read(), content_type='application/json')
-
- assert responses.calls[0].request.url == tone_url + tone_args
- assert responses.calls[0].response.text == tone_response
-
- assert len(responses.calls) == 1
-
-
-@responses.activate
-# Invoking tone() with some modifiers given in 'params': sentences skipped
-def test_tone_with_args():
- tone_url = 'https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone'
- tone_args = {'version': '2016-05-19', 'sentences': 'false'}
- tone_response = None
- with open(os.path.join(os.path.dirname(__file__), '../../resources/tone-v3-expect1.json')) as response_json:
- tone_response = response_json.read()
-
- responses.add(responses.POST, tone_url,
- body=tone_response, status=200,
- content_type='application/json')
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/personality.txt')) as tone_text:
- tone_analyzer = ibm_watson.ToneAnalyzerV3("2016-05-19", username="username", password="password")
- tone_analyzer.tone(tone_text.read(), content_type='application/json', sentences=False)
-
- assert responses.calls[0].request.url.split('?')[0] == tone_url
- # Compare args. Order is not deterministic!
- actualArgs = {}
- for arg in responses.calls[0].request.url.split('?')[1].split('&'):
- actualArgs[arg.split('=')[0]] = arg.split('=')[1]
- assert actualArgs == tone_args
- assert responses.calls[0].response.text == tone_response
- assert len(responses.calls) == 1
-
-
-@responses.activate
-# Invoking tone() with some modifiers specified as positional parameters: sentences is false
-def test_tone_with_positional_args():
- tone_url = 'https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone'
- tone_args = {'version': '2016-05-19', 'sentences': 'false'}
- tone_response = None
- with open(os.path.join(os.path.dirname(__file__), '../../resources/tone-v3-expect1.json')) as response_json:
- tone_response = response_json.read()
-
- responses.add(responses.POST, tone_url,
- body=tone_response, status=200,
- content_type='application/json')
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/personality.txt')) as tone_text:
- tone_analyzer = ibm_watson.ToneAnalyzerV3("2016-05-19", username="username", password="password")
- tone_analyzer.tone(tone_text.read(), content_type='application/json', sentences=False)
-
- assert responses.calls[0].request.url.split('?')[0] == tone_url
- # Compare args. Order is not deterministic!
- actualArgs = {}
- for arg in responses.calls[0].request.url.split('?')[1].split('&'):
- actualArgs[arg.split('=')[0]] = arg.split('=')[1]
- assert actualArgs == tone_args
- assert responses.calls[0].response.text == tone_response
- assert len(responses.calls) == 1
-
-
-@responses.activate
-# Invoking tone_chat()
-def test_tone_chat():
- tone_url = 'https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone_chat'
- tone_args = '?version=2016-05-19'
- tone_response = None
- with open(os.path.join(os.path.dirname(__file__), '../../resources/tone-v3-expect2.json')) as response_json:
- tone_response = response_json.read()
-
- responses.add(responses.POST, tone_url,
- body=tone_response, status=200,
- content_type='application/json')
-
- tone_analyzer = ibm_watson.ToneAnalyzerV3("2016-05-19",
- username="username",
- password="password")
- utterances = [{'text': 'I am very happy', 'user': 'glenn'}]
- tone_analyzer.tone_chat(utterances)
-
- assert responses.calls[0].request.url == tone_url + tone_args
- assert responses.calls[0].response.text == tone_response
- assert len(responses.calls) == 1
-
-
-#########################
-# error response
-#########################
-
-
-@responses.activate
-def test_error():
- tone_url = 'https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone'
- error_code = 400
- error_message = "Invalid JSON input at line 2, column 12"
- tone_response = {
- "code": error_code,
- "sub_code": "C00012",
- "error": error_message
- }
- responses.add(responses.POST,
- tone_url,
- body=json.dumps(tone_response),
- status=error_code,
- content_type='application/json')
-
- tone_analyzer = ibm_watson.ToneAnalyzerV3('2016-05-19',
- username='username',
- password='password')
- text = 'Team, I know that times are tough!'
- try:
- tone_analyzer.tone(text, 'application/json')
- except ApiException as ex:
- assert len(responses.calls) == 1
- assert isinstance(ex, ApiException)
- assert ex.code == error_code
- assert ex.message == error_message
diff --git a/test/unit/test_visual_recognition_v3.py b/test/unit/test_visual_recognition_v3.py
deleted file mode 100644
index d81de2ba1..000000000
--- a/test/unit/test_visual_recognition_v3.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# coding: utf-8
-import responses
-import ibm_watson
-import json
-import os
-
-from unittest import TestCase
-
-base_url = "https://gateway.watsonplatform.net/visual-recognition/api/"
-
-class TestVisualRecognitionV3(TestCase):
- @classmethod
- def setUp(cls):
- iam_url = "https://iam.cloud.ibm.com/identity/token"
- iam_token_response = """{
- "access_token": "oAeisG8yqPY7sFR_x66Z15",
- "token_type": "Bearer",
- "expires_in": 3600,
- "expiration": 1524167011,
- "refresh_token": "jy4gl91BQ"
- }"""
- responses.add(responses.POST, url=iam_url, body=iam_token_response, status=200)
-
- @responses.activate
- def test_get_classifier(self):
- vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey')
-
- gc_url = "{0}{1}".format(base_url, 'v3/classifiers/bogusnumber')
-
- response = {
- "classifier_id": "bogusnumber",
- "name": "Dog Breeds",
- "owner": "58b61352-678c-44d1-9f40-40edf4ea8d19",
- "status": "failed",
- "created": "2017-08-25T06:39:01.968Z",
- "classes": [{"class": "goldenretriever"}]
- }
-
- responses.add(responses.GET,
- gc_url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- vr_service.get_classifier(classifier_id='bogusnumber')
-
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_delete_classifier(self):
- vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey')
-
- gc_url = "{0}{1}".format(base_url, 'v3/classifiers/bogusnumber')
-
- responses.add(responses.DELETE,
- gc_url,
- body=json.dumps({'response': 200}),
- status=200,
- content_type='application/json')
- vr_service.delete_classifier(classifier_id='bogusnumber')
-
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_list_classifiers(self):
- vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey')
-
- gc_url = "{0}{1}".format(base_url, 'v3/classifiers')
-
- response = {"classifiers": [
- {
- "classifier_id": "InsuranceClaims_1362331461",
- "name": "Insurance Claims",
- "status": "ready"
- },
- {
- "classifier_id": "DogBreeds_1539707331",
- "name": "Dog Breeds",
- "status": "ready"
- }
- ]}
-
- responses.add(responses.GET,
- gc_url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- vr_service.list_classifiers()
-
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_create_classifier(self):
- vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey')
-
- gc_url = "{0}{1}".format(base_url, 'v3/classifiers')
-
- response = {
- "classifier_id": "DogBreeds_2014254824",
- "name": "Dog Breeds",
- "owner": "58b61352-678c-44d1-9f40-40edf4ea8d19",
- "status": "failed",
- "created": "2017-08-25T06:39:01.968Z",
- "classes": [{"class": "goldenretriever"}]
- }
-
- responses.add(responses.POST,
- gc_url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/cars.zip'), 'rb') as cars, \
- open(os.path.join(os.path.dirname(__file__), '../../resources/trucks.zip'), 'rb') as trucks:
- vr_service.create_classifier('Cars vs Trucks', positive_examples={'cars': cars}, negative_examples=trucks)
-
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_update_classifier(self):
- vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey')
-
- gc_url = "{0}{1}".format(base_url, 'v3/classifiers/bogusid')
-
- response = {
- "classifier_id": "bogusid",
- "name": "Insurance Claims",
- "owner": "58b61352-678c-44d1-9f40-40edf4ea8d19",
- "status": "ready",
- "created": "2017-07-17T22:17:14.860Z",
- "classes": [
- {"class": "motorcycleaccident"},
- {"class": "flattire"},
- {"class": "brokenwinshield"}
- ]
- }
-
- responses.add(responses.POST,
- gc_url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
-
- vr_service.update_classifier(classifier_id="bogusid")
- assert len(responses.calls) == 2
-
- @responses.activate
- def test_classify(self):
- vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey')
-
- gc_url = "{0}{1}".format(base_url, 'v3/classify')
-
- response = {"images": [
- {"image": "test.jpg",
- "classifiers": [
- {"classes": [
- {"score": 0.95, "class": "tiger", "type_hierarchy": "/animal/mammal/carnivore/feline/big cat/tiger"},
- {"score": 0.997, "class": "big cat"},
- {"score": 0.998, "class": "feline"},
- {"score": 0.998, "class": "carnivore"},
- {"score": 0.998, "class": "mammal"},
- {"score": 0.999, "class": "animal"}
- ],
- "classifier_id": "default",
- "name": "default"}
- ]
- }
- ],
- "custom_classes": 0,
- "images_processed": 1
- }
-
- responses.add(responses.GET,
- gc_url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
- responses.add(responses.POST,
- gc_url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
-
- vr_service.classify(parameters='{"url": "http://google.com"}')
-
- vr_service.classify(parameters=json.dumps({'url': 'http://google.com', 'classifier_ids': ['one', 'two', 'three']}))
- vr_service.classify(parameters=json.dumps({'url': 'http://google.com', 'owners': ['me', 'IBM']}))
-
- with open(os.path.join(os.path.dirname(__file__), '../../resources/test.jpg'), 'rb') as image_file:
- vr_service.classify(images_file=image_file)
- assert len(responses.calls) == 8
-
- @responses.activate
- def test_detect_faces(self):
- vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey')
-
- gc_url = "{0}{1}".format(base_url, 'v3/detect_faces')
-
- response = {
- "images": [
- {
- "faces": [
- {
- "age": {
- "max": 44,
- "min": 35,
- "score": 0.446989
- },
- "face_location": {
- "height": 159,
- "left": 256,
- "top": 64,
- "width": 92
- },
- "gender": {
- "gender": "MALE",
- "score": 0.99593
- },
- "identity": {
- "name": "Barack Obama",
- "score": 0.970688,
- "type_hierarchy": "/people/politicians/democrats/barack obama"
- }
- }
- ],
- "resolved_url": "https://watson-developer-cloud.github.io/doc-tutorial-downloads/visual-recognition/prez.jpg",
- "source_url": "https://watson-developer-cloud.github.io/doc-tutorial-downloads/visual-recognition/prez.jpg"
- }
- ],
- "images_processed": 1
- }
-
- responses.add(responses.GET,
- gc_url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
-
- responses.add(responses.POST,
- gc_url,
- body=json.dumps(response),
- status=200,
- content_type='application/json')
-
- vr_service.detect_faces(parameters='{"url": "http://google.com"}')
- with open(os.path.join(os.path.dirname(__file__), '../../resources/test.jpg'), 'rb') as image_file:
- vr_service.detect_faces(images_file=image_file)
- assert len(responses.calls) == 4
-
- @responses.activate
- def test_delete_user_data(self):
- url = "{0}{1}".format(base_url, 'v3/user_data')
- responses.add(
- responses.DELETE,
- url,
- body='{"description": "success" }',
- status=204,
- content_type='application_json')
-
- vr_service = ibm_watson.VisualRecognitionV3('2016-10-20', iam_apikey='bogusapikey')
- response = vr_service.delete_user_data('id').get_result()
- assert response is None
- assert len(responses.calls) == 2
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index 1f0396717..000000000
--- a/tox.ini
+++ /dev/null
@@ -1,18 +0,0 @@
-[tox]
-envlist = lint, py27, py35, py36, py37
-
-[testenv:lint]
-basepython = python3.7
-deps = pylint
-commands = pylint ibm_watson test examples
-
-[testenv]
-passenv = TOXENV CI TRAVIS*
-commands =
- py.test --reruns 3 --cov=ibm_watson
- codecov -e TOXENV
-deps =
- -r{toxinidir}/requirements.txt
- -r{toxinidir}/requirements-dev.txt
-usedevelop = True
-exclude = .venv,.git,.tox,docs