dream
289 строк · 10.8 Кб
1import logging2import re3
4from nltk.stem import WordNetLemmatizer5
6from dff.script import Context7from dff.pipeline import Pipeline8
9import common.greeting as common_greeting10import common.utils as common_utils11import common.universal_templates as universal_templates12import common.dff_api_v1.integration.context as int_ctx13from common.acknowledgements import GENERAL_ACKNOWLEDGEMENTS14from common.constants import CAN_CONTINUE_SCENARIO, CAN_NOT_CONTINUE15from .facts_utils import provide_facts_request16
# Module-level logger; handlers/levels are configured by the service entrypoint.
logger = logging.getLogger(__name__)

# Shared WordNet lemmatizer instance, used by is_plural() to obtain the
# singular noun form (constructed once because initialization is not free).
wnl = WordNetLemmatizer()
21
# vars are described in README.md
23
24
def was_clarification_request(ctx: Context, _) -> bool:
    """Report whether the agent marked the previous turn as a clarification request.

    Reads the agent-level flag from ``ctx.misc``; during pipeline validation
    there is no agent state, so the answer is always False.
    """
    if ctx.validation:
        flag = False
    else:
        flag = ctx.misc["agent"]["clarification_request_flag"]
    logger.debug(f"was_clarification_request = {flag}")
    return bool(flag)
30
def is_opinion_request(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True if the last human utterance asks for the bot's opinion."""
    last_uttr = int_ctx.get_last_human_utterance(ctx, pipeline)
    result = common_utils.is_opinion_request(last_uttr)
    logger.debug(f"is_opinion_request = {result}")
    return bool(result)
36
def is_opinion_expression(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True if the last human utterance expresses an opinion."""
    last_uttr = int_ctx.get_last_human_utterance(ctx, pipeline)
    result = common_utils.is_opinion_expression(last_uttr)
    logger.debug(f"is_opinion_expression = {result}")
    return bool(result)
42
def is_previous_turn_dff_suspended(ctx: Context, _) -> bool:
    """Report whether the agent suspended the DFF on the previous turn.

    Falls back to False when the flag is absent or during validation.
    """
    if ctx.validation:
        flag = False
    else:
        flag = ctx.misc["agent"].get("previous_turn_dff_suspended", False)
    logger.debug(f"is_previous_turn_dff_suspended = {flag}")
    return bool(flag)
48
def is_current_turn_dff_suspended(ctx: Context, _) -> bool:
    """Report whether the agent suspended the DFF on the current turn.

    Falls back to False when the flag is absent or during validation.
    """
    if ctx.validation:
        flag = False
    else:
        flag = ctx.misc["agent"].get("current_turn_dff_suspended", False)
    logger.debug(f"is_current_turn_dff_suspended = {flag}")
    return bool(flag)
54
def is_switch_topic(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True if the user explicitly asked to switch the topic."""
    last_uttr = int_ctx.get_last_human_utterance(ctx, pipeline)
    result = universal_templates.is_switch_topic(last_uttr)
    logger.debug(f"is_switch_topic = {result}")
    return bool(result)
60
def is_question(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True if the last human utterance text is a question."""
    utterance_text = int_ctx.get_last_human_utterance(ctx, pipeline)["text"]
    result = common_utils.is_question(utterance_text)
    logger.debug(f"is_question = {result}")
    return bool(result)
67
def is_lets_chat_about_topic_human_initiative(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True if the USER initiated a "let's chat about X" exchange."""
    human_uttr = int_ctx.get_last_human_utterance(ctx, pipeline)
    bot_uttr = int_ctx.get_last_bot_utterance(ctx, pipeline)
    result = universal_templates.if_chat_about_particular_topic(human_uttr, bot_uttr)
    logger.debug(f"is_lets_chat_about_topic_human_initiative = {result}")
    return bool(result)
75
def is_lets_chat_about_topic(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True if either side initiated a "let's chat about X" exchange.

    Covers two cases: the user proposed a topic themselves, or the bot asked
    what to talk about and the user did not refuse.
    """
    human_initiative = is_lets_chat_about_topic_human_initiative(ctx, pipeline)

    human_uttr = int_ctx.get_last_human_utterance(ctx, pipeline)
    bot_text = int_ctx.get_last_bot_utterance(ctx, pipeline)["text"]
    bot_initiative = bool(re.search(universal_templates.COMPILE_WHAT_TO_TALK_ABOUT, bot_text))

    flag = human_initiative or (bot_initiative and not common_utils.is_no(human_uttr))
    logger.debug(f"is_lets_chat_about_topic = {flag}")
    return bool(flag)
86
def is_begin_of_dialog(ctx: Context, pipeline: Pipeline, begin_dialog_n=10) -> bool:
    """Return True while fewer than `begin_dialog_n` human utterances occurred."""
    result = int_ctx.get_human_utter_index(ctx, pipeline) < begin_dialog_n
    logger.debug(f"is_begin_of_dialog = {result}")
    return bool(result)
92
def is_interrupted(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True if at least one human utterance passed since this skill
    last spoke (index gap != 1), unless it was a clarification request."""
    index_gap = int_ctx.get_human_utter_index(ctx, pipeline) - int_ctx.get_previous_human_utter_index(
        ctx, pipeline
    )
    flag = index_gap != 1 and not was_clarification_request(ctx, pipeline)
    logger.debug(f"is_interrupted = {flag}")
    return bool(flag)
100
def is_long_interrupted(ctx: Context, pipeline: Pipeline, how_long=3) -> bool:
    """Return True if more than `how_long` human utterances passed since this
    skill last spoke, unless it was a clarification request."""
    index_gap = int_ctx.get_human_utter_index(ctx, pipeline) - int_ctx.get_previous_human_utter_index(
        ctx, pipeline
    )
    flag = index_gap > how_long and not was_clarification_request(ctx, pipeline)
    logger.debug(f"is_long_interrupted = {flag}")
    return bool(flag)
108
def is_new_human_entity(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True if a newly labeled noun phrase appeared in the user's turn."""
    flag = bool(int_ctx.get_new_human_labeled_noun_phrase(ctx, pipeline))
    logger.debug(f"is_new_human_entity = {flag}")
    return flag
115
def is_last_state(ctx: Context, pipeline: Pipeline, state) -> bool:
    """Return True if the most recently recorded state in the skill history
    equals `state`.

    History maps human-utterance indices (stored as strings — see
    `if_was_prev_active`, which builds them via `str(...)`) to states.
    Fix: the original sorted the string keys lexicographically, so "10"
    sorted before "2" and the wrong entry was treated as "last" once the
    dialog reached ten turns; we sort by the numeric value instead.
    """
    flag = False
    if not ctx.validation:
        history = list(int_ctx.get_history(ctx, pipeline).items())
        if history:
            # Numeric sort of the string indices, not lexicographic.
            history_sorted = sorted(history, key=lambda item: int(item[0]))
            last_state = history_sorted[-1][1]
            if last_state == state:
                flag = True
    return bool(flag)
127
def is_first_time_of_state(ctx: Context, pipeline: Pipeline, state) -> bool:
    """Return True if `state` has never appeared in the skill history."""
    visited_states = int_ctx.get_history(ctx, pipeline).values()
    flag = state not in visited_states
    logger.debug(f"is_first_time_of_state {state} = {flag}")
    return bool(flag)
133
def if_was_prev_active(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True if this skill answered the previous human utterance.

    The skill history is keyed by stringified human-utterance indices, so the
    previous index is converted to `str` before the membership test.
    """
    recorded_indices = set(int_ctx.get_history(ctx, pipeline).keys())
    flag = False
    if not ctx.validation:
        prev_index = str(ctx.misc["agent"]["human_utter_index"] - 1)
        flag = prev_index in recorded_indices
    return bool(flag)
143
def is_plural(word) -> bool:
    """Return True if `word` is a plural noun.

    A word is considered plural when it differs from its WordNet noun lemma
    (the singular form).

    Fix: the original used `word is not lemma`, which tests object IDENTITY.
    The lemmatizer may return an equal-but-distinct string object for a
    singular noun, in which case identity comparison wrongly reports plural.
    Equality (`!=`) is the correct comparison.
    """
    lemma = wnl.lemmatize(word, "n")
    return word != lemma
149
def is_first_our_response(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True if this skill has not produced any response yet."""
    flag = not int_ctx.get_history(ctx, pipeline)
    logger.debug(f"is_first_our_response = {flag}")
    return bool(flag)
155
def is_no_human_abandon(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True if there is no dialog breakdown in the human utterance.

    Uses the MIDAS hold/abandon classes: absence of "abandon" means no breakdown.
    """
    midas_labels = common_utils.get_intents(int_ctx.get_last_human_utterance(ctx, pipeline), which="midas")
    return "abandon" not in midas_labels
163
def no_special_switch_off_requests(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True when the user did NOT:
    - ask to switch the topic,
    - ask to talk about something particular,
    - trigger a high-priority intent (like what_is_your_name).
    """
    catcher_intents = common_utils.get_intents(
        int_ctx.get_last_human_utterance(ctx, pipeline), probs=False, which="intent_catcher"
    )
    # Any caught intent outside the service set counts as high priority.
    high_priority = any(intent not in common_utils.service_intents for intent in catcher_intents)
    switch_requested = is_switch_topic(ctx, pipeline)
    topic_proposed = is_lets_chat_about_topic_human_initiative(ctx, pipeline)

    return not (high_priority or switch_requested or topic_proposed)
181
def no_requests(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True when the user made no request of any kind:
    - no topic switching or "let's chat about" proposals,
    - no high-priority intents (like what_is_your_name),
    - no special request intents,
    - no questions (no "?" in the text).
    """
    no_special = no_special_switch_off_requests(ctx, pipeline)

    request_intents = {
        "opinion_request",
        "topic_switching",
        "lets_chat_about",
        "what_are_you_talking_about",
        "Information_RequestIntent",
        "Topic_SwitchIntent",
        "Opinion_RequestIntent",
    }
    last_uttr = int_ctx.get_last_human_utterance(ctx, pipeline)
    detected = common_utils.get_intents(last_uttr, which="all")
    no_request_intent = not any(intent in request_intents for intent in detected)
    no_question_mark = "?" not in last_uttr["text"]

    return no_special and no_request_intent and no_question_mark
209
def is_yes_vars(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True if the last human utterance is a "yes"-like agreement."""
    return bool(common_utils.is_yes(int_ctx.get_last_human_utterance(ctx, pipeline)))
215
def is_no_vars(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True if the last human utterance is a "no"-like refusal."""
    return bool(common_utils.is_no(int_ctx.get_last_human_utterance(ctx, pipeline)))
221
def is_do_not_know_vars(ctx: Context, pipeline: Pipeline) -> bool:
    """Return True if the last human utterance is a "don't know" answer."""
    return bool(common_utils.is_donot_know(int_ctx.get_last_human_utterance(ctx, pipeline)))
227
def is_passive_user(ctx: Context, pipeline: Pipeline, passive_threshold=3, history_len=2) -> bool:
    """Check the last `history_len` human utterances by token count.

    The user is considered passive — and True is returned — only when EVERY
    one of those utterances has at most `passive_threshold` tokens.
    """
    recent = int_ctx.get_human_utterances(ctx, pipeline)[-history_len:]
    return all(len(utt["text"].split()) <= passive_threshold for utt in recent)
241
def get_not_used_and_save_sentiment_acknowledgement(ctx: Context, pipeline: Pipeline, sentiment=None, lang="EN"):
    """Pick a sentiment-matching acknowledgement not used recently and store it.

    When `sentiment` is not given, it is taken from the user's detected
    sentiment, downgraded to "neutral" for plain yes/no answers. The chosen
    acknowledgement is appended to shared memory, keeping only the last two.
    """
    if sentiment is None:
        sentiment = int_ctx.get_human_sentiment(ctx, pipeline)
        if is_yes_vars(ctx, pipeline) or is_no_vars(ctx, pipeline):
            sentiment = "neutral"

    shared_memory = int_ctx.get_shared_memory(ctx, pipeline)
    recent_acks = shared_memory.get("last_acknowledgements", [])

    ack = common_utils.get_not_used_template(
        used_templates=recent_acks, all_templates=GENERAL_ACKNOWLEDGEMENTS[lang][sentiment]
    )

    # Remember only the two most recent acknowledgements to avoid repeats.
    int_ctx.save_to_shared_memory(ctx, pipeline, last_acknowledgements=(recent_acks + [ack])[-2:])
    return ack
259
def set_conf_and_can_cont_by_universal_policy(ctx: Context, pipeline: Pipeline):
    """Set confidence and can-continue flags per the universal dialog policy.

    Outside the dialog beginning the skill withdraws (confidence 0.0,
    CAN_NOT_CONTINUE); within it, confidence depends on whether this is the
    skill's first response, the dialog was uninterrupted, and whether the
    user dodged the previous question.
    """
    DIALOG_BEGINNING_START_CONFIDENCE = 0.98
    DIALOG_BEGINNING_CONTINUE_CONFIDENCE = 0.9
    DIALOG_BEGINNING_SHORT_ANSWER_CONFIDENCE = 0.98
    MIDDLE_DIALOG_START_CONFIDENCE = 0.7

    if not is_begin_of_dialog(ctx, pipeline, begin_dialog_n=10):
        confidence, can_continue_flag = 0.0, CAN_NOT_CONTINUE
    elif is_first_our_response(ctx, pipeline):
        confidence, can_continue_flag = DIALOG_BEGINNING_START_CONFIDENCE, CAN_CONTINUE_SCENARIO
    elif not is_interrupted(ctx, pipeline) and common_greeting.dont_tell_you_answer(
        int_ctx.get_last_human_utterance(ctx, pipeline)
    ):
        confidence, can_continue_flag = DIALOG_BEGINNING_SHORT_ANSWER_CONFIDENCE, CAN_CONTINUE_SCENARIO
    elif not is_interrupted(ctx, pipeline):
        confidence, can_continue_flag = DIALOG_BEGINNING_CONTINUE_CONFIDENCE, CAN_CONTINUE_SCENARIO
    else:
        confidence, can_continue_flag = MIDDLE_DIALOG_START_CONFIDENCE, CAN_CONTINUE_SCENARIO

    int_ctx.set_can_continue(ctx, pipeline, can_continue_flag)
    int_ctx.set_confidence(ctx, pipeline, confidence)
287
def facts(ctx, pipeline):
    """Thin wrapper: delegate the facts-request decision to facts_utils."""
    return provide_facts_request(ctx, pipeline)