import logging
import re

from nltk.stem import WordNetLemmatizer

from dff.script import Context
from dff.pipeline import Pipeline

import common.greeting as common_greeting
import common.utils as common_utils
import common.universal_templates as universal_templates
import common.dff_api_v1.integration.context as int_ctx
from common.acknowledgements import GENERAL_ACKNOWLEDGEMENTS
from common.constants import CAN_CONTINUE_SCENARIO, CAN_NOT_CONTINUE
from .facts_utils import provide_facts_request

logger = logging.getLogger(__name__)

wnl = WordNetLemmatizer()


# vars are described in README.md


def was_clarification_request(ctx: Context, _) -> bool:
    flag = ctx.misc["agent"]["clarification_request_flag"] if not ctx.validation else False
    logger.debug(f"was_clarification_request = {flag}")
    return bool(flag)


def is_opinion_request(ctx: Context, pipeline: Pipeline) -> bool:
    flag = common_utils.is_opinion_request(int_ctx.get_last_human_utterance(ctx, pipeline))
    logger.debug(f"is_opinion_request = {flag}")
    return bool(flag)


def is_opinion_expression(ctx: Context, pipeline: Pipeline) -> bool:
    flag = common_utils.is_opinion_expression(int_ctx.get_last_human_utterance(ctx, pipeline))
    logger.debug(f"is_opinion_expression = {flag}")
    return bool(flag)


def is_previous_turn_dff_suspended(ctx: Context, _) -> bool:
    flag = ctx.misc["agent"].get("previous_turn_dff_suspended", False) if not ctx.validation else False
    logger.debug(f"is_previous_turn_dff_suspended = {flag}")
    return bool(flag)


def is_current_turn_dff_suspended(ctx: Context, _) -> bool:
    flag = ctx.misc["agent"].get("current_turn_dff_suspended", False) if not ctx.validation else False
    logger.debug(f"is_current_turn_dff_suspended = {flag}")
    return bool(flag)


def is_switch_topic(ctx: Context, pipeline: Pipeline) -> bool:
    flag = universal_templates.is_switch_topic(int_ctx.get_last_human_utterance(ctx, pipeline))
    logger.debug(f"is_switch_topic = {flag}")
    return bool(flag)


def is_question(ctx: Context, pipeline: Pipeline) -> bool:
    text = int_ctx.get_last_human_utterance(ctx, pipeline)["text"]
    flag = common_utils.is_question(text)
    logger.debug(f"is_question = {flag}")
    return bool(flag)


def is_lets_chat_about_topic_human_initiative(ctx: Context, pipeline: Pipeline) -> bool:
    flag = universal_templates.if_chat_about_particular_topic(
        int_ctx.get_last_human_utterance(ctx, pipeline), int_ctx.get_last_bot_utterance(ctx, pipeline)
    )
    logger.debug(f"is_lets_chat_about_topic_human_initiative = {flag}")
    return bool(flag)


def is_lets_chat_about_topic(ctx: Context, pipeline: Pipeline) -> bool:
    flag = is_lets_chat_about_topic_human_initiative(ctx, pipeline)

    last_human_uttr = int_ctx.get_last_human_utterance(ctx, pipeline)
    last_bot_uttr_text = int_ctx.get_last_bot_utterance(ctx, pipeline)["text"]
    is_bot_initiative = bool(re.search(universal_templates.COMPILE_WHAT_TO_TALK_ABOUT, last_bot_uttr_text))
    flag = flag or (is_bot_initiative and not common_utils.is_no(last_human_uttr))
    logger.debug(f"is_lets_chat_about_topic = {flag}")
    return bool(flag)
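
# Note: the flag above fires either on explicit user initiative ("let's talk
# about X") or when the bot itself asked what to talk about and the user did
# not answer "no".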


def is_begin_of_dialog(ctx: Context, pipeline: Pipeline, begin_dialog_n=10) -> bool:
    flag = int_ctx.get_human_utter_index(ctx, pipeline) < begin_dialog_n
    logger.debug(f"is_begin_of_dialog = {flag}")
    return bool(flag)


def is_interrupted(ctx: Context, pipeline: Pipeline) -> bool:
    flag = (
        int_ctx.get_human_utter_index(ctx, pipeline) - int_ctx.get_previous_human_utter_index(ctx, pipeline)
    ) != 1 and not was_clarification_request(ctx, pipeline)
    logger.debug(f"is_interrupted = {flag}")
    return bool(flag)
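
# Note: a gap other than 1 between the current and previously processed human
# utterance indices means this skill skipped at least one turn, i.e. it was
# interrupted (clarification requests are not counted as interruptions).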


def is_long_interrupted(ctx: Context, pipeline: Pipeline, how_long=3) -> bool:
    flag = (
        int_ctx.get_human_utter_index(ctx, pipeline) - int_ctx.get_previous_human_utter_index(ctx, pipeline)
    ) > how_long and not was_clarification_request(ctx, pipeline)
    logger.debug(f"is_long_interrupted = {flag}")
    return bool(flag)


def is_new_human_entity(ctx: Context, pipeline: Pipeline) -> bool:
    new_entities = int_ctx.get_new_human_labeled_noun_phrase(ctx, pipeline)
    flag = bool(new_entities)
    logger.debug(f"is_new_human_entity = {flag}")
    return bool(flag)


def is_last_state(ctx: Context, pipeline: Pipeline, state) -> bool:
    flag = False
    if not ctx.validation:
        history = list(int_ctx.get_history(ctx, pipeline).items())
        if history:
            history_sorted = sorted(history, key=lambda x: x[0])
            last_state = history_sorted[-1][1]
            if last_state == state:
                flag = True
    return bool(flag)


def is_first_time_of_state(ctx: Context, pipeline: Pipeline, state) -> bool:
    flag = state not in list(int_ctx.get_history(ctx, pipeline).values())
    logger.debug(f"is_first_time_of_state {state} = {flag}")
    return bool(flag)


def if_was_prev_active(ctx: Context, pipeline: Pipeline) -> bool:
    flag = False
    skill_uttr_indices = set(int_ctx.get_history(ctx, pipeline).keys())
    if not ctx.validation:
        human_uttr_index = str(ctx.misc["agent"]["human_utter_index"] - 1)
        if human_uttr_index in skill_uttr_indices:
            flag = True
    return bool(flag)
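
# Note: the skill history maps human utterance indices (stored as strings) to
# states, so the check above asks whether this skill responded to the previous
# human utterance.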


def is_plural(word) -> bool:
    """Rough plural check: True if the noun form differs from its WordNet lemma."""
    lemma = wnl.lemmatize(word, "n")
    return word != lemma
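
# Hypothetical usage (assumes NLTK's WordNet data is available, e.g. after
# nltk.download("wordnet")):
#   is_plural("cats")  # -> True, "cats" lemmatizes to "cat"
#   is_plural("cat")   # -> False, the lemma equals the word itself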


def is_first_our_response(ctx: Context, pipeline: Pipeline) -> bool:
    flag = len(list(int_ctx.get_history(ctx, pipeline).values())) == 0
    logger.debug(f"is_first_our_response = {flag}")
    return bool(flag)


def is_no_human_abandon(ctx: Context, pipeline: Pipeline) -> bool:
    """True if the human utterance shows no dialog breakdown. Uses MIDAS hold/abandon classes."""
    midas_classes = common_utils.get_intents(int_ctx.get_last_human_utterance(ctx, pipeline), which="midas")
    return "abandon" not in midas_classes


def no_special_switch_off_requests(ctx: Context, pipeline: Pipeline) -> bool:
    """Determine whether the user
    - did not ask to switch the topic,
    - did not ask to talk about something particular,
    - did not request a high-priority intent (like what_is_your_name).
    """
    intents_by_catcher = common_utils.get_intents(
        int_ctx.get_last_human_utterance(ctx, pipeline), probs=False, which="intent_catcher"
    )
    is_high_priority_intent = any([intent not in common_utils.service_intents for intent in intents_by_catcher])
    is_switch = is_switch_topic(ctx, pipeline)
    is_lets_chat = is_lets_chat_about_topic_human_initiative(ctx, pipeline)

    return not (is_high_priority_intent or is_switch or is_lets_chat)


def no_requests(ctx: Context, pipeline: Pipeline) -> bool:
    """Determine whether the user
    - did not ask to switch the topic,
    - did not ask to talk about something particular,
    - did not request a high-priority intent (like what_is_your_name),
    - did not request any other special intents,
    - did not ask a question.
    """
    contain_no_special_requests = no_special_switch_off_requests(ctx, pipeline)

    request_intents = [
        "opinion_request",
        "topic_switching",
        "lets_chat_about",
        "what_are_you_talking_about",
        "Information_RequestIntent",
        "Topic_SwitchIntent",
        "Opinion_RequestIntent",
    ]
    intents = common_utils.get_intents(int_ctx.get_last_human_utterance(ctx, pipeline), which="all")
    is_not_request_intent = all([intent not in request_intents for intent in intents])
    is_no_question = "?" not in int_ctx.get_last_human_utterance(ctx, pipeline)["text"]

    return contain_no_special_requests and is_not_request_intent and is_no_question


def is_yes_vars(ctx: Context, pipeline: Pipeline) -> bool:
    flag = common_utils.is_yes(int_ctx.get_last_human_utterance(ctx, pipeline))
    return bool(flag)


def is_no_vars(ctx: Context, pipeline: Pipeline) -> bool:
    flag = common_utils.is_no(int_ctx.get_last_human_utterance(ctx, pipeline))
    return bool(flag)


def is_do_not_know_vars(ctx: Context, pipeline: Pipeline) -> bool:
    flag = common_utils.is_donot_know(int_ctx.get_last_human_utterance(ctx, pipeline))
    return bool(flag)


def is_passive_user(ctx: Context, pipeline: Pipeline, passive_threshold=3, history_len=2) -> bool:
    """Check the number of tokens in the last `history_len` human utterances.
    If ALL of them contain at most `passive_threshold` tokens, consider the user
    passive and return True.
    """
    user_utterances = int_ctx.get_human_utterances(ctx, pipeline)[-history_len:]
    user_utterances = [utt["text"] for utt in user_utterances]

    uttrs_are_short = [len(uttr.split()) <= passive_threshold for uttr in user_utterances]
    return all(uttrs_are_short)
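
# Worked example: with passive_threshold=3 and history_len=2, a user whose last
# two replies were "yes" (1 token) and "i guess so" (3 tokens) counts as
# passive; "i would love to talk about music" (7 tokens) would not.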


def get_not_used_and_save_sentiment_acknowledgement(ctx: Context, pipeline: Pipeline, sentiment=None, lang="EN"):
    """Pick an acknowledgement for the current sentiment that was not used in
    recent turns, remember it in shared memory, and return it.
    """
    if sentiment is None:
        sentiment = int_ctx.get_human_sentiment(ctx, pipeline)
        if is_yes_vars(ctx, pipeline) or is_no_vars(ctx, pipeline):
            sentiment = "neutral"

    shared_memory = int_ctx.get_shared_memory(ctx, pipeline)
    last_acknowledgements = shared_memory.get("last_acknowledgements", [])

    ack = common_utils.get_not_used_template(
        used_templates=last_acknowledgements, all_templates=GENERAL_ACKNOWLEDGEMENTS[lang][sentiment]
    )

    used_acks = last_acknowledgements + [ack]
    int_ctx.save_to_shared_memory(ctx, pipeline, last_acknowledgements=used_acks[-2:])
    return ack


def set_conf_and_can_cont_by_universal_policy(ctx: Context, pipeline: Pipeline):
    DIALOG_BEGINNING_START_CONFIDENCE = 0.98
    DIALOG_BEGINNING_CONTINUE_CONFIDENCE = 0.9
    DIALOG_BEGINNING_SHORT_ANSWER_CONFIDENCE = 0.98
    MIDDLE_DIALOG_START_CONFIDENCE = 0.7

    if not is_begin_of_dialog(ctx, pipeline, begin_dialog_n=10):
        confidence = 0.0
        can_continue_flag = CAN_NOT_CONTINUE
    elif is_first_our_response(ctx, pipeline):
        confidence = DIALOG_BEGINNING_START_CONFIDENCE
        can_continue_flag = CAN_CONTINUE_SCENARIO
    elif not is_interrupted(ctx, pipeline) and common_greeting.dont_tell_you_answer(
        int_ctx.get_last_human_utterance(ctx, pipeline)
    ):
        confidence = DIALOG_BEGINNING_SHORT_ANSWER_CONFIDENCE
        can_continue_flag = CAN_CONTINUE_SCENARIO
    elif not is_interrupted(ctx, pipeline):
        confidence = DIALOG_BEGINNING_CONTINUE_CONFIDENCE
        can_continue_flag = CAN_CONTINUE_SCENARIO
    else:
        confidence = MIDDLE_DIALOG_START_CONFIDENCE
        can_continue_flag = CAN_CONTINUE_SCENARIO

    int_ctx.set_can_continue(ctx, pipeline, can_continue_flag)
    int_ctx.set_confidence(ctx, pipeline, confidence)
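
# Policy summary: outside the first 10 human turns the skill bows out
# (confidence 0.0, CAN_NOT_CONTINUE); the first response and uninterrupted
# continuations keep high confidence (0.9-0.98); an interrupted continuation
# falls back to 0.7 with CAN_CONTINUE_SCENARIO.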


def facts(ctx: Context, pipeline: Pipeline):
    return provide_facts_request(ctx, pipeline)
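
# Minimal usage sketch (hypothetical, not part of this module): these
# predicates follow the DFF condition signature
# (ctx: Context, pipeline: Pipeline) -> bool, so a script can combine them,
# e.g. with dff.script.conditions:
#   import dff.script.conditions as cnd
#   TRANSITIONS: {("greeting", "ask_topic"): cnd.all([is_question, no_requests])}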