-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathSentience.py
More file actions
6642 lines (5881 loc) · 316 KB
/
Sentience.py
File metadata and controls
6642 lines (5881 loc) · 316 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import sys
import os
import subprocess
import datetime
import time
import threading
import speech_recognition as sr
import pyttsx3
from chatterbot import ChatBot
import shutil
import cProfile
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.core.window import Window
from kivy.app import App
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.factory import Factory
from kivy.uix.actionbar import ActionItem, ActionButton
from kivy.config import Config
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.popup import Popup
from kivy.properties import ObjectProperty, StringProperty, ListProperty, ConfigParserProperty
import kivy.utils
from kivy.config import ConfigParser
from kivy.uix.settings import SettingsWithSidebar
from SettingsMenu import my_settings
# Module metadata: authorship, licensing, and version information for
# the Sentience application. These dunder names are a common Python
# convention and carry no runtime behavior beyond defining the values.
__author__ = 'Aaron Johnson'
__copyright__ = 'Copyright (c) 2018 Copyright Holder All Rights Reserved.'
__license__ = 'GPL'
__version__ = '2.1'
__maintainer__ = 'Aaron Johnson'
__email__ = 'Aaronjohnson@protonmail.ch'
class PrintDialog(FloatLayout):
    '''Popup content widget for selecting a file to print.

    Used as the content of a Popup created by the SentienceScreen class.
    It gives the user a graphical file browser to navigate to, and pick,
    the file they want printed — rather than the program printing files
    automatically. This keeps the user in control and avoids printing
    files unintentionally.

    Attributes
    ----------
    print_files : ObjectProperty
        Bound (in the kv design file) to SentienceScreen.print_files(),
        which prints the file the user selected.
    Cancel : ObjectProperty
        Bound (in the kv design file) to SentienceScreen.dismiss_popup(),
        which closes the popup without printing.

    Notes
    -----
    The widget layout itself is defined in the kv design language; this
    class only declares the two properties that the kv rules bind to.
    '''
    print_files = ObjectProperty(None)
    Cancel = ObjectProperty(None)
class DeleteDialog(FloatLayout):
    '''Popup content widget for selecting a file to delete.

    A FloatLayout is provided as the base; additional layouts are added
    to it in the kv design language. This class serves as the dialog
    content for the popup window that lets the user browse to and delete
    a specific file.

    Attributes
    ----------
    delete_file : ObjectProperty
        Bound (in the kv design file) to the SentienceScreen functions
        delete_file() / open_delete_file_dialog(). Clicking the
        "Delete File" button triggers open_delete_file_dialog(), which
        opens the popup window.
    Cancel : ObjectProperty
        Bound (in the kv design file) to SentienceScreen.dismiss_popup();
        it corresponds to the "Cancel" button inside the popup, which
        closes the window.

    Notes
    -----
    ObjectProperty(None) is kivy's mechanism for declaring bindable
    properties; here it lets the kv rules wire the dialog's buttons
    directly to SentienceScreen methods.
    '''
    delete_file = ObjectProperty(None)
    Cancel = ObjectProperty(None)
class SentienceScreen(Screen):
'''
SentienceScreen(Screen):
Parameters
----------
param1 : Screen
The first parameter creates a new Screen, which will function
as a "page". This page is our only "Screen". It's the Main
Window. It does everything. Now the actual designer code is
done in the kv design language. But, this widget holds it
all. It's the core of the program.
Attributes
----------
self.chatbot
The chatbot is the core feature here. It's the bot that the
user communicates with. It's initialized and trained in the
__init__ function. It's training can be continued throughout
the program. Or expanded on by creating and adding new databases
to its training regiment.
self.engine
The engine object refers to the python3 text to speeh engine
. It's what enables the chat bot to have a voice. From this
engine we derive the ability to pass a string to the chat
bot which can then access the systems text to speech software
and read it back with an apropriate voice.
self.record
The record object comes from speech_engine.Recognizer().
This object allows us the ability to use programs such as
CMU Sphinx voice recognition. Essentially we use this to
transcribe recored audio to text which we can then store
in a string. I make use of this by transcribing the recored
audio to string vairables and passing them to the chat bot
so that it can accurately respond to the user.
self.mic
This object allows us to access and use any connected or
onboard microphone if one is available. With this we can
record a users voice, store it in a variable then send it
to the Recognizer() to be transcribed and passed as a string
to the chat bot.
self.audio_threshold
This is used to automatically set the level at which the
microphone accepts audio input. The higher the level the
less sensitive the microphone is. Or rather the it's less
likely that ambient noise will be treated as intentional
audio being sent through the microphone.
self.record_dynamic_energy_threshold
This applies to self.record and is a boolean variable. By
setting this to False we can ensure that the energy_threshold
doesn't dynamically set its energy_threshold level. Note:
That the energy_threshold is what enables us to searate
between ambient noise and the users intended voice commands.
self.master_log
This is a string variable that I use to store all of the
conversation that takes place between the user and the chat
bot.
self.voice_enabled
If self.voice_enabled is set to True then the user is able
to use their microphone to communicate with the chat bot.
Note: The user can only use a microphone if they have one.
This can be either a connected microphone and or an onboard
microphone.
self.voice_disabled
If self.voice_disabled is set to True then the user can only
communicate with the chat bot through text. Note: The chat
bot can access its audio functions even if
self.voice_disabled == True. This function only effects the
users ability to use their microphone.
self.user_input
This is a string variable which I use to store the input
from the user the data here is passed to the chat bot,
stored in various files and variables/data structures.
Note: This variable is redundant and will in the future be
removed. It can be ommited and replaced by the TextInput
widgets return function.
self.audio_enabled
if self.audio_enabled == True the chat bot can use the systems
text to speech software (espeak, spai5, or nsss) to access the
softwares built in voices and read back any strings that the
chat bot comes up with as a response to the user. Note: This
boolean vairable only effects the chat bots ability to use
sound as a medium for communication. It does not effect the
users ability to use their microphone.
self.audio_disabled
If self.audio_disabled == True then the chat bot can only
communicate with the user via text.
self.__user_profile
self.__user_profile is a dictionary and stores three specific
keys. 1) Username, 2) Age, 3) Gender. These are optional
variables. The user doesn't need to create a user profile.
Though it's encouraged that they do for better logging of
the data. Note: If the user elects to not create a user profile
this information is by default set.
Members
-------
def __init__(self, **kwargs)
Initalizes SentienceScreen() a more in depth analysis will
be given under the SentienceScreen().__init__(self, **kwargs)
functions documentation.
def quick_check_os(self)
This function is called when the user clicks on the
"Check Operating System" button which is represented by
an image of a computer on the menu bar. This function
when clicked checks to see if the user is running either
windows or Linux. If the user is running windows it makes
three new TextInput Widgets visible by changing the opacity.
If the user is using a Linux operating system clicking on
this button does nothing. A more in depth analysis will be
given in the SentienceScreen().quick_check_os() functions
documentation.
def get_user_text_response(self)
This function is called when the user hits the "enter key"
on their keyboard while inside of the user_input TextInput
Widget. A string variable is returned from this and passed
to the chat bot so that it can form a response to what the
users statement was. A more in depth analysis of this will
be given in the SentienceScreen().get_user_text_response()
functions documentation.
def get_caprica_text_response(self)
This function is called after the user inputs a text
response. And that response is sent to the chat bot. The
response that the user input is used by this function to
generate a response from the chat bot. A more in depth
analysis will be given in the
SentienceScreen().get_caprica_text_response() functions
documentation.
def get_user_voice_response(self)
This function is called when the user clicks the
"Record user" button. Which is located on the menu bar and
is represented by the image of a blue talking head. If
self.voice_disabled == True then the image will be a red
talking head. If the user clicks the button when it's red a
warning message will be displayed informing the user that
he/she needs to first enable their microphone by clicking on
the set_enable_disable_voice button. A More in depth
analysis of this function will be given in the
SentienceScreen().get_caprica_voice_response() function
documentation.
def get_caprica_voice_response(self, words)
This function is called after the user inputs a text string
in the proper TextInput widget; or
if self.voice_enabled == True. A more in depth analysis of
this function will be given in the
SentienceScreen().get_caprica_voice_response(self, words)
function documentation.
def set_gender(self):
This function is called in
SentienceScreen().__init__(self, **kwargs). Through this
function we set the voice property of self.engine to use
the systems female voice option. A more in depth analysis
of this function will be given in the
SentienceScreen().set_gender(self) function documentation.
def set_speech_rate(self):
This function is called in
SentienceScreen().__init__(self, **kwargs). Through this
function we can set the self.engine speech rate property.
This function can in effect lower or increase the number
of words spoken by the chat bot per minute. A more in
depth analysis of this function will be given in the
SentienceScreen().set_speech_rate() functions
documentation.
def caprica_speak(self, words)
This function is called from a variety of locations for the
purpose of activating the voice feature of the chat bot
which is derived from self.engine. A more in depth analysis
of this function will be given in the
SentienceScreen().caprica_speak(self, words) functions
documentation.
def onEnd(self, name, completed)
This function is called everytime
self.caprica_speak(self, words) is called. This function is
fired when the self.caprica_speak event has ended. This is a
callabck which terminates the event queue of the
self.engine. A more in depth analysis of this function will
be given in the
SentienceScreen().onEnd(self, name, completed) functions
documentation.
def clear_viewport(self)
This function is caleld whenever the user clicks the
"Erase logs" button. Which is represented by the eraser on
the menu bar. This button only erases the text in the
viewport TextInput Widget. A more in depth analysis of
this function will be given in
SentienceScreen().clear_viewport(self) function
documentation.
def create_user_profile(self)
This function is highly redundant and will be removed in
the future. This function is called when ever the user inputs
their username for the first time. It runs some checks and
then simply calls self.caprica_speak() to speak the users
input username. A more in depth analysis of this function
will be given in the
SentienceScreen().create_user_profile(self) function
documentation.
def set_enable_disable_audio(self)
This function is called when the user clicks the
self.set_enable_disable_audio button which is represented by
either a red or blue speaker image on the menu bar. If
self.audio_enabled == True the chat bot can use audio to
communicate with the user and the image is a blue speaker.
If self.audio_disabled == True then the chat bot can only
communicate with the user via text. The button is also
then represented by a red speaker. This function will
update the image on the menu bar to reflect its current
status. A more in depth analysis of this function will
be given in the SentienceScreen().set_enable_disable_audio(self)
function documentation.
def set_enable_disable_voice(self)
This function is called when the user clicks the
self.set_enable_disable_voice button which is represented by
either a red or blue microphone image on the menu bar.
If self.voice_enabled == True the user can use their microphone
to communicate with the chat bot and the image is a blue
microphone. If self.voice_disabled == True then the user can
only communicate with the chat bot via text. The button is
also then represented by a red microphone. This function
will update the image on the menu bar to reflect its current
status. A more in depth analysis of this function will be
given in the SentienceScreen().set_enable_disable_voice(self)
function documentation.
def set_username(self)
This function is called from two locations both involve the
user inputting a desired username into a TextInput Widget
and hitting the "Enter" key on their keyboard. This function
sets the user name for the current user and can be changed
at any time. A more in depth analysis of this function will
be given in SentienceScreen().set_username() function
documentation.
def set_sex(self)
This function is called from two locations both involve the
user inputting their gender into a TextInput Widget and
hitting the "Enter" key on their keyboard. This function sets
the gender for the current user and can be changed at any time.
A more in depth analysis of this function will be given in
SentienceScreen().set_sex() function documentation.
def set_age(self)
This function is called from two locations both involve the
user inputting their age into a TextInput Widget and hitting
the "Enter" key on their keyboard. This function sets the users
age for the current user and can be changed at any time. A more
in depth analysis of this function will be given in
SentienceScreen().set_username() function documentation.
def print_files(self, path, filename)
This function is called when the user clicks on the "Print"
button on the menu bar. When called a Popup() window is
created and allows the user to navigate to any file that
they wish to print. within that window are two buttons.
Clicking the "Print" button will print the selected file
while clicking the "Close" button will close the Popup()
window. A more in depth analysis of this function will be
given in SentienceScreen().print_files(self, path, filename)
function documentation.
def create_dir(self, path)
This function is caleld from within
SentienceScreen().__init__(self, **kwargs). When executed it
checks to see if a specific system relative directory exists.
If it does the function returns nothing. If it doesn't exist
the function creates the directory and then calls the private
function self.__create_files(self, path). A more in depth
analysis of this function will given in
SentienceScreen().create_dir(self, path) function
documentation.
def write_logs(self)
This function is caleld when the user clicks the "Write Logs"
button on the menu bar which is represented by a pencil
image. It creates and writes the contents of self.master_log
to a text file which is either
"Users input username + _Conversations"
.txt or simply "Username_Conversations".txt.
A more in depth analysis of this function will be given in
SentienceScreen().write_logs(self) function documentation.
def open_print_file_dialog(self)
This function is caleld when the user clicks the "Print"
button on the menu bar. This is the function that calls
the Popup() window and allows the user to print a specific
chosen file after navigating to it; and then by clicking the
"Print files" button on that Popup() window.
def dismiss_popup(self)
This function is called when the user clicks the "close"
button on the PrintDialog() Popup() window. It closes the
Popup() window. A more in depth analysis of this function
will be given in the SentienceScreen().dismiss_popup()
function documentation.
def on_mouse_pos(self, instance pos):
This function is called everytime that the user moves
his or her mouse. If the mouse collides with any of the
the buttons on the menu bar (Action Bar) this function
checks the positions against the various if statements
which relate to the specific button. When the position
of the users mouse matches the positions outlined in
the statements. A tool tip is displayed, which presents
at leat the name of the button.
def display_tooltip(self, *args):
When this function is called the tooltip that relates
to the button (as explain in on_mouse_pos) is created
and added to the users screen. A clock event is then
scheduled to delete the tooltip from the screen automaticaly
after five seconds.
def close_tooltip(self, dt):
This function is called by the clock event described in
display_tooltip(). When this event is executed five seconds
after it's been registered. The tooltip widget is
deleted from the users screen.
def set_tooltip_text(self, text):
We call this function and supply a string to the text
parameter. This text relates to which ever button the users
mouse colldied with. The text is then set and that's what's
displayed to the user when the tooltip widget is added to
the screen.
def caprica_timer(self, _time):
This function is not currently in use. It's purpose
was to function as an independent threaded timer. The time
was based on the number supplied to the _time parameter.
This function ticks down until _time is == 0 displaying
the text ...Thinking... until _time is == 0; at which
time the text displayed is then ...Inactive...
def start_timer_thread(self, _time):
This function is not currently being used. But, it's
purpose was to setup and run the caprica_timer function.
def check_timer(self, _time):
This function is not being used. But, it's purpose was
to check the status of self.caprica_timer(_time). To
ensure that it ended when _time == 0 instead of counting
down beyond that into negative numbers.
def get_caprica_response(self):
This function is used to generate a response from the
user. It combines all but the voice input/output
responses. Basically, when you enter text into the
user_input TextInput this function is called after
the user hits the enter key. It then begins the
process of the chatbot generating a response. It
also runs as an independent thread.
def get_caprica_voice_thread(self, words):
This function is called when the users has activated the
voice option, then recorded their voice. Once that
recording process is completed this function is called.
This function then generates the chatbots response. It
also runs as an independent thread.
def start_get_response_thread(self):
We call this function after the user types some text
into the self.ids.user_input TextInput widget, and
then hits the enter key on their keyboard. This function
changes the text of the notification_widget to
'...Thinking...'. It then creates and runs the
self.get_caprica_response() thread.
def start_voice_response_thread(self):
We call this function after the voice option has been
activated, and the user has hit the record button. Once
the record button has been clicked, the user can begin
speaking into their microphone. Once done speaking
we create and run the self.get_caprica_voice_thread().
# TODO: Fix notification text.
def _is_thread_stopped(self):
We call this function to check if there are
any active threads running.
# TODO: This function is useless and should be removed.
def _stop_threading(self):
This function is called when an active thread is
supposed to be terminated. The idea is that the thread
will be interupted and thus die.
# TODO: Remove this because it doesn't do anything.
def get_user_text(self):
This function is called to return the current
text contained in the user_input TextInput widget.
def open_delete_file_dialog(self):
This function is called when the users clicks on the
delete file button which is located under the settings
submenu on the menu bar. It opens a Popup() window. Which
contains a filebrowser and allows the user to navigate to
the file that they wish to delete. They can then select
the file by clicking on it, and then clicking the delete
button on the Popup() window. Or click the cancel button
at any time which closes the window.
def delete_file(self, path, filename):
This function is called after the user has slected a
file in the Popup() window file browser and then clicked
the delete button. The file the user selected is then
deleted if it exists. If it doesn't exist the user is
informed.
# TODO: Remove path parameter as it does nothing at all.
def delete_all(self):
We call this function if the user clicks on the
**Delete All** button which is located in the
settings submenu on the menu bar. Clicking this
button deletes all files and folders generated by the
this program. It also then exits the program.
def display_user_conversation(self):
This function is called when the user clicks on
the display conversation button. It outputs the
contents of self.master_log into the view_port
Widget.
def increase_chatbot_voume(self, vol):
This function can be called to increase the volume
of self.engine. The volume is increased by vol. The
values it can take are between 0-1. With 0 being the
lowest and one being the highest. # TODO: Re-implement
def decrease_chatbot_voume(self, vol):
This function can be called to decrease the volume
of self.engine. The volume is idecreased by vol. The
values it can take are between 0-1. With 0 being the
lowest and one being the highest. # TODO: Re-implement
def set_volume(self, vol):
This function is called to set the volume of
self.engine. The volume is set to vol; vol can be
any value between 0-1.
def increase_rate_of_speech(self, value):
This funciton is called when the user increases
the rate of speech using the settings menu. The
current rate of self.engine is increased by value.
def decrease_rate_of_speech(self, value):
This funciton is called when the user decreases
the rate of speech using the settings menu. The
current rate of self.engine is decreased by value.
Private Members
---------------
def __create_files(self, path)
This function is called from within the
self.create_dir(self, path) function
which is called first by the
SentienceScreen().__init__(self, **kwargs) function.
This function when called checks to see if specific files
exist and if they don't
it creates them. If they do already exist if essentially
returns none. It's also called from one other function if a
search does not find the required files which means that
they were intentionally or unintentionally deleted. A more
in depth analysis of this function will be given in
SentienceScreen().__create_files(self, path) function
documentation.
def __append_file(self, world, path)
This function is caleld every time the user speaks to the
chat bot and every time that the chat bot responds. The data
passed to words is the response from both parties which is
then appened to a specific file(s) which path comes from
the path parameter. A more in depth analysis of this funciton
will be given in
SentienceScreen().__append_file(self, world, path) Note: The
"World" param is a typo and needs to be changed to "word/words"
def __set_thinking_text(self, bool):
This function is called to change the text and the
color of the text of the notification_widget TextInput
to reflect the current status of the program. Ie,
if the chatbot is about to generate a response it
says '...Thinking...' in red text. If the chatbot has
already generated a response it says '...Inactive...'
in blue text.
def __currently_thinking(self, bool):
This function is called to determine the current
status of the program and the chatbot. If it's
thinking or inactive.
# TODO: This function is redundant
Notes
-----
This is the essential widget. It's where everything happens.
'''
def __init__(self, **kwargs):
'''
def __init__(self, **kwargs):
Parameters
----------
param1 : self
Denotes this as being a member of the SentienceScreen()
class.
param2 : **kwargs
**kwargs stands for keyword arguments. This
allows an arbitrary number of keyword arguments to
be passed to the self.SentienceScreen().__init__()
function.
Attributes
----------
mouse_pos
mouse_pos is an optional, though required for our
purposes, parameter of the Window.bind() function.
We call this function which is a member of the Window()
class. To register a mouse event. We bind the
traditional mouse_pos event to our own
self.on_mouse_pos(). The mouse (pointer) is always
tracked; we're simply binding it to one of our
functions so that we can monitor the position and
instance of the pointer and call the bound function
when it's appropriate.
self.tooltip_open
self.tooltip_open is a member of the SentienceScreen()
class. We use this as a flag to determine whether or
not the ToolTipLabel widget is being shown.
self.mic
self.mic is a member of the SentienceScreen() class.
We use this to create our sr.Microphone() object.
This object allows us the ability to access and
manipulate the users microphone, assuming that
they have one. For later use in our program.
self.chatbot
self.chatbot is a member of the SentienceScreen()
class. We use this to create our ChatBot() object.
We can then manipulate self.chatbot, which we do,
throughout the rest of our program. This is one of
the core objects. Without this we have no chatbot.
self.audio_threshold
self.audio_threshold is a member of the
SentienceScreen() class. It stores an integer
value. This value enables us to force the users
microphone to ignore noises below a certain range.
self.audio_enabled
self.audio_enabled is a member of the SentienceScreen()
class. We use this boolean variable as a flag to tell
us if the user has enabled the audio option. The user
can enable the audio option by clicking on the red
speaker button on the menu bar (Action Bar). This
sets self.audio_enabled == True and changes the color
of the icon of the speaker button to blue.
self.audio_disabled
self.audio_disabled is a member of the SentienceScreen()
class. We use this boolean variable as a flag to tell
us if the user has disabled the audio option. The user
can disable the audio option by clicking on the blue
speaker button on the menu bar (Action Bar). This
sets self.audio_enabled == False and changes the color
of the icon of the speaker button to red.
self.record.dynamic_energy_threshold
We use this to prevent self.record from dynamically,
Ie, constantly, checking and setting the
energy_threshold of self.record. Ideally, this should
be left as a dynamic process but because no two
microphones were created equal, things get annoying
really fast. So I've simply set it to a static
variable for windows operating systems. And
dynamically set it once for linux operating
systems.
self.master_log
self.master_log is a member of the SentienceScreen()
class. It's a string variable that we use to store
the users conversation with the chatbot. Every time
that the user and the chatbot say something. Their
responses are added to this string. We use this
string to write data to files.
self.voice_enabled
self.voice_enabled is a member of the SentienceScreen()
class. We use this boolean variable as a flag to tell
us if the user has enabled the voice option. The user
can enable the voice option by clicking on the red
microphone button on the menu bar (Action Bar). This
sets self.voice_enabled == True and changes the color
of the icon of the microphone button to blue. It also
sets self.voice_disabled == False.
self.voice_disabled
self.voice_disabled is a member of the SentienceScreen()
class. We use this boolean variable as a flag to tell
us if the user has disabled the voice option. The user
can disable the voice option by clicking on the blue
microphone button on the menu bar (Action Bar). This
sets self.voice_enabled == False and changes the color
of the icon of the microphone button to red.
self.user_input
self.user_input is a member of the SentienceScreen()
class. We use this string variable to temporarily
store the contents of self.ids.user_input.text. Which
is the TextInput widget that contains the users text
comment to the chat bot. The data is returned to
self.user_input when the user enters some text and hits
the enter key on their keyboard while in the TextInput
widget.
self.user_profile
self.user_profile is a member of the SentienceScreen()
class. We use this dictionary data structure to store
the users information if they choose to give it. It
stores their desired username, age, and gender. It's
not a required thing. It's optional but personalizes
a few things and helps to maintain more efficient logs
of the conversations that the chatbots has. If there
are multiple people speaking to it.
self.username
self.username is a member of the SentienceScreen()
class. We use this string variable to store the users
desired username. Or, if the user elects not to supply
a username we give this a default value of 'User: '
and display it in the view_port TextInput widget
to display the current conversation to the user.
self.on_mouse_pos
self.on_mouse_pos is a member of the SentienceScreen()
class. It's a function that we use to track and handle
mouse events. If the user hovers their mouse over a
button on the Menu bar (Action Bar). This function is
called, which locates the mouse's position and instance
of the mouse when it collided with a button. It then
executes the appropriate if statements which then
create a ToolTipLabel widget, change the text to
reflect the button the user collided with. And then
displays that label as a tooltip over the button.
self.engine
self.engine is a member of the SentienceScreen()
class. We use this to create our object of the pyttsx3
class. This allows us to access the users systems text
to speech software so that the response generated by
the chatbot can be verbally delivered to the user. If
they elected to activate either the audio or voice
options.
self.record()
self.record() is a member of the SentienceScreen()
class. It's the object of the sr.Recognizer()
class. This allows us to accept, transcribe and later
manipulate an audio recording of the user. This
occurs when the user has activated the voice option.
self.__is_thinking
self.__is_thinking is a member of the SentienceScreen()
class. We use this boolean variable as a flag to tell
us whether or not the chatbot is preparing to generate
a response for the user. Or has just finished generating
a response to the user. When the chatbot is generating
a response the text of the notification_widget is set
to '...Thinking...' and the color of that text is red.
When the chatbot finishes generating a response and has
sent it to the user the text is set to '...Inactive...'
and is blue.
self.current_conversation
This is a member of the SentienceScreen() class.
We use this string variable to store the current
contents of the view_port Widget. When a tooltip
is displayed. We do this to prevent the loss of
the information that was previously being displayed.
Members
-------
super(SentienceScreen, self).__init__(**kwargs)
Here we're calling super dynamically to allow the
use of inheritance. This applies to the
SentienceScreenManager() class. It allows us to
work with the various widgets and screens.
Window.bind(mouse_pos = self.on_mouse_pos)
We call Window.bind() to bind the base Window
classes mouse_pos event to our mouse event. Which
in this case is self.on_mouse_pos()
threading.Event()
threading.Event() is a member of the threading()
class. We use this to create a threading event
which we'll use to interrupt active threads later on.
Factory.ToolTipLabel(text = (string))
We use this to register and instantiate
classes anywhere anytime. In our case though
we're just setting this up and setting the text
field to '', Ie, an empty string.
Config.set('input', 'mouse', 'mouse,disable_multitouch')
Config is a member of the kivy base class. We call
this in our SentienceScreen.__init__() method
to disable kivy's multitouch ability. This shuts off
users ability to interact via touch screen on touch
screen capable systems.
sys.platform.startswith(string)
This is a member of the sys() class. We call this
function to determine what operating system
the user is using. It returns a boolean value, if
the version matches either 'linux' or 'win'.
pyttsx3.init(string)
This is a member of the pyttsx3() class. We call
this function when we declare and instantiate
our object of this class. It also serves to
set the driver for the systems text to speech
software based on the users operating system.
sr.Recognizer()
This is a member of the speech_recognition() class.
We call this when we declare and instance our
self.record object. Which then allows us to
accept user input from a microphone and then
transcribe that audio response as a string for
later manipulation.
sr.Microphone()
This is a member of the speech_recognition() class.
We call this when we declare and instantiate our
self.mic object. Which then allows us to manipulate
the users microphone if they have one.
ChatBot()
Here we set up the ChatBot. We do so when we declare
and instantiate our self.chatbot object. We create and
supply the required filters and adapters which dictate
how this chatbot will learn.
self.set_gender()
This is a member of the SentienceScreen() class. We
call this function to set the gender of self.engine
to a female. This has the effect of changing the
default voice from a male, to female voice.
self.set_speech_rate()
This is a member of the SentienceScreen() class. We
call this function to set the speech rate of the
users systems speech to text software. In our
case we lower it so that when self.caprica_speak()
is called the resulting spoken string is done
so in a manner that the user can understand.
len()
We call the built in python len() or length
function to determine the length of self.username.
If the length is less than or equal to zero we
supply self.username with the default value of
'User: '. If the user elects later on to set their
own username then the self.user_profile overrides
this variable.
self.create_dir(path)
This is a member of the SentienceScreen() class.
We call this function to create a series of files
and folders that the user needs to operate
this program.
self.engine.connect(string, event)
We call this function to bind our events
to the pyttsx3 events. We connect self.onEnd
to the pyttsx3 'finished-utterance' event. This
event is fired when the pyttsx3 finishes speaking
whatever string was supplied to it. We also connect
self.caprica_speak to 'started-utterance' which is
fired when the systems text to speech software
begins speaking a supplied string.
Private Members
---------------
None
Exceptions
----------
None
Returns
-------
None
Notes
-----
This is the initialization method of SentienceScreen().
It's relatively comprehensive so I'm not going to explain
it again. It's easy enough to understand what's happening
when you reference the above comments.
'''
super(SentienceScreen, self).__init__(**kwargs)
Window.bind(mouse_pos=self.on_mouse_pos)
self.__is_thinking = False
self.tooltip_open = False
self.tooltip = Factory.ToolTipLabel(text=(''))
Config.set('input', 'mouse', 'mouse, disable_multitouch')
if sys.platform.startswith('linux'):
self.engine = pyttsx3.init('espeak')
elif sys.platform.startswith('win'):
self.engine = pyttsx3.init()
self.record = sr.Recognizer()
self.mic = sr.Microphone()
self.chatbot = ChatBot('Caprica',
storage_adapter='chatterbot.storage.SQLStorageAdapter',
logic_adapters=['chatterbot.logic.BestMatch', 'chatterbot.logic.TimeLogicAdapter', 'chatterbot.logic.MathematicalEvaluation'],
input_adapter='chatterbot.input.VariableInputTypeAdapter',
output_adapter='chatterbot.output.OutputAdapter',
filters=["chatterbot.filters.RepetitiveResponseFilter"],
database='RC_2001-06.db',
trainer='chatterbot.trainers.ChatterBotCorpusTrainer')
self.set_gender()
self.set_speech_rate()
self.audio_threshold = 400
self.record.dynamic_energy_threshold = False
self.master_log = str()
self.voice_enabled = False
self.voice_disabled = True
self.user_input = str()
self.audio_enabled = False
self.audio_disabled = True
self.user_profile = {1: 'Username', 2: 'Age', 3: 'Sex'}
self.username = str()