Skip to content
Snippets Groups Projects
Commit 57910821 authored by SurajSSingh's avatar SurajSSingh
Browse files

Cleaned up alarm scripts

parent 3949ebee
No related branches found
No related tags found
No related merge requests found
...@@ -2,4 +2,4 @@ ...@@ -2,4 +2,4 @@
.data/ .data/
*.pyc *.pyc
colab_copy.ipynb colab_copy.ipynb
.model/Viennese Poets.mp3 Viennese Poets.mp3
%%%%%%%%%%%%%%%%%%%%%
@inproceedings{de-marneffe-etal-2009-simple,
  title     = {Not a Simple Yes or No: Uncertainty in Indirect Answers},
  author    = {de Marneffe, Marie-Catherine and Grimm, Scott and Potts, Christopher},
  booktitle = {Proceedings of the {SIGDIAL} 2009 Conference},
  month     = sep,
  year      = {2009},
  address   = {London, UK},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/W09-3920},
  pages     = {136--143}
}
@inproceedings{mihaylov-etal-2018-suit,
  title     = {Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering},
  author    = {Mihaylov, Todor and Clark, Peter and Khot, Tushar and Sabharwal, Ashish},
  booktitle = {Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing},
  month     = oct # "--" # nov,
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/D18-1260},
  doi       = {10.18653/v1/D18-1260},
  pages     = {2381--2391},
  abstract  = {We present a new kind of question answering dataset, OpenBookQA, modeled after open book exams for assessing human understanding of a subject. The open book that comes with our questions is a set of 1326 elementary level science facts. Roughly 6000 questions probe an understanding of these facts and their application to novel situations. This requires combining an open book fact (e.g., metals conduct electricity) with broad common knowledge (e.g., a suit of armor is made of metal) obtained from other sources. While existing QA datasets over documents or knowledge bases, being generally self-contained, focus on linguistic understanding, OpenBookQA probes a deeper understanding of both the topic{---}in the context of common knowledge{---}and the language it is expressed in. Human performance on OpenBookQA is close to 92{\%}, but many state-of-the-art pre-trained QA methods perform surprisingly poorly, worse than several simple neural baselines we develop. Our oracle experiments designed to circumvent the knowledge retrieval bottleneck demonstrate the value of both the open book and additional facts. We leave it as a challenge to solve the retrieval problem in this multi-hop setting and to close the large gap to human performance.}
}
@inproceedings{min-etal-2020-ambigqa,
  title     = {{A}mbig{QA}: Answering Ambiguous Open-domain Questions},
  author    = {Min, Sewon and Michael, Julian and Hajishirzi, Hannaneh and Zettlemoyer, Luke},
  booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
  month     = nov,
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2020.emnlp-main.466},
  doi       = {10.18653/v1/2020.emnlp-main.466},
  pages     = {5783--5797},
  abstract  = {Ambiguity is inherent to open-domain question answering; especially when exploring new topics, it can be difficult to ask questions that have a single, unambiguous answer. In this paper, we introduce AmbigQA, a new open-domain question answering task which involves finding every plausible answer, and then rewriting the question for each one to resolve the ambiguity. To study this task, we construct AmbigNQ, a dataset covering 14,042 questions from NQ-open, an existing open-domain QA benchmark. We find that over half of the questions in NQ-open are ambiguous, with diverse sources of ambiguity such as event and entity references. We also present strong baseline models for AmbigQA which we show benefit from weakly supervised learning that incorporates NQ-open, strongly suggesting our new task and data will support significant future research effort. Our data and baselines are available at https://nlp.cs.washington.edu/ambigqa.}
}
@inproceedings{louis-etal-2020-id,
  title     = {{``}{I}{'}d rather just go to bed{''}: Understanding Indirect Answers},
  author    = {Louis, Annie and Roth, Dan and Radlinski, Filip},
  booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
  month     = nov,
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2020.emnlp-main.601},
  doi       = {10.18653/v1/2020.emnlp-main.601},
  pages     = {7411--7425},
  abstract  = {We revisit a pragmatic inference problem in dialog: Understanding indirect responses to questions. Humans can interpret {`}I{'}m starving.{'} in response to {`}Hungry?{'}, even without direct cue words such as {`}yes{'} and {`}no{'}. In dialog systems, allowing natural responses rather than closed vocabularies would be similarly beneficial. However, today{'}s systems are only as sensitive to these pragmatic moves as their language model allows. We create and release the first large-scale English language corpus {`}Circa{'} with 34,268 (polar question, indirect answer) pairs to enable progress on this task. The data was collected via elaborate crowdsourcing, and contains utterances with yes/no meaning, as well as uncertain, middle-ground, and conditional responses. We also present BERT-based neural models to predict such categories for a question-answer pair. We find that while transfer learning from entailment works reasonably, performance is not yet sufficient for robust dialog. Our models reach 82-88{\%} accuracy for a 4-class distinction, and 74-85{\%} for 6 classes.}
}
@inproceedings{rajpurkar-etal-2018-know,
  title     = {Know What You Don{'}t Know: Unanswerable Questions for {SQ}u{AD}},
  author    = {Rajpurkar, Pranav and Jia, Robin and Liang, Percy},
  booktitle = {Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
  month     = jul,
  year      = {2018},
  address   = {Melbourne, Australia},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/P18-2124},
  doi       = {10.18653/v1/P18-2124},
  pages     = {784--789},
  abstract  = {Extractive reading comprehension systems can often locate the correct answer to a question in a context document, but they also tend to make unreliable guesses on questions for which the correct answer is not stated in the context. Existing datasets either focus exclusively on answerable questions, or use automatically generated unanswerable questions that are easy to identify. To address these weaknesses, we present SQuADRUn, a new dataset that combines the existing Stanford Question Answering Dataset (SQuAD) with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuADRUn, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering. SQuADRUn is a challenging natural language understanding task for existing models: a strong neural system that gets 86{\%} F1 on SQuAD achieves only 66{\%} F1 on SQuADRUn. We release SQuADRUn to the community as the successor to SQuAD.}
}
@inproceedings{xu-etal-2019-asking,
  title     = {Asking Clarification Questions in Knowledge-Based Question Answering},
  author    = {Xu, Jingjing and Wang, Yuechen and Tang, Duyu and Duan, Nan and Yang, Pengcheng and Zeng, Qi and Zhou, Ming and Sun, Xu},
  booktitle = {Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
  month     = nov,
  year      = {2019},
  address   = {Hong Kong, China},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/D19-1172},
  doi       = {10.18653/v1/D19-1172},
  pages     = {1618--1629},
  abstract  = {The ability to ask clarification questions is essential for knowledge-based question answering (KBQA) systems, especially for handling ambiguous phenomena. Despite its importance, clarification has not been well explored in current KBQA systems. Further progress requires supervised resources for training and evaluation, and powerful models for clarification-related text understanding and generation. In this paper, we construct a new clarification dataset, CLAQUA, with nearly 40K open-domain examples. The dataset supports three serial tasks: given a question, identify whether clarification is needed; if yes, generate a clarification question; then predict answers base on external user feedback. We provide representative baselines for these tasks and further introduce a coarse-to-fine model for clarification question generation. Experiments show that the proposed model achieves better performance than strong baselines. The further analysis demonstrates that our dataset brings new challenges and there still remain several unsolved problems, like reasonable automatic evaluation metrics for clarification question generation and powerful models for handling entity sparsity.}
}
@article{anjali2014ambiguities,
  title   = {Ambiguities in Natural Language Processing},
  author  = {Anjali, M. K. and Babu, Anto P.},
  journal = {International Journal of Innovative Research in Computer and Communication Engineering},
  volume  = {2},
  number  = {5},
  pages   = {392--394},
  year    = {2014}
}
@book{jurafsky2009speech,
  title     = {Speech and Language Processing: An Introduction to Natural Language Processing, Computational Linguistics, and Speech Recognition},
  author    = {Jurafsky, D. and Martin, J. H.},
  series    = {Prentice Hall series in artificial intelligence},
  publisher = {Pearson Prentice Hall},
  year      = {2009},
  isbn      = {9780131873216},
  lccn      = {2008010335},
  url       = {https://books.google.com/books?id=fZmj5UNK8AQC}
}
from enum import Enum, auto from enum import Enum, auto
from datetime import time, datetime from datetime import datetime
from typing import Optional from typing import Optional
...@@ -28,27 +28,20 @@ class BaseAlarmClock: ...@@ -28,27 +28,20 @@ class BaseAlarmClock:
return self._wake_time return self._wake_time
def set_alarm(self, wake_time: datetime) -> datetime: def set_alarm(self, wake_time: datetime) -> datetime:
# print(f"SETTING ALARM to: {wake_time}")
self._current_state = AlarmState.SET self._current_state = AlarmState.SET
self._wake_time = wake_time self._wake_time = wake_time
return self._wake_time return self._wake_time
def start_alarm(self) -> None: def start_alarm(self) -> None:
# print("STARTING ALARM")
self._current_state = AlarmState.RUNNING self._current_state = AlarmState.RUNNING
def sound_alarm(self) -> None: def sound_alarm(self) -> None:
# print("SOUNDING ALARM")
self._current_state = AlarmState.PLAYING self._current_state = AlarmState.PLAYING
def snooze_alarm(self) -> None: def snooze_alarm(self) -> None:
# print("SNOOZING ALARM")
self._current_state = AlarmState.SNOOZED self._current_state = AlarmState.SNOOZED
def stop_alarm(self, deactivate: bool = True) -> None: def stop_alarm(self, deactivate: bool = True) -> None:
# print("STOPPING ALARM")
# if self._current_state is AlarmState.PLAYING:
# print("STOPPED PLAYING ALARM")
self._current_state = AlarmState.DEACTIVATED if deactivate else AlarmState.SET self._current_state = AlarmState.DEACTIVATED if deactivate else AlarmState.SET
def alarm_check_reached(self, current_time: datetime) -> bool: def alarm_check_reached(self, current_time: datetime) -> bool:
......
import subprocess
from typing import Optional from typing import Optional
from alarm import BaseAlarmClock, AlarmState from alarm import BaseAlarmClock, AlarmState
from datetime import time, datetime, timezone, timedelta from datetime import time, datetime, timezone, timedelta
from time import sleep from time import sleep
# from pynput import keyboard
import keyboard as kb import keyboard as kb
# SONG = ".model/Viennese_Poets.mp3"
# Simple Alarm Clock has 5 functionalities: # Simple Alarm Clock has 5 functionalities:
# 1. Set Alarm Time - Set the time for the alarm to sound, does NOT make the alarm active # 1. Set Alarm Time - Set the time for the alarm to sound, does NOT make the alarm active
...@@ -15,20 +12,12 @@ import keyboard as kb ...@@ -15,20 +12,12 @@ import keyboard as kb
# 4. Snooze Alarm - Stop the alarm sound and wait for some time to play the alarm again # 4. Snooze Alarm - Stop the alarm sound and wait for some time to play the alarm again
# 5. Stop Alarm - If the alarm is active or is playing, stop the alarm (deactivate state) # 5. Stop Alarm - If the alarm is active or is playing, stop the alarm (deactivate state)
# ALARM_TIME = datetime.combine(
# datetime.today(),
# # (datetime.today() + timedelta(days=1)).date(),
# time(16, 40, 0)
# ).astimezone(tz=timezone.utc)
# SNOOZE_KEY = keyboard.Key.up
# ALARM_OFF_KEY = keyboard.Key.esc
SNOOZE_SEC = 5 SNOOZE_SEC = 5
class SimpleAlarmClock(BaseAlarmClock): class SimpleAlarmClock(BaseAlarmClock):
def __init__(self, wake_time: Optional[datetime] = None): def __init__(self, wake_time: Optional[datetime] = None):
super().__init__(wake_time) super().__init__(wake_time)
# self._alarm = None
def set_alarm(self, wake_time: datetime) -> datetime: def set_alarm(self, wake_time: datetime) -> datetime:
print(f"SETTING ALARM to: {wake_time}") print(f"SETTING ALARM to: {wake_time}")
...@@ -40,17 +29,11 @@ class SimpleAlarmClock(BaseAlarmClock): ...@@ -40,17 +29,11 @@ class SimpleAlarmClock(BaseAlarmClock):
def sound_alarm(self) -> None: def sound_alarm(self) -> None:
print("SOUNDING ALARM") print("SOUNDING ALARM")
# if not self._alarm:
# self._alarm = subprocess.Popen(["omxplayer", "--no-keys", SONG, "&"])
super(SimpleAlarmClock, self).sound_alarm() super(SimpleAlarmClock, self).sound_alarm()
def snooze_alarm(self) -> None: def snooze_alarm(self) -> None:
if self.current_state is AlarmState.PLAYING: if self.current_state is AlarmState.PLAYING:
print("SNOOZING ALARM") print("SNOOZING ALARM")
# print(f"{self._alarm.pid}")
# if self._alarm is not None:
# subprocess.Popen(["sudo", "kill", f"{self._alarm.pid}"])
# self._alarm = None
super(SimpleAlarmClock, self).snooze_alarm() super(SimpleAlarmClock, self).snooze_alarm()
sleep(SNOOZE_SEC) sleep(SNOOZE_SEC)
print("UN-SNOOZING ALARM") print("UN-SNOOZING ALARM")
...@@ -58,9 +41,6 @@ class SimpleAlarmClock(BaseAlarmClock): ...@@ -58,9 +41,6 @@ class SimpleAlarmClock(BaseAlarmClock):
def stop_alarm(self, deactivate: bool = True) -> None: def stop_alarm(self, deactivate: bool = True) -> None:
print("STOPPING ALARM") print("STOPPING ALARM")
# if self._alarm is not None:
# subprocess.Popen(["sudo", "kill", f"{self._alarm.pid}"])
# self._alarm = None
if self.current_state is AlarmState.PLAYING: if self.current_state is AlarmState.PLAYING:
print("STOPPED PLAYING ALARM") print("STOPPED PLAYING ALARM")
super(SimpleAlarmClock, self).stop_alarm() super(SimpleAlarmClock, self).stop_alarm()
...@@ -80,14 +60,6 @@ class SimpleAlarmClock(BaseAlarmClock): ...@@ -80,14 +60,6 @@ class SimpleAlarmClock(BaseAlarmClock):
self.sound_alarm() self.sound_alarm()
while self.current_state is not AlarmState.DEACTIVATED: while self.current_state is not AlarmState.DEACTIVATED:
pass pass
# with keyboard.Events() as events:
# for event in events:
# if event.key == ALARM_OFF_KEY:
# self.stop_alarm()
# break
# elif event.key == SNOOZE_KEY:
# self.snooze_alarm()
# break
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -19,13 +19,13 @@ ALARM_OFF_KEY = keyboard.Key.esc ...@@ -19,13 +19,13 @@ ALARM_OFF_KEY = keyboard.Key.esc
MODEL_MEMORY: int = 5 MODEL_MEMORY: int = 5
SECONDS_IN_MINUTE: float = 5 SECONDS_IN_MINUTE: float = 5
SNOOZE_SEC: float = 60 SNOOZE_SEC: float = 5
WAIT_SEC: float = 1 WAIT_SEC: float = 1
MODEL_PATH: str = ".model/attention" MODEL_PATH: str = ".model/attention"
VLC_INSTANCE = vlc.Instance("--input-repeat=999") VLC_INSTANCE = vlc.Instance("--input-repeat=999")
VLC_PLAYER = VLC_INSTANCE.media_player_new() VLC_PLAYER = VLC_INSTANCE.media_player_new()
SONG = VLC_INSTANCE.media_new(".model/Viennese Poets.mp3" ) SONG = VLC_INSTANCE.media_new("Viennese Poets.mp3")
# Input Array: [minutes_since_start, current_hour_utc, current_minute_utc, awake_prob, rem_prob, light_prob, deep_prob] # Input Array: [minutes_since_start, current_hour_utc, current_minute_utc, awake_prob, rem_prob, light_prob, deep_prob]
# Output Array: [awake_prob, rem_prob, light_prob, deep_prob] # Output Array: [awake_prob, rem_prob, light_prob, deep_prob]
...@@ -115,7 +115,7 @@ class SmartAlarmClock(BaseAlarmClock): ...@@ -115,7 +115,7 @@ class SmartAlarmClock(BaseAlarmClock):
while len(time_queue) < time_queue.maxlen: while len(time_queue) < time_queue.maxlen:
current_utc = datetime.utcnow() current_utc = datetime.utcnow()
time_queue.append(np.array( time_queue.append(np.array(
[minutes_since_start, current_utc.hour, current_utc.minute, 1.0, 0.0, 0.0, 0.0] [minutes_since_start, current_utc.hour, current_utc.minute, 0.0, 0.75, 0.15, 0.05]
)) ))
# Wait a minute for next timestep # Wait a minute for next timestep
while (datetime.utcnow() - current_utc).seconds < SECONDS_IN_MINUTE: while (datetime.utcnow() - current_utc).seconds < SECONDS_IN_MINUTE:
......
...@@ -4,21 +4,18 @@ import sys ...@@ -4,21 +4,18 @@ import sys
from alarm import BaseAlarmClock, AlarmState from alarm import BaseAlarmClock, AlarmState
from datetime import time, datetime, timedelta, timezone from datetime import time, datetime, timedelta, timezone
from time import sleep from time import sleep
# from pynput import keyboard
import keyboard as kb import keyboard as kb
from collections import deque from collections import deque
import numpy as np import numpy as np
# import tensorflow as tf
import tflite_runtime.interpreter as tflite import tflite_runtime.interpreter as tflite
import vlc import vlc
# Lower threshold means it only needs to be somewhat probable to be early wake up # Lower threshold means it only needs to be somewhat probable to be early wake up
# >= 1 means never allow early wake up in that time # >= 1 means never allow early wake up in that time
WAKE_THRESHOLD = np.array([0.5, 0.75, 0.9, 1.0]) # Awake, REM, Light, Deep
WAKE_THRESHOLD = np.array([0.65, 0.85, 0.9, 1.0])
# SNOOZE_KEY = keyboard.Key.up
SNOOZE_KEY = "space" SNOOZE_KEY = "space"
# ALARM_OFF_KEY = keyboard.Key.esc
ALARM_OFF_KEY = "esc" ALARM_OFF_KEY = "esc"
MODEL_MEMORY: int = 5 MODEL_MEMORY: int = 5
...@@ -29,7 +26,7 @@ WAIT_SEC: float = 1 ...@@ -29,7 +26,7 @@ WAIT_SEC: float = 1
MODEL_PATH: str = ".model/attention_model.tflite" MODEL_PATH: str = ".model/attention_model.tflite"
VLC_INSTANCE = vlc.Instance("--input-repeat=999") VLC_INSTANCE = vlc.Instance("--input-repeat=999")
VLC_PLAYER = VLC_INSTANCE.media_player_new() VLC_PLAYER = VLC_INSTANCE.media_player_new()
SONG = VLC_INSTANCE.media_new(".model/Viennese Poets.mp3" ) SONG = VLC_INSTANCE.media_new("Viennese Poets.mp3")
# Input Array: [minutes_since_start, current_hour_utc, current_minute_utc, awake_prob, rem_prob, light_prob, deep_prob] # Input Array: [minutes_since_start, current_hour_utc, current_minute_utc, awake_prob, rem_prob, light_prob, deep_prob]
# Output Array: [awake_prob, rem_prob, light_prob, deep_prob] # Output Array: [awake_prob, rem_prob, light_prob, deep_prob]
...@@ -41,14 +38,8 @@ def softmax(arr): ...@@ -41,14 +38,8 @@ def softmax(arr):
def model_prediction(model_func, current_times_prob: Deque[np.array]) -> np.array: def model_prediction(model_func, current_times_prob: Deque[np.array]) -> np.array:
# print(f"Prob shape: {np.array([current_times_prob]).shape}")
if model_func: if model_func:
# prediction = model.predict(np.array([current_times_prob]))[0] return softmax(model_func(x=np.array([current_times_prob]))[0])
prediction = model_func(x=np.array([current_times_prob]))
print(prediction)
# print(f"prediction: {prediction}")
# print(f"softmax: {softmax(prediction)}")
return softmax(prediction)
else: else:
return np.zeros(4) return np.zeros(4)
...@@ -158,22 +149,10 @@ class SmartAlarmClock(BaseAlarmClock): ...@@ -158,22 +149,10 @@ class SmartAlarmClock(BaseAlarmClock):
print(f"{datetime.utcnow()}: last_prediction = {last_prediction}") print(f"{datetime.utcnow()}: last_prediction = {last_prediction}")
sleep(WAIT_SEC) sleep(WAIT_SEC)
# While the alarm clock is sounding off # While the alarm clock is not turned off
self.sound_alarm() self.sound_alarm()
while self.current_state is not AlarmState.DEACTIVATED: while self.current_state is not AlarmState.DEACTIVATED:
pass pass
# event = kb.read_event()
# if event.event_type == kb.KEY_DOWN:
# # with keyboard.Events() as events:
# # for event in events:
# # if event.key == ALARM_OFF_KEY:
# if event.name == ALARM_OFF_KEY:
# self.stop_alarm()
# break
# elif event.name == SNOOZE_KEY:
# # elif event.key == SNOOZE_KEY:
# self.snooze_alarm()
# break
if __name__ == '__main__': if __name__ == '__main__':
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment