import base64
import datetime as dt
import logging
import os
import sys
import time
import traceback
from datetime import datetime

import pandas as pd
import requests
from bs4 import BeautifulSoup
from google.oauth2 import service_account
from googleapiclient.discovery import build
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from sqlalchemy import inspect
from sqlsorcery import MSSQL
from tenacity import retry, stop_after_attempt, wait_exponential, TryAgain

from mailer import Mailer


def configure_logging():
    """Configure logging to both a log file and stdout."""
    logging.basicConfig(
        handlers=[
            logging.FileHandler(filename="app.log", mode="w+"),
            logging.StreamHandler(sys.stdout),
        ],
        # default to INFO when DEBUG_MODE is unset; int(None) would raise a TypeError
        level=logging.DEBUG if int(os.getenv("DEBUG_MODE", 0)) else logging.INFO,
        format="%(asctime)s | %(levelname)s: %(message)s",
        datefmt="%Y-%m-%d %I:%M:%S%p %Z",
    )
    # silence noisy third-party loggers
    logging.getLogger("google_auth_oauthlib").setLevel(logging.ERROR)
    logging.getLogger("googleapiclient").setLevel(logging.ERROR)
    logging.getLogger("google").setLevel(logging.ERROR)


def request_report_export():
"""Selenium steps to request report export in SeeSaw."""
browser = create_driver()
browser.implicitly_wait(5)
login(browser)
get_student_activity_report(browser)
logging.debug("Requested report export.")


def create_driver():
    """Create the Firefox browser driver."""
    # FirefoxProfile and the firefox_profile keyword are Selenium 3 APIs;
    # Selenium 4 moved profile handling onto Options
    profile = webdriver.FirefoxProfile()
    return webdriver.Firefox(firefox_profile=profile)


def login(browser):
    """Log into the SeeSaw application as an administrator."""
    browser.get("https://app.seesaw.me/#/login?role=org_admin")
    # fixed wait for the login form to render before locating the fields
    time.sleep(5)
user_field = browser.find_element_by_id("sign_in_email")
user_field.send_keys(os.getenv("SEESAW_USER"))
password_field = browser.find_element_by_id("sign_in_password")
password_field.send_keys(os.getenv("SEESAW_PASSWORD"))
submit_button = browser.find_element_by_xpath(
"//button[text()=' Administrator Sign In ']"
)
submit_button.click()


def get_student_activity_report(browser):
    """Click to request the student activity report export."""
WebDriverWait(browser, 100).until(
EC.element_to_be_clickable(
(By.XPATH, "//a[@ng-click='startStudentActivityReportForDistrict()']")
)
).click()
browser.implicitly_wait(5)
WebDriverWait(browser, 100).until(
EC.element_to_be_clickable(
(By.XPATH, "//div[text()='Get Student Activity Report']")
)
).click()


def get_credentials():
    """Generate a service account credentials object for the Google API."""
SCOPES = [
"https://www.googleapis.com/auth/gmail.readonly",
]
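    # the subject parameter uses Google Workspace domain-wide delegation: the
    # service account impersonates that admin address so it can read its inbox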
return service_account.Credentials.from_service_account_file(
"service.json", scopes=SCOPES, subject=os.getenv("SERVICE_ACCOUNT_EMAIL")
)


def save_data_file(link_text):
    """Use requests to download the file through the link."""
    r = requests.get(link_text)
    # fail fast if the link has expired or the download otherwise errored
    r.raise_for_status()
    with open("activity_data.csv", "wb") as f:
        f.write(r.content)
    logging.info("Downloaded SeeSaw Activity file from email.")


def retrieve_activity_data(gmail_service):
    """Find the download link in the email and get the file."""
    message_id = retrieve_message_id(gmail_service)
    link_text = parse_email_message(gmail_service, message_id)
    save_data_file(link_text)
    # header=1 skips the date-range line that precedes the real column headers
    df = pd.read_csv("activity_data.csv", sep=",", header=1)
    logging.info(f"Read {len(df)} records from csv into df.")
return df


@retry(
wait=wait_exponential(multiplier=2, min=30, max=120), stop=stop_after_attempt(10)
)
def retrieve_message_id(service):
"""Find the message id from today that matches the subject."""
today = dt.datetime.now().strftime("%A, %B %d, %Y")
query = f"from:do-not-reply@seesaw.me Student Activity Report for KIPP Bay Area Schools on {today}"
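    # Illustrative query (hypothetical date): 'from:do-not-reply@seesaw.me Student
    # Activity Report for KIPP Bay Area Schools on Friday, August 14, 2020';
    # Gmail matches the bare words against the message subject and body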
    results = (
        service.users()
        .messages()
        # userId="me" means the impersonated inbox from the service account login
        .list(
            userId="me",
            q=query,
        )
        .execute()
    )
if results.get("resultSizeEstimate") != 0:
# only need one message if there are multiple within a day
# SeeSaw data returns from previous day onward
logging.info(f"Found email message for {today}.")
return results.get("messages")[0].get("id")
    else:
        # raising here triggers the tenacity retry above until the email arrives
        raise Exception("Email message not found in inbox.")


@retry(wait=wait_exponential(multiplier=2, min=10, max=40), stop=stop_after_attempt(5))
def parse_email_message(gmail_service, message_id):
"""Get download link from message parts of the given message id."""
results = gmail_service.users().messages().get(userId="me", id=message_id).execute()
parts = results.get("payload").get("parts")
    if not parts:
        # sometimes the message is found but parts comes back empty; retry via tenacity
        raise TryAgain
    link_text = None
    for part in parts:
        if part.get("mimeType") == "text/html":
            link_text = find_download_link(part)
    return link_text


def find_download_link(part):
    """Find the download link within the email html body."""
    # Gmail returns the body as URL-safe base64; translate back to the standard
    # alphabet before decoding (equivalent to base64.urlsafe_b64decode)
    body_data = part.get("body").get("data").replace("-", "+").replace("_", "/")
    message = base64.b64decode(bytes(body_data, "UTF-8"))
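    # Illustrative decoded body (hypothetical URL): the HTML contains an anchor like
    #   <a href="https://files.seesaw.me/.../student_activity_report.csv">Download</a>
    # and the first href containing ".csv" is returned below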
    # use beautifulsoup to find the csv hyperlink
    soup = BeautifulSoup(message, features="html.parser")
    for link in soup.find_all("a"):
        href = link.get("href")
        if href and ".csv" in href:
            return href
    return None


def create_extract_date(df):
    """Add a column to the dataframe that records when the file was uploaded."""
    df["Date Uploaded"] = datetime.now().date()
    return df


def process_daily_activity(sql, df):
    """ETL daily activity columns from df into the data warehouse."""
    # drop the weekly aggregate columns; process_weekly_activity loads those separately
    df = df.drop(
columns=[
"Days Active in Past Week",
"Posts Added to Student Journal in Past Week",
"Comments in Past Week",
"Posts Added to Student Journal Yesterday",
"Comments Yesterday",
"Days with Posts Added to Student Journal In Past Week",
"Days Commented in Past Week",
"Connected Family Members",
"Active Yesterday (1 = yes)",
"Active in Last 7 Days (1 = yes)",
],
)
df.columns = df.columns.str.replace(" ", "_")
df = pivot_by_date(df)
df = reformat_active_date(df)
load_newest_table_data(sql, df, "SeeSaw_Student_Activity")


def pivot_by_date(df):
    """Melt the daily activity columns so each student has one row per active date."""
    # only the columns named like 'Active_MM/DD'
    active_dates = [col for col in df.columns if col.startswith("Active")]
df = df.melt(
id_vars=[
"School_Name",
"Student_Name",
"Student_ID",
"Grade_Level",
"Last_Active_Date",
"Date_Uploaded",
"Link_to_Student_Portfolio",
],
value_vars=active_dates,
var_name="Active_Date",
value_name="WasActive",
)
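    # Illustrative melt (hypothetical values): a wide row such as
    #   Student_ID=123, Active_08/16=1.0, Active_08/17=NaN
    # becomes one long row per date:
    #   (123, Active_Date='Active_08/16', WasActive=1.0)
    #   (123, Active_Date='Active_08/17', WasActive=NaN)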
# rearrange the columns
df = df[
[
"School_Name",
"Student_Name",
"Student_ID",
"Grade_Level",
"Active_Date",
"WasActive",
"Last_Active_Date",
"Date_Uploaded",
"Link_to_Student_Portfolio",
]
]
return df


def reformat_active_date(df):
    """Reformat the 'Active_Date' values to only the date.

    Before the transformation, 'Active_Date' held values like 'Active_MM/DD';
    strip the prefix and append the current year.
    Example: 'Active_08/16' becomes '08/16/2020' when run in 2020."""
    df["Active_Date"] = df["Active_Date"].str.replace("Active_", "")
    # append the current year; dates from a previous calendar year would be mislabeled
    df["Active_Date"] = df["Active_Date"].apply(lambda x: f"{x}/{datetime.now().year}")
not_active = 0
df["WasActive"] = df["WasActive"].fillna(not_active)
df["Active_Date"] = df["Active_Date"].astype("datetime64[ns]")
return df


def process_weekly_activity(sql, df):
    """ETL weekly activity columns from df into the data warehouse."""
    columns = {
        "School Name": "School_Name",
        "Student Name": "Student_Name",
        "Student ID": "Student_ID",
        "Grade Level": "Grade_Level",
        "Last Active Date": "Last_Active_Date",
        "Days Active in Past Week": "Days_Active_Past_Week",
        "Posts Added to Student Journal in Past Week": "Posts_Added_Past_Week",
        "Comments in Past Week": "Comments_Past_Week",
        "Days with Posts Added to Student Journal In Past Week": "Days_with_Posts_Added_Past_Week",
        "Days Commented in Past Week": "Days_Commented_Past_Week",
        "Date Uploaded": "Date_Uploaded",
    }
df = df[columns.keys()].copy()
df.rename(columns=columns, inplace=True)
df = read_week_date_range_from_file(df)
df["SchoolYear4Digit"] = os.getenv("SCHOOLYEAR_4DIGIT")
sql.insert_into("SeeSaw_Student_Activity_Weekly", df)
logging.info(f"Inserted {len(df)} new records into SeeSaw_Student_Activity_Weekly.")


def read_week_date_range_from_file(df):
"""Get the week range from the file and store in df.
The first row of the file is a string that indicates the date range.
The csv data doesn't start until the second row.
"""
with open("activity_data.csv") as f:
first_line = f.readline()
dates = [string.strip() for string in first_line.split(" - ")]
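    # Illustrative first line (hypothetical values), matching the "%Y-%m-%d %H:%M %Z%z"
    # format parsed below: '2020-08-10 00:00 UTC+0000 - 2020-08-16 23:59 UTC+0000'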
df["WeekStart"] = dt.datetime.strptime(dates[0], "%Y-%m-%d %H:%M %Z%z").date()
df["WeekEnd"] = dt.datetime.strptime(dates[1], "%Y-%m-%d %H:%M %Z%z").date()
return df


def load_newest_table_data(sql, df, table_name):
    """Insert the newest data into the given database table, based on the Last_Active_Date column.

    table_name: the name of the table that we're inserting data into
    """
    if inspect(sql.engine).has_table(table_name, schema="custom"):
        result = sql.query(
            f"SELECT MAX(Last_Active_Date) AS Last_Active_Date FROM custom.{table_name}"
        )
        latest_timestamp = result["Last_Active_Date"][0]
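        # Illustrative incremental load (hypothetical dates): if the table already
        # holds rows through 2020-08-15, only rows with a later Last_Active_Date
        # survive the filter below and are appended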
        if latest_timestamp is not None:
            df = df[df["Last_Active_Date"] > latest_timestamp]
    sql.insert_into(table_name, df)
    logging.info(f"Inserted {len(df)} new records into {table_name}.")


def main():
sql = MSSQL()
configure_logging()
creds = get_credentials()
gmail_service = build("gmail", "v1", credentials=creds)
request_report_export()
df = retrieve_activity_data(gmail_service)
df = create_extract_date(df)
process_daily_activity(sql, df)
process_weekly_activity(sql, df)


if __name__ == "__main__":
    try:
        main()
        # remains None when main() succeeds
        error_message = None
except Exception as e:
logging.exception(e)
error_message = traceback.format_exc()
Mailer("SeeSaw_Activity").notify(error_message=error_message)