DAG: dc202312_safety_inspection

schedule: 0 15 * * *


Task Instance: getMongoDB


Task Instance Details

Dependencies Blocking Task From Getting Scheduled
Dependency Reason
Dagrun Running Task instance's dagrun was not in the 'running' state but in the state 'success'.
Task Instance State Task is in the 'success' state which is not a valid state for execution. The task must be cleared in order to be run.
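Because the task instance is already in the 'success' state, it has to be cleared before it can run again. With the Airflow 1.x-style CLI this deployment appears to use, a clear scoped to this task and execution date would look roughly like this (the date range is taken from the execution_date listed below):

airflow clear dc202312_safety_inspection -t getMongoDB -s 2024-09-10 -e 2024-09-11

The same clear can also be performed from the task instance dialog in the web UI.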
Attribute: python_callable
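The callable below references several module-level names defined elsewhere in the DAG file. A minimal sketch of what it assumes is in scope (the dRoW_api_end_url value is not shown in this dump, and on older pandas versions json_normalize is imported from pandas.io.json instead):

import json
import requests
import pandas as pd
from pandas import json_normalize
from sqlalchemy import create_engine

dRoW_api_end_url = "..."  # module-level API base URL; actual value not shown here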
def getMongoDB(**context):
    # Auth token pushed via XCom by the upstream getDrowToken task
    token = context.get("ti").xcom_pull(key="token")

    # Fetch the S01 (daily site safety inspection) document export
    response_s01 = requests.get(
        url=f"{dRoW_api_end_url}/api/module/document-export/airflow/workflow/6597889461a8f490bf96667f?export_type=0",
        headers={
            "x-access-token": f"Bearer {token}",
            "ICWPxAccessKey": "nd@201907ICWP_[1AG:4UdI){n=b~"
        }
    )

    # Fetch the S02 (weekly site safety inspection) document export
    response_s02 = requests.get(
        url=f"{dRoW_api_end_url}/api/module/document-export/airflow/workflow/65ae1f219aad62a7971a07bb?export_type=0",
        headers={
            "x-access-token": f"Bearer {token}",
            "ICWPxAccessKey": "nd@201907ICWP_[1AG:4UdI){n=b~"
        }
    )

    RISC_Data_01 = json.loads(response_s01.text)
    RISC_Data_02 = json.loads(response_s02.text)

    # Database server host
    host           = 'drowdatewarehouse.crlwwhgepgi7.ap-east-1.rds.amazonaws.com'
    # User name of the database server
    dbUserName     = 'dRowAdmin'
    # Password for the database user
    dbUserPassword = 'drowsuper'
    # Name of the database
    database       = 'drowDateWareHouse'
    # Character set (not used by the PostgreSQL connection below)
    charSet        = "utf8mb4"
    # PostgreSQL port
    port           = "5432"

    # SQLAlchemy connection URL (note: newer SQLAlchemy releases require the
    # 'postgresql://' scheme; 'postgres://' only works on older versions)
    conn_string = ('postgres://' +
                   dbUserName + ':' +
                   dbUserPassword +
                   '@' + host + ':' + port +
                   '/' + database)
    
    db = create_engine(conn_string)
    conn = db.connect()

    # Accumulators: all parsed records plus per-month item/concern tallies
    full_df = pd.DataFrame()
    monthly_summary = {}
    with conn:
        for entry in RISC_Data_01:
            df_nested_list = json_normalize(entry['data'])

            # List to hold one record dict per checklist item
            df_list = []
            # Get total number of tables
            total_tables = len([key for key, val in df_nested_list.items() if 'Table' in key])

            # Inspection date
            date_of_inspection = df_nested_list['Date of Inspection'][0]
            if date_of_inspection is None:
                continue
            # Contract title
            contract_title = df_nested_list['Contract Title'][0]

            # Process each numbered table (i runs from 1 to total_tables - 1)
            for i in range(1, total_tables):
                table_key = f"Table {i}"
                if table_key not in df_nested_list:
                    continue
                
                df_table = df_nested_list[table_key]

                for record in df_table[0]:
                    item_no = list(record.values())[0].split(" ")[0]
                    group_key = list(record.keys())[0]

                    dict_record = {
                        'Date of Inspection': date_of_inspection,
                        'Month': date_of_inspection[:7],
                        'Contract Title': contract_title,
                        'Group No.': str(i),
                        'Group': group_key,
                        'Item No.': item_no,
                        'Description': record[group_key].replace(f"{item_no} ", ""),
                        'Template': 'S01_Daily Site Safety Inspection Checklist',
                    }

                    record.pop(list(record.keys())[0])
                    for k, v in record.items():
                        dict_record[k.replace(f'{i}. ', "")] = v

                    # Classify timeliness; the string comparison below assumes ISO-style date values
                    if 'Date completed' not in dict_record or 'Agreed date for completion' not in dict_record:
                        dict_record['On Time'] = None
                    elif not dict_record['Date completed'] or not dict_record['Agreed date for completion']:
                        dict_record['On Time'] = None
                    elif dict_record['Date completed'] <= dict_record['Agreed date for completion']:
                        dict_record['On Time'] = "On-Time"
                    else:
                        dict_record['On Time'] = "Late"

                    df_list.append(dict_record)

                    # Tally total items and 'No' safety-compliance concerns per month
                    if date_of_inspection[:7] in monthly_summary:
                        monthly_summary[date_of_inspection[:7]]['items'] += 1
                        if dict_record['Safety Compliance'] == 'No':
                            monthly_summary[date_of_inspection[:7]]['concern'] += 1
                    else:
                        monthly_summary[date_of_inspection[:7]] = {
                            'items': 1,
                            'concern': 1 if dict_record['Safety Compliance'] == 'No' else 0
                        }

            df_combined = pd.DataFrame(data=df_list)

            # Append this entry's records to the running DataFrame
            if not full_df.empty and not df_combined.empty:
                full_df = pd.concat([full_df, df_combined], ignore_index=True)
            elif not df_combined.empty:
                full_df = df_combined

        for entry in RISC_Data_02:
            df_nested_list = json_normalize(entry['data'])

            # List to hold one record dict per checklist item
            df_list = []
            # Get total number of tables
            total_tables = len([key for key, val in df_nested_list.items() if 'Table' in key])

            # Inspection date
            date_of_inspection = df_nested_list['Date of Inspection'][0]
            if date_of_inspection is None:
                continue
            # Contract title
            contract_title = df_nested_list['Contract Title'][0]

            # Process each numbered table (i runs from 1 to total_tables - 1)
            for i in range(1, total_tables):
                table_key = f"Table {i}"
                if table_key not in df_nested_list:
                    continue
                
                df_table = df_nested_list[table_key]

                for record in df_table[0]:
                    item_no = list(record.values())[0].split(" ")[0]
                    group_key = list(record.keys())[0]

                    dict_record = {
                        'Date of Inspection': date_of_inspection,
                        'Month': date_of_inspection[:7],
                        'Contract Title': contract_title,
                        'Group No.': str(i),
                        'Group': group_key,
                        'Item No.': item_no,
                        'Description': record[group_key].replace(f"{item_no} ", ""),
                        'Template': 'S02_Weekly Site Safety Inspection Checklist',
                    }

                    record.pop(list(record.keys())[0])
                    for k, v in record.items():
                        dict_record[k.replace(f'{i}. ', "")] = v

                    if 'Date completed' not in dict_record or 'Agreed date for completion' not in dict_record:
                        dict_record['On Time'] = None
                    elif not dict_record['Date completed'] or not dict_record['Agreed date for completion']:
                        dict_record['On Time'] = None
                    elif dict_record['Date completed'] <= dict_record['Agreed date for completion']:
                        dict_record['On Time'] = "On-Time"
                    else:
                        dict_record['On Time'] = "Late"

                    df_list.append(dict_record)
                    if date_of_inspection[:7] in monthly_summary:
                        monthly_summary[date_of_inspection[:7]]['items'] += 1
                        if dict_record['Safety Compliance'] == 'No':
                            monthly_summary[date_of_inspection[:7]]['concern'] += 1
                    else:
                        monthly_summary[date_of_inspection[:7]] = {
                            'items': 1,
                            'concern': 1 if dict_record['Safety Compliance'] == 'No' else 0
                        }

            df_combined = pd.DataFrame(data=df_list)

            # Append this entry's records to the running DataFrame
            if not full_df.empty and not df_combined.empty:
                full_df = pd.concat([full_df, df_combined], ignore_index=True)
            elif not df_combined.empty:
                full_df = df_combined            

        # Keep only non-compliant records, then sort by inspection date, contract and item
        non_compliant_df = full_df[full_df['Safety Compliance'] == 'No'].copy()
        non_compliant_df.sort_values(by=['Date of Inspection', 'Contract Title', 'Item No.'], inplace=True)
        # Clean up column names for SQL
        non_compliant_df.columns = non_compliant_df.columns.str.replace(' ', '_').str.replace(r'[().%]', '', regex=True).str.replace('/', '_')

        # Retrieve only relevant columns
        final_df = non_compliant_df[['Date_of_Inspection', 'Month', 'Contract_Title', 'Template', 'Group_No', 'Group', 'Item_No', 'Description', 'Location', 'Safety_Compliance', 'Date_completed', 'Agreed_date_for_completion', 'On_Time']]

        # Write to SQL database
        final_df.to_sql('safety_inspection_dc202312', con=conn, if_exists='replace', index=False)

        # Build the per-month summary DataFrame from the tallies collected above
        summary_rows = []
        for k, v in monthly_summary.items():
            summary_rows.append({
                'Month': k,
                'Items': v['items'],
                'Concerns': v['concern']
            })
        summary_df = pd.DataFrame(data=summary_rows)
        summary_df.to_sql('safety_inspection_summary_dc202312', con=conn, if_exists='replace', index=False)
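
The callable writes two tables to the warehouse: safety_inspection_dc202312 (one row per non-compliant checklist item) and safety_inspection_summary_dc202312 (per-month totals). A minimal sketch for reading them back to sanity-check a run, reusing the connection settings from the callable (this helper is an assumption, not part of the DAG):

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine('postgres://dRowAdmin:drowsuper@'
                       'drowdatewarehouse.crlwwhgepgi7.ap-east-1.rds.amazonaws.com:5432/drowDateWareHouse')

# Per-item non-compliance records
detail_df = pd.read_sql('SELECT * FROM safety_inspection_dc202312', con=engine)
# Per-month totals of inspected items and concerns
summary_df = pd.read_sql('SELECT * FROM safety_inspection_summary_dc202312', con=engine)
print(summary_df.sort_values('Month'))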
Task Instance Attributes
Attribute Value
dag_id dc202312_safety_inspection
duration 143.335293
end_date 2024-09-12 04:59:41.254032+00:00
execution_date 2024-09-10T15:00:00+00:00
executor_config {}
generate_command <function TaskInstance.generate_command at 0x7f152f9bf320>
hostname 63fbafbc3109
is_premature False
job_id 173
key ('dc202312_safety_inspection', 'getMongoDB', <Pendulum [2024-09-10T15:00:00+00:00]>, 2)
log <Logger airflow.task (INFO)>
log_filepath /usr/local/airflow/logs/dc202312_safety_inspection/getMongoDB/2024-09-10T15:00:00+00:00.log
log_url http://localhost:8080/admin/airflow/log?execution_date=2024-09-10T15%3A00%3A00%2B00%3A00&task_id=getMongoDB&dag_id=dc202312_safety_inspection
logger <Logger airflow.task (INFO)>
mark_success_url http://localhost:8080/success?task_id=getMongoDB&dag_id=dc202312_safety_inspection&execution_date=2024-09-10T15%3A00%3A00%2B00%3A00&upstream=false&downstream=false
max_tries 1
metadata MetaData(bind=None)
next_try_number 2
operator PythonOperator
pid 3413
pool default_pool
prev_attempted_tries 1
previous_execution_date_success None
previous_start_date_success None
previous_ti None
previous_ti_success None
priority_weight 1
queue default
queued_dttm 2024-09-12 04:56:58.450331+00:00
raw False
run_as_user None
start_date 2024-09-12 04:57:17.918739+00:00
state success
task <Task(PythonOperator): getMongoDB>
task_id getMongoDB
test_mode False
try_number 2
unixname airflow
Task Attributes
Attribute Value
dag <DAG: dc202312_safety_inspection>
dag_id dc202312_safety_inspection
depends_on_past False
deps {<TIDep(Not In Retry Period)>, <TIDep(Trigger Rule)>, <TIDep(Previous Dagrun State)>}
do_xcom_push True
downstream_list []
downstream_task_ids set()
email None
email_on_failure True
email_on_retry True
end_date None
execution_timeout None
executor_config {}
extra_links []
global_operator_extra_link_dict {}
inlets []
lineage_data None
log <Logger airflow.task.operators (INFO)>
logger <Logger airflow.task.operators (INFO)>
max_retry_delay None
on_failure_callback None
on_retry_callback None
on_success_callback None
op_args []
op_kwargs {'name': 'Dylan'}
operator_extra_link_dict {}
operator_extra_links ()
outlets []
owner airflow
params {}
pool default_pool
priority_weight 1
priority_weight_total 1
provide_context True
queue default
resources None
retries 1
retry_delay 0:05:00
retry_exponential_backoff False
run_as_user None
schedule_interval 0 15 * * *
shallow_copy_attrs ('python_callable', 'op_kwargs')
sla None
start_date 2023-01-17T00:00:00+00:00
subdag None
task_concurrency None
task_id getMongoDB
task_type PythonOperator
template_ext []
template_fields ('templates_dict', 'op_args', 'op_kwargs')
templates_dict None
trigger_rule all_success
ui_color #ffefeb
ui_fgcolor #000
upstream_list [<Task(PythonOperator): getDrowToken>]
upstream_task_ids {'getDrowToken'}
wait_for_downstream False
weight_rule downstream
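
Taken together, the attributes above imply a simple two-task DAG: getDrowToken pushes a 'token' XCom that getMongoDB consumes. A hedged reconstruction of the wiring (Airflow 1.10-style imports; the getDrowToken callable and the catchup setting are assumptions, since neither is shown in this dump, and getMongoDB is the function listed above):

from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.python_operator import PythonOperator

default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'email_on_failure': True,
    'email_on_retry': True,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
}

with DAG(
    dag_id='dc202312_safety_inspection',
    default_args=default_args,
    start_date=datetime(2023, 1, 17),
    schedule_interval='0 15 * * *',
    catchup=False,  # assumption: not visible in the attribute dump
) as dag:

    get_token = PythonOperator(
        task_id='getDrowToken',
        python_callable=getDrowToken,  # pushes the 'token' XCom consumed by getMongoDB
        provide_context=True,
    )

    get_data = PythonOperator(
        task_id='getMongoDB',
        python_callable=getMongoDB,
        provide_context=True,
        op_kwargs={'name': 'Dylan'},
    )

    get_token >> get_data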