mastergopote44 committed on
Commit
70b2bd0
1 Parent(s): 71f5686
Files changed (1) hide show
  1. Long-Term-Care-Aggregated-Data.py +45 -37
Long-Term-Care-Aggregated-Data.py CHANGED
@@ -122,67 +122,75 @@ class LongTermCareAggregatedData(datasets.GeneratorBasedBuilder):
122
  )
123
 
124
  def _split_generators(self, dl_manager):
125
- # URLs of the raw CSV files on GitHub using the raw content feature
126
- urls_to_download = {
127
- "train_incidence": "https://raw.githubusercontent.com/justinkao44/STA663_Project_1/main/train_filtered_incidence_df.csv",
128
- "train_termination": "https://raw.githubusercontent.com/justinkao44/STA663_Project_1/main/train_filtered_termination_df.csv",
129
- "validation_incidence": "https://raw.githubusercontent.com/justinkao44/STA663_Project_1/main/validation_filtered_incidence_df.csv",
130
- "validation_termination": "https://raw.githubusercontent.com/justinkao44/STA663_Project_1/main/validation_filtered_termination_df.csv",
131
- }
132
 
133
- # Use dl_manager to download and extract files
134
- downloaded_files = dl_manager.download(urls_to_download)
135
 
136
  return [
137
  datasets.SplitGenerator(
138
  name="train_incidence",
139
  gen_kwargs={
140
- "filepath": downloaded_files["train_incidence"],
141
- "split": "incidence"
142
  },
143
  ),
144
  datasets.SplitGenerator(
145
  name="validation_incidence",
146
  gen_kwargs={
147
- "filepath": downloaded_files["validation_incidence"],
148
- "split": "incidence"
149
  },
150
  ),
151
  datasets.SplitGenerator(
152
  name="train_termination",
153
  gen_kwargs={
154
- "filepath": downloaded_files["train_termination"],
155
- "split": "termination"
156
  },
157
  ),
158
  datasets.SplitGenerator(
159
  name="validation_termination",
160
  gen_kwargs={
161
- "filepath": downloaded_files["validation_termination"],
162
- "split": "termination"
163
  },
164
  ),
165
  ]
166
- def _generate_examples(self, incidence_filepath, termination_filepath, split):
167
- if self.config.name == "incidence":
168
- dataframe = pd.read_csv(incidence_filepath)
169
- feature_columns = [
170
- "Group_Indicator", "Gender", "Issue_Age_Bucket", "Incurred_Age_Bucket",
171
- "Issue_Year_Bucket", "Policy_Year", "Marital_Status", "Premium_Class",
172
- "Underwriting_Type", "Coverage_Type_Bucket", "Tax_Qualification_Status",
173
- "Inflation_Rider", "Rate_Increase_Flag", "Restoration_of_Benefits",
174
- "NH_Orig_Daily_Ben_Bucket", "ALF_Orig_Daily_Ben_Bucket", "HHC_Orig_Daily_Ben_Bucket",
175
- "NH_Ben_Period_Bucket", "ALF_Ben_Period_Bucket", "HHC_Ben_Period_Bucket",
176
- "NH_EP_Bucket", "ALF_EP_Bucket", "HHC_EP_Bucket", "Region",
177
- "Active_Exposure", "Total_Exposure", "Claim_Count", "Count_NH", "Count_ALF", "Count_HHC", "Count_Unk",
178
- ]
179
- elif 'termination' in split:
180
- feature_columns = [
181
- "Gender", "Incurred_Age_Bucket", "Incurred_Year_Bucket", "Claim_Type",
182
- "Region", "Diagnosis_Category", "Claim_Duration", "Exposure", "Deaths",
183
- "Recovery", "Terminations", "Benefit_Expiry", "Others_Terminations",
184
- ]
185
 
 
 
 
 
 
 
 
 
186
  for idx, row in dataframe.iterrows():
187
  feature_dict = {column: row[column] for column in feature_columns}
188
- yield idx, feature_dict
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  )
123
 
124
  def _split_generators(self, dl_manager):
 
 
 
 
 
 
 
125
 
126
+ downloaded_files = dl_manager.download(_URLS)
 
127
 
128
  return [
129
  datasets.SplitGenerator(
130
  name="train_incidence",
131
  gen_kwargs={
132
+ "data_file": downloaded_files["train_incidence"],
133
+ "split": "incidence",
134
  },
135
  ),
136
  datasets.SplitGenerator(
137
  name="validation_incidence",
138
  gen_kwargs={
139
+ "data_file": downloaded_files["validation_incidence"],
140
+ "split": "incidence",
141
  },
142
  ),
143
  datasets.SplitGenerator(
144
  name="train_termination",
145
  gen_kwargs={
146
+ "data_file": downloaded_files["train_termination"],
147
+ "split": "termination",
148
  },
149
  ),
150
  datasets.SplitGenerator(
151
  name="validation_termination",
152
  gen_kwargs={
153
+ "data_file": downloaded_files["validation_termination"],
154
+ "split": "termination",
155
  },
156
  ),
157
  ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
158
 
159
+ def _generate_examples(self, data_file, split):
160
+ # Read the CSV file for the given split
161
+ dataframe = pd.read_csv(data_file)
162
+
163
+ # Determine the feature columns based on the split type
164
+ feature_columns = self._get_feature_columns(split)
165
+
166
+ # Yield examples
167
  for idx, row in dataframe.iterrows():
168
  feature_dict = {column: row[column] for column in feature_columns}
169
+ yield idx, feature_dict
170
+
171
+ def _get_feature_columns(self, split):
172
+ # Define the feature columns for 'incidence'
173
+ incidence_columns = [
174
+ "Group_Indicator", "Gender", "Issue_Age_Bucket", "Incurred_Age_Bucket",
175
+ "Issue_Year_Bucket", "Policy_Year", "Marital_Status", "Premium_Class",
176
+ "Underwriting_Type", "Coverage_Type_Bucket", "Tax_Qualification_Status",
177
+ "Inflation_Rider", "Rate_Increase_Flag", "Restoration_of_Benefits",
178
+ "NH_Orig_Daily_Ben_Bucket", "ALF_Orig_Daily_Ben_Bucket", "HHC_Orig_Daily_Ben_Bucket",
179
+ "NH_Ben_Period_Bucket", "ALF_Ben_Period_Bucket", "HHC_Ben_Period_Bucket",
180
+ "NH_EP_Bucket", "ALF_EP_Bucket", "HHC_EP_Bucket", "Region",
181
+ "Active_Exposure", "Total_Exposure", "Claim_Count", "Count_NH", "Count_ALF", "Count_HHC", "Count_Unk",
182
+ ]
183
+
184
+ # Define the feature columns for 'termination'
185
+ termination_columns = [
186
+ "Gender", "Incurred_Age_Bucket", "Incurred_Year_Bucket", "Claim_Type",
187
+ "Region", "Diagnosis_Category", "Claim_Duration", "Exposure", "Deaths",
188
+ "Recovery", "Terminations", "Benefit_Expiry", "Others_Terminations",
189
+ ]
190
+
191
+ if split == "incidence":
192
+ return incidence_columns
193
+ elif split == "termination":
194
+ return termination_columns
195
+ else:
196
+ raise ValueError(f"Split name not recognized: {split}")