)

import bigframes._config as config
+import bigframes.constants as constants
+import bigframes.core.blocks
import bigframes.core.global_session as global_session
import bigframes.core.indexes
import bigframes.core.reshape
import bigframes.dataframe
+import bigframes.operations as ops
import bigframes.series
import bigframes.session
import bigframes.session.clients
import third_party.bigframes_vendored.pandas.core.reshape.concat as vendored_pandas_concat
+import third_party.bigframes_vendored.pandas.core.reshape.encoding as vendored_pandas_encoding
import third_party.bigframes_vendored.pandas.core.reshape.merge as vendored_pandas_merge
import third_party.bigframes_vendored.pandas.core.reshape.tile as vendored_pandas_tile

@@ -134,6 +138,179 @@ def cut(
cut.__doc__ = vendored_pandas_tile.cut.__doc__


+def get_dummies(
+    data: Union[DataFrame, Series],
+    prefix: Union[List, dict, str, None] = None,
+    prefix_sep: Union[List, dict, str, None] = "_",
+    dummy_na: bool = False,
+    columns: Optional[List] = None,
+    drop_first: bool = False,
+    dtype: Any = None,
+) -> DataFrame:
+    # simplify input parameters into per-input-label lists
+    # also raise errors for invalid parameters
+    column_labels, prefixes, prefix_seps = _standardize_get_dummies_params(
+        data, prefix, prefix_sep, columns, dtype
+    )
+
+    # combine prefixes into per-column-id list
+    full_columns_prefixes, columns_ids = _determine_get_dummies_columns_from_labels(
+        data, column_labels, prefix is not None, prefixes, prefix_seps
+    )
+
+    # run queries to compute unique values
+    block = data._block
+    max_unique_value = (
+        bigframes.core.blocks._BQ_MAX_COLUMNS
+        - len(block.value_columns)
+        - len(block.index_columns)
+        - 1
+    ) // len(column_labels)
+    columns_values = [
+        block._get_unique_values([col_id], max_unique_value) for col_id in columns_ids
+    ]
+
+    # for each dummified column, add the content of the output columns via block operations
+    intermediate_col_ids = []
+    for i in range(len(columns_values)):
+        level = columns_values[i].get_level_values(0).sort_values().dropna()
+        if drop_first:
+            level = level[1:]
+        column_label = full_columns_prefixes[i]
+        column_id = columns_ids[i]
+        block, new_intermediate_col_ids = _perform_get_dummies_block_operations(
+            block, level, column_label, column_id, dummy_na
+        )
+        intermediate_col_ids.extend(new_intermediate_col_ids)
+
+    # drop dummified columns (and the intermediate columns we added)
+    block = block.drop_columns(columns_ids + intermediate_col_ids)
+    return DataFrame(block)
+
+
+get_dummies.__doc__ = vendored_pandas_encoding.get_dummies.__doc__
+
+
+def _standardize_get_dummies_params(
+    data: Union[DataFrame, Series],
+    prefix: Union[List, dict, str, None],
+    prefix_sep: Union[List, dict, str, None],
+    columns: Optional[List],
+    dtype: Any,
+) -> Tuple[List, List[str], List[str]]:
+    block = data._block
+
+    if isinstance(data, Series):
+        columns = [block.column_labels[0]]
+    if columns is not None and not pandas.api.types.is_list_like(columns):
+        raise TypeError("Input must be a list-like for parameter `columns`")
+    if dtype is not None and dtype not in [
+        pandas.BooleanDtype,
+        bool,
+        "Boolean",
+        "boolean",
+        "bool",
+    ]:
+        raise NotImplementedError(
+            f"Only Boolean dtype is currently supported. {constants.FEEDBACK_LINK}"
+        )
+
+    if columns is None:
+        default_dummy_types = [pandas.StringDtype, "string[pyarrow]"]
+        columns = []
+        columns_set = set()
+        for col_id in block.value_columns:
+            label = block.col_id_to_label[col_id]
+            if (
+                label not in columns_set
+                and block.expr.get_column_type(col_id) in default_dummy_types
+            ):
+                columns.append(label)
+                columns_set.add(label)
+
+    column_labels: List = typing.cast(List, columns)
+
+    def parse_prefix_kwarg(kwarg, kwarg_name) -> Optional[List[str]]:
+        if kwarg is None:
+            return None
+        if isinstance(kwarg, str):
+            return [kwarg] * len(column_labels)
+        if isinstance(kwarg, dict):
+            return [kwarg[column] for column in column_labels]
+        kwarg = typing.cast(List, kwarg)
+        if pandas.api.types.is_list_like(kwarg) and len(kwarg) != len(column_labels):
+            raise ValueError(
+                f"Length of '{kwarg_name}' ({len(kwarg)}) did not match "
+                f"the length of the columns being encoded ({len(column_labels)})."
+            )
+        if pandas.api.types.is_list_like(kwarg):
+            return list(map(str, kwarg))
+        raise TypeError(f"{kwarg_name} kwarg must be a string, list, or dictionary")
+
+    prefix_seps = parse_prefix_kwarg(prefix_sep or "_", "prefix_sep")
+    prefix_seps = typing.cast(List, prefix_seps)
+    prefixes = parse_prefix_kwarg(prefix, "prefix")
+    if prefixes is None:
+        prefixes = column_labels
+    prefixes = typing.cast(List, prefixes)
+
+    return column_labels, prefixes, prefix_seps
+
+
+def _determine_get_dummies_columns_from_labels(
+    data: Union[DataFrame, Series],
+    column_labels: List,
+    prefix_given: bool,
+    prefixes: List[str],
+    prefix_seps: List[str],
+) -> Tuple[List[str], List[str]]:
+    block = data._block
+
+    columns_ids = []
+    columns_prefixes = []
+    for i in range(len(column_labels)):
+        label = column_labels[i]
+        empty_prefix = label is None or (isinstance(data, Series) and not prefix_given)
+        full_prefix = "" if empty_prefix else prefixes[i] + prefix_seps[i]
+
+        for col_id in block.label_to_col_id[label]:
+            columns_ids.append(col_id)
+            columns_prefixes.append(full_prefix)
+
+    return columns_prefixes, columns_ids
+
+
+def _perform_get_dummies_block_operations(
+    block: bigframes.core.blocks.Block,
+    level: pandas.Index,
+    column_label: str,
+    column_id: str,
+    dummy_na: bool,
+) -> Tuple[bigframes.core.blocks.Block, List[str]]:
+    intermediate_col_ids = []
+    for value in level:
+        new_column_label = f"{column_label}{value}"
+        if column_label == "":
+            new_column_label = value
+        new_block, new_id = block.apply_unary_op(
+            column_id, ops.BinopPartialLeft(ops.eq_op, value)
+        )
+        intermediate_col_ids.append(new_id)
+        block, _ = new_block.apply_unary_op(
+            new_id,
+            ops.BinopPartialRight(ops.fillna_op, False),
+            result_label=new_column_label,
+        )
+    if dummy_na:
+        # dummy column name for na depends on the dtype
+        na_string = str(pandas.Index([None], dtype=level.dtype)[0])
+        new_column_label = f"{column_label}{na_string}"
+        block, _ = block.apply_unary_op(
+            column_id, ops.isnull_op, result_label=new_column_label
+        )
+    return block, intermediate_col_ids
+
+
def qcut(
    x: bigframes.series.Series,
    q: int,
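A minimal usage sketch of the get_dummies entry point added in this diff, assuming a configured BigQuery session; the DataFrame contents, column name, and prefix values below are illustrative only and not part of this change.

import bigframes.pandas as bpd

# Hypothetical data: one string column to encode.
df = bpd.DataFrame({"animal": ["cat", "dog", "cat", None]})

# One boolean indicator column per distinct value, mirroring pandas.get_dummies.
# With prefix="kind" and prefix_sep="=", the output labels are "kind=cat" and "kind=dog".
dummies = bpd.get_dummies(df, prefix="kind", prefix_sep="=")

# Passing a Series with dummy_na=True adds an extra indicator column for missing values.
with_na = bpd.get_dummies(df["animal"], dummy_na=True)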