Skip to content

Index

APIReader

Bases: BaseReader

Utility class for reading an API into a DataFrame with pagination support.

This class uses an APIClient to fetch paginated data from an API and load it into a Spark DataFrame.

Attributes:

Name Type Description
api_client

The client for making API requests.

Source code in src/cloe_nessy/integration/reader/api_reader.py
(source-viewer line-number gutter for lines 59–537 of api_reader.py omitted)
class APIReader(BaseReader):
    """Utility class for reading an API into a DataFrame with pagination support.

    This class uses an APIClient to fetch paginated data from an API and load it into a Spark DataFrame.

    Attributes:
        api_client: The client for making API requests.
    """

    # Output layout: one "json_response" column holding an array of
    # (response, __metadata) structs -- one array element per API page/request.
    OUTPUT_SCHEMA = T.StructType(
        [
            T.StructField(
                "json_response",
                T.ArrayType(
                    T.StructType(
                        [
                            T.StructField("response", T.StringType(), True),
                            T.StructField(
                                "__metadata",
                                T.StructType(
                                    [
                                        T.StructField("base_url", T.StringType(), True),
                                        T.StructField("elapsed", T.DoubleType(), True),
                                        T.StructField("reason", T.StringType(), True),
                                        T.StructField("status_code", T.LongType(), True),
                                        T.StructField("timestamp", T.StringType(), True),
                                        T.StructField("url", T.StringType(), True),
                                        T.StructField("endpoint", T.StringType(), True),
                                        T.StructField(
                                            "query_parameters",
                                            T.MapType(T.StringType(), T.StringType(), True),
                                            True,
                                        ),
                                    ]
                                ),
                                True,
                            ),
                        ]
                    )
                ),
                True,
            )
        ]
    )

    def __init__(
        self,
        base_url: str,
        auth: AuthBase | None = None,
        default_headers: dict[str, str] | None = None,
        max_concurrent_requests: int = 8,
    ):
        """Initializes the APIReader object.

        Args:
            base_url: The base URL for the API.
            auth: The authentication method for the API.
            default_headers: Default headers to include in requests.
            max_concurrent_requests: The maximum concurrent requests. Defaults to 8.
        """
        super().__init__()
        self.base_url = base_url
        self.auth = auth
        self.default_headers = default_headers
        # Also used as the HTTP pool size in read() and as the Spark
        # repartition count in _read_dynamic().
        self.max_concurrent_requests = max_concurrent_requests

    @staticmethod
    def _get_pagination_strategy(config: PaginationConfig | dict[str, str]) -> PaginationStrategy:
        """Return the appropriate pagination strategy."""
        if isinstance(config, PaginationConfig):
            config = config.model_dump()  # PaginationStrategy expects a dict

        # Look up the strategy class by its name in the PaginationStrategyType
        # enum and instantiate it with the full config dict.
        pagination_strategy: PaginationStrategy = PaginationStrategyType[config["strategy"]].value(config)
        return pagination_strategy

    @staticmethod
    def _get_metadata(
        response: APIResponse, base_url: str, endpoint: str, params: dict[str, Any] | None = None
    ) -> ResponseMetadata:
        """Creates a dictionary with metadata from an APIResponse.

        Creates a dictionary containing metadata related to an API response. The metadata includes the current timestamp,
        the base URL of the API, the URL of the request, the HTTP status code, the reason phrase,
        and the elapsed time of the request in seconds.

        Args:
            response: The API response object containing the metadata to be added.
            base_url: The base url.
            endpoint: The endpoint.
            params: The parameters to be passed to the query.

        Returns:
            The dictionary containing metadata of API response.
        """
        params = params or {}
        metadata: ResponseMetadata = {
            "__metadata": {
                # NOTE(review): naive local timestamp -- confirm whether UTC is expected.
                "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
                "base_url": base_url,
                "url": response.url,
                "status_code": response.status_code,
                "reason": response.reason,
                "elapsed": response.elapsed.total_seconds(),
                "endpoint": endpoint,
                # Copied so later strategy-driven mutation of the params dict
                # does not alter already-yielded metadata.
                "query_parameters": params.copy(),
            }
        }
        return metadata

    @staticmethod
    def _paginate(
        api_client: APIClient,
        endpoint: str,
        method: str,
        key: str | None,
        params: dict[str, Any],
        headers: dict[str, Any] | None,
        data: dict[str, Any] | None,
        json_body: dict[str, Any] | None,
        timeout: int,
        max_retries: int,
        backoff_factor: int,
        pagination_config: PaginationConfig,
    ) -> Generator[ResponseData]:
        """Paginates through an API endpoint based on the given pagination strategy.

        Yields one ResponseData per page: the serialized JSON response plus the
        request metadata under the "__metadata" key.
        """
        strategy = APIReader._get_pagination_strategy(pagination_config)

        query_parameters = params
        current_page = 1

        while True:
            # max_page == -1 means "no page limit".
            if pagination_config.max_page != -1 and current_page > pagination_config.max_page:
                break

            response = api_client.request(
                method=method,
                endpoint=endpoint,
                params=query_parameters,
                headers=headers,
                data=data,
                json=json_body,
                timeout=timeout,
                max_retries=max_retries,
                backoff_factor=backoff_factor,
                # NOTE(review): non-2xx pages are yielded with their status code
                # in __metadata rather than raised -- confirm intended.
                raise_for_status=False,
            )

            response_data = {"response": json.dumps(response.to_dict(key))} | APIReader._get_metadata(
                response, api_client.base_url, endpoint, query_parameters
            )

            yield cast(ResponseData, response_data)

            if not strategy.has_more_data(response):
                break

            # The strategy derives the next page's query parameters (e.g. page
            # number, offset or cursor) from the current ones.
            query_parameters = strategy.get_next_params(query_parameters)
            current_page += 1

    @staticmethod
    def _read_from_api(
        api_client: APIClient,
        endpoint: str,
        method: str,
        key: str | None,
        timeout: int,
        params: dict[str, Any],
        headers: dict[str, Any] | None,
        data: dict[str, Any] | None,
        json_body: dict[str, Any] | None,
        max_retries: int,
        backoff_factor: int,
    ) -> list[list[ResponseData]]:
        """Performs a single (non-paginated) API request.

        Returns:
            A list containing one single-element list with the response data,
            mirroring the nested shape produced by the paginated reader so both
            paths feed the same OUTPUT_SCHEMA.

        Raises:
            RuntimeError: If the request fails or an unexpected error occurs.
        """
        try:
            response = api_client.request(
                method=method,
                endpoint=endpoint,
                timeout=timeout,
                params=params,
                headers=headers,
                data=data,
                json=json_body,
                max_retries=max_retries,
                backoff_factor=backoff_factor,
            )
            response_data = [
                [
                    cast(
                        ResponseData,
                        {"response": json.dumps(response.to_dict(key))}
                        | APIReader._get_metadata(response, api_client.base_url, endpoint, params),
                    )
                ]
            ]
            return response_data

        except (APIClientHTTPError, APIClientConnectionError, APIClientTimeoutError) as e:
            raise RuntimeError(f"API request failed: {e}") from e
        except APIClientError as e:
            raise RuntimeError(f"An error occurred while reading the API data: {e}") from e
        except Exception as e:
            raise RuntimeError(f"An unexpected error occurred: {e}") from e

    @staticmethod
    def _read_from_api_with_pagination(
        api_client: APIClient,
        endpoint: str,
        method: str,
        key: str | None,
        timeout: int,
        params: dict[str, Any],
        headers: dict[str, Any] | None,
        data: dict[str, Any] | None,
        json_body: dict[str, Any] | None,
        pagination_config: PaginationConfig,
        max_retries: int,
        backoff_factor: int,
    ) -> list[list[ResponseData]]:
        """Reads all pages from an API endpoint.

        Pages are grouped into inner lists of at most
        ``pagination_config.pages_per_array_limit`` elements; a limit of -1
        disables grouping so all pages land in a single inner list.

        Raises:
            RuntimeError: If the request fails or an unexpected error occurs.
        """
        all_data: list[list[ResponseData]] = []
        all_data_temp: list[ResponseData] = []

        try:
            for response_data in APIReader._paginate(
                api_client=api_client,
                method=method,
                endpoint=endpoint,
                key=key,
                timeout=timeout,
                params=params,
                headers=headers,
                data=data,
                json_body=json_body,
                max_retries=max_retries,
                backoff_factor=backoff_factor,
                pagination_config=pagination_config,
            ):
                all_data_temp.append(response_data)
                if (
                    len(all_data_temp) >= pagination_config.pages_per_array_limit
                    and pagination_config.pages_per_array_limit != -1
                ):
                    all_data.append(all_data_temp)
                    all_data_temp = []

            # Flush the trailing partial group, if any.
            if all_data_temp:
                all_data.append(all_data_temp)

            return all_data

        except (APIClientHTTPError, APIClientConnectionError, APIClientTimeoutError) as e:
            raise RuntimeError(f"API request failed: {e}") from e
        except APIClientError as e:
            raise RuntimeError(f"An error occurred while reading the API data: {e}") from e
        except Exception as e:
            raise RuntimeError(f"An unexpected error occurred: {e}") from e

    def read(
        self,
        *,
        endpoint: str | None = None,
        method: str = "GET",
        key: str | None = None,
        timeout: int = 30,
        params: dict[str, Any] | None = None,
        headers: dict[str, Any] | None = None,
        data: dict[str, Any] | None = None,
        json_body: dict[str, Any] | None = None,
        pagination_config: PaginationConfig | None = None,
        max_retries: int = 0,
        backoff_factor: int = 1,
        dynamic_requests: list[RequestSet] | None = None,
        **_: Any,
    ) -> DataFrame:
        """Reads data from an API endpoint and returns it as a DataFrame.

        Args:
            endpoint: The endpoint to send the request to.
            method: The HTTP method to use for the request.
            key: The key to extract from the JSON response.
            timeout: The timeout for the request in seconds.
            params: The query parameters for the request.
            headers: The headers to include in the request.
            data: The form data to include in the request.
            json_body: The JSON data to include in the request.
            pagination_config: Configuration for pagination.
            max_retries: The maximum number of retries for the request.
            backoff_factor: Factor for exponential backoff between retries.
            dynamic_requests: A list of RequestSet dictionaries for making multiple API requests dynamically.
                Each RequestSet should contain 'endpoint', 'params', and optionally 'headers', 'data', 'json_body'.
                When provided, the reader will execute all requests and combine the results.

        Returns:
            DataFrame: The Spark DataFrame containing the read data in the json_object column.

        Raises:
            RuntimeError: If there is an error with the API request or reading the data.
        """
        api_client = APIClient(
            base_url=self.base_url,
            auth=self.auth,
            default_headers=self.default_headers,
            pool_maxsize=self.max_concurrent_requests,
        )

        # The dynamic (parallel) path also serves the preliminary-probe case by
        # wrapping the single request into a one-element request set.
        if dynamic_requests or getattr(pagination_config, "preliminary_probe", False):
            if not dynamic_requests:
                if not endpoint:
                    raise ValueError("endpoint parameter must be provided.")
                dynamic_requests = [
                    {
                        "endpoint": endpoint,
                        "params": params or {},
                        "headers": headers,
                        "data": data,
                        "json_body": json_body,
                    }
                ]

            return self._read_dynamic(
                api_client=api_client,
                dynamic_requests=dynamic_requests,
                method=method,
                key=key,
                timeout=timeout,
                pagination_config=pagination_config,
                max_retries=max_retries,
                backoff_factor=backoff_factor,
            )

        params = params if params is not None else {}

        if not endpoint:
            raise ValueError("endpoint parameter must be provided.")

        if pagination_config is not None:
            response_data = self._read_from_api_with_pagination(
                api_client=api_client,
                endpoint=endpoint,
                method=method,
                key=key,
                timeout=timeout,
                params=params,
                headers=headers,
                data=data,
                json_body=json_body,
                pagination_config=pagination_config,
                max_retries=max_retries,
                backoff_factor=backoff_factor,
            )

        else:
            response_data = self._read_from_api(
                api_client=api_client,
                endpoint=endpoint,
                method=method,
                key=key,
                timeout=timeout,
                params=params,
                headers=headers,
                data=data,
                json_body=json_body,
                max_retries=max_retries,
                backoff_factor=backoff_factor,
            )

        # Each inner list (group of pages) becomes one DataFrame row.
        return self._spark.createDataFrame(data=[(response,) for response in response_data], schema=self.OUTPUT_SCHEMA)

    def _read_dynamic(
        self,
        api_client: APIClient,
        dynamic_requests: list[RequestSet],
        method: str,
        key: str | None,
        timeout: int,
        pagination_config: PaginationConfig | None,
        max_retries: int,
        backoff_factor: int,
    ) -> DataFrame:
        """Executes many API requests in parallel on Spark executors.

        Each row of the request DataFrame describes one request; partitions are
        processed with mapInPandas so rows in different partitions run
        concurrently.
        """
        def _process_partition(pdf_iter):
            # Runs on the executors: issues the API call(s) for every request
            # row in the partition and yields one pandas DataFrame per row.
            for pdf in pdf_iter:
                for _, row in pdf.iterrows():
                    endpoint = row["endpoint"]
                    params = row["params"] or {}
                    headers = row["headers"] or {}
                    data = row["data"] or {}
                    json_body = row["json_body"] or {}

                    # Probed requests already carry per-page parameters, so they
                    # are issued as plain single requests.
                    if any([pagination_config is None, getattr(pagination_config, "preliminary_probe", False)]):
                        response_data = APIReader._read_from_api(
                            api_client=api_client,
                            endpoint=endpoint,
                            method=method,
                            key=key,
                            timeout=timeout,
                            params=params,
                            headers=headers,
                            data=data,
                            json_body=json_body,
                            max_retries=max_retries,
                            backoff_factor=backoff_factor,
                        )
                    else:
                        if not pagination_config:
                            raise ValueError("pagination_config must be provided for paginated requests.")
                        response_data = APIReader._read_from_api_with_pagination(
                            api_client=api_client,
                            endpoint=endpoint,
                            method=method,
                            key=key,
                            timeout=timeout,
                            params=params,
                            headers=headers,
                            data=data,
                            json_body=json_body,
                            pagination_config=pagination_config,
                            max_retries=max_retries,
                            backoff_factor=backoff_factor,
                        )

                    yield pd.DataFrame(data=[(response,) for response in response_data])

        if pagination_config is not None and getattr(pagination_config, "preliminary_probe", False):
            pagination_strategy = APIReader._get_pagination_strategy(pagination_config)

            def make_request(
                endpoint: str,
                params: dict[str, Any],
                headers: dict[str, Any] | None,
                data: dict[str, Any] | None,
                json_body: dict[str, Any] | None,
            ) -> APIResponse:
                # Driver-side probe helper used by the strategy to discover the
                # total page count before fanning out.
                return api_client.request(
                    method=method,
                    endpoint=endpoint,
                    params=params,
                    headers=headers,
                    data=data,
                    json=json_body,
                    timeout=timeout,
                    max_retries=max_retries,
                    backoff_factor=backoff_factor,
                    raise_for_status=False,
                )

            # Expand each request into one request per probed page so Spark can
            # fetch all pages in parallel.
            extended_dynamic_requests: list[RequestSet] = []
            for request in dynamic_requests:
                probed_params_items = pagination_strategy.probe_max_page(
                    **request,
                    make_request=make_request,
                )
                for probed_params_item in probed_params_items:
                    extended_dynamic_requests.append(
                        {
                            "endpoint": request["endpoint"],
                            "params": probed_params_item,
                            "headers": request["headers"],
                            "data": request["data"],
                            "json_body": request["json_body"],
                        }
                    )

            dynamic_requests = extended_dynamic_requests

        # NOTE(review): cast(dict, ...) only silences the type checker; the
        # runtime value is a list of RequestSet dicts.
        df_requests = self._spark.createDataFrame(
            cast(dict, dynamic_requests),
            schema="endpoint string, params map<string, string>, headers map<string, string>, data map<string, string>, json_body map<string, string>",
        )

        self._console_logger.info(
            f"Repartitioning requests to achieve [ '{self.max_concurrent_requests}' ] concurrent requests ..."
        )
        df_requests = df_requests.repartition(self.max_concurrent_requests)
        total_requests = df_requests.count()

        self._console_logger.info(f"Preparing to perform [ '{total_requests}' ] API requests in parallel ...")

        df_response = df_requests.mapInPandas(_process_partition, schema=self.OUTPUT_SCHEMA)

        return df_response

__init__(base_url, auth=None, default_headers=None, max_concurrent_requests=8)

Initializes the APIReader object.

Parameters:

Name Type Description Default
base_url str

The base URL for the API.

required
auth AuthBase | None

The authentication method for the API.

None
default_headers dict[str, str] | None

Default headers to include in requests.

None
max_concurrent_requests int

The maximum concurrent requests. Defaults to 8.

8
Source code in src/cloe_nessy/integration/reader/api_reader.py
def __init__(
    self,
    base_url: str,
    auth: AuthBase | None = None,
    default_headers: dict[str, str] | None = None,
    max_concurrent_requests: int = 8,
):
    """Initializes the APIReader object.

    Args:
        base_url: The base URL for the API.
        auth: The authentication method for the API.
        default_headers: Default headers to include in requests.
        max_concurrent_requests: The maximum concurrent requests. Defaults to 8.
    """
    super().__init__()
    self.base_url = base_url
    self.auth = auth
    self.default_headers = default_headers
    # Also used as the HTTP pool size and the Spark repartition count.
    self.max_concurrent_requests = max_concurrent_requests

read(*, endpoint=None, method='GET', key=None, timeout=30, params=None, headers=None, data=None, json_body=None, pagination_config=None, max_retries=0, backoff_factor=1, dynamic_requests=None, **_)

Reads data from an API endpoint and returns it as a DataFrame.

Parameters:

Name Type Description Default
endpoint str | None

The endpoint to send the request to.

None
method str

The HTTP method to use for the request.

'GET'
key str | None

The key to extract from the JSON response.

None
timeout int

The timeout for the request in seconds.

30
params dict[str, Any] | None

The query parameters for the request.

None
headers dict[str, Any] | None

The headers to include in the request.

None
data dict[str, Any] | None

The form data to include in the request.

None
json_body dict[str, Any] | None

The JSON data to include in the request.

None
pagination_config PaginationConfig | None

Configuration for pagination.

None
max_retries int

The maximum number of retries for the request.

0
backoff_factor int

Factor for exponential backoff between retries.

1
dynamic_requests list[RequestSet] | None

A list of RequestSet dictionaries for making multiple API requests dynamically. Each RequestSet should contain 'endpoint', 'params', and optionally 'headers', 'data', 'json_body'. When provided, the reader will execute all requests and combine the results.

None

Returns:

Name Type Description
DataFrame DataFrame

The Spark DataFrame containing the read data in the json_object column.

Raises:

Type Description
RuntimeError

If there is an error with the API request or reading the data.

Source code in src/cloe_nessy/integration/reader/api_reader.py
def read(
    self,
    *,
    endpoint: str | None = None,
    method: str = "GET",
    key: str | None = None,
    timeout: int = 30,
    params: dict[str, Any] | None = None,
    headers: dict[str, Any] | None = None,
    data: dict[str, Any] | None = None,
    json_body: dict[str, Any] | None = None,
    pagination_config: PaginationConfig | None = None,
    max_retries: int = 0,
    backoff_factor: int = 1,
    dynamic_requests: list[RequestSet] | None = None,
    **_: Any,
) -> DataFrame:
    """Reads data from an API endpoint and returns it as a DataFrame.

    Args:
        endpoint: The endpoint to send the request to.
        method: The HTTP method to use for the request.
        key: The key to extract from the JSON response.
        timeout: The timeout for the request in seconds.
        params: The query parameters for the request.
        headers: The headers to include in the request.
        data: The form data to include in the request.
        json_body: The JSON data to include in the request.
        pagination_config: Configuration for pagination.
        max_retries: The maximum number of retries for the request.
        backoff_factor: Factor for exponential backoff between retries.
        dynamic_requests: A list of RequestSet dictionaries for making multiple API requests dynamically.
            Each RequestSet should contain 'endpoint', 'params', and optionally 'headers', 'data', 'json_body'.
            When provided, the reader will execute all requests and combine the results.

    Returns:
        DataFrame: The Spark DataFrame containing the read data in the json_object column.

    Raises:
        RuntimeError: If there is an error with the API request or reading the data.
    """
    api_client = APIClient(
        base_url=self.base_url,
        auth=self.auth,
        default_headers=self.default_headers,
        pool_maxsize=self.max_concurrent_requests,
    )

    # The dynamic (parallel) path also serves the preliminary-probe case by
    # wrapping the single request into a one-element request set.
    if dynamic_requests or getattr(pagination_config, "preliminary_probe", False):
        if not dynamic_requests:
            if not endpoint:
                raise ValueError("endpoint parameter must be provided.")
            dynamic_requests = [
                {
                    "endpoint": endpoint,
                    "params": params or {},
                    "headers": headers,
                    "data": data,
                    "json_body": json_body,
                }
            ]

        return self._read_dynamic(
            api_client=api_client,
            dynamic_requests=dynamic_requests,
            method=method,
            key=key,
            timeout=timeout,
            pagination_config=pagination_config,
            max_retries=max_retries,
            backoff_factor=backoff_factor,
        )

    params = params if params is not None else {}

    if not endpoint:
        raise ValueError("endpoint parameter must be provided.")

    if pagination_config is not None:
        response_data = self._read_from_api_with_pagination(
            api_client=api_client,
            endpoint=endpoint,
            method=method,
            key=key,
            timeout=timeout,
            params=params,
            headers=headers,
            data=data,
            json_body=json_body,
            pagination_config=pagination_config,
            max_retries=max_retries,
            backoff_factor=backoff_factor,
        )

    else:
        response_data = self._read_from_api(
            api_client=api_client,
            endpoint=endpoint,
            method=method,
            key=key,
            timeout=timeout,
            params=params,
            headers=headers,
            data=data,
            json_body=json_body,
            max_retries=max_retries,
            backoff_factor=backoff_factor,
        )

    # Each inner list (group of pages) becomes one DataFrame row.
    return self._spark.createDataFrame(data=[(response,) for response in response_data], schema=self.OUTPUT_SCHEMA)

CatalogReader

Bases: BaseReader

A reader for Unity Catalog objects.

This class reads data from a Unity Catalog table and loads it into a Spark DataFrame.

Source code in src/cloe_nessy/integration/reader/catalog_reader.py
class CatalogReader(BaseReader):
    """A reader for Unity Catalog objects.

    This class reads data from a Unity Catalog table and loads it into a Spark DataFrame.
    """

    def __init__(self):
        """Initializes the CatalogReader object."""
        super().__init__()

    def read(
        self,
        table_identifier: str = "",
        *,
        options: dict[str, str] | None = None,
        delta_load_options: DeltaLoadOptions | None = None,
        **kwargs: Any,
    ) -> DataFrame:
        """Reads a table from the Unity Catalog.

        Args:
            table_identifier: The table identifier in the Unity Catalog in the format 'catalog.schema.table'.
            options: PySpark options for the read table operation.
            delta_load_options: Options for delta loading, if applicable. When provided, uses delta loader
                instead of regular table read to perform incremental loading.
            **kwargs: Additional keyword arguments to maintain compatibility with the base class method.

        Returns:
            The Spark DataFrame containing the read data.

        Raises:
            ValueError: If the table_identifier is not provided, is not a string, or is not in the correct format.
            ReadOperationFailedError: For delta load or table read failures.
        """
        if options is None:
            options = {}
        if not table_identifier:
            raise ValueError("table_identifier is required.")
        if not isinstance(table_identifier, str):
            raise ValueError("table_identifier must be a string.")
        if len(table_identifier.split(".")) != 3:
            raise ValueError("table_identifier must be in the format 'catalog.schema.table'.")

        try:
            if delta_load_options:
                # Use delta loader for incremental loading
                self._console_logger.info(f"Performing delta load for table: {table_identifier}")
                delta_loader = DeltaLoaderFactory.create_loader(
                    table_identifier=table_identifier,
                    options=delta_load_options,
                )
                df = delta_loader.read_data(options=options)
                self._console_logger.info(f"Delta load completed for table: {table_identifier}")
                return df

            # Regular table read. DataFrameReader.table() accepts only the table
            # name, so read options must be applied via .options() beforehand;
            # passing them as keyword arguments would raise a TypeError.
            df = self._spark.read.options(**options).table(table_identifier)
            return df
        except AnalysisException as err:
            raise ValueError(f"Table not found: {table_identifier}") from err
        except Exception as err:
            raise ReadOperationFailedError(
                f"An error occurred while reading the table '{table_identifier}': {err}"
            ) from err

    def read_stream(
        self,
        table_identifier: str = "",
        *,
        options: dict[str, str] | None = None,
        **kwargs: Any,
    ) -> DataFrame:
        """Reads a streaming table from the Unity Catalog.

        Args:
            table_identifier: The table identifier in the Unity Catalog in the format 'catalog.schema.table'.
            options: PySpark options for the read stream operation.
            **kwargs: Additional keyword arguments to maintain compatibility with the base class method.

        Returns:
            The Spark Streaming DataFrame containing the read data.

        Raises:
            ValueError: If the table_identifier is not provided, is not a string, or is not in the correct format.
            Exception: For any other unexpected errors during streaming read operation.
        """
        if options is None:
            options = {}
        if not table_identifier:
            raise ValueError("table_identifier is required")
        if not isinstance(table_identifier, str):
            raise ValueError("table_identifier must be a string")
        if len(table_identifier.split(".")) != 3:
            raise ValueError("table_identifier must be in the format 'catalog.schema.table'")

        try:
            # DataStreamReader.table() likewise takes only the table name;
            # apply stream options via .options() first.
            df = self._spark.readStream.options(**options).table(table_identifier)
            return df
        except AnalysisException as err:
            raise ValueError(f"Table not found or not streamable: {table_identifier}") from err
        except Exception as err:
            raise ReadOperationFailedError(
                f"An error occurred while reading the stream from table '{table_identifier}': {err}"
            ) from err

__init__()

Initializes the CatalogReader object.

Source code in src/cloe_nessy/integration/reader/catalog_reader.py
def __init__(self):
    """Initializes the CatalogReader object.

    No reader-specific state is set up here; initialization is delegated
    entirely to the parent class (Spark session and logger setup).
    """
    super().__init__()

read(table_identifier='', *, options=None, delta_load_options=None, **kwargs)

Reads a table from the Unity Catalog.

Parameters:

Name Type Description Default
table_identifier str

The table identifier in the Unity Catalog in the format 'catalog.schema.table'.

''
options dict[str, str] | None

PySpark options for the read table operation.

None
delta_load_options DeltaLoadOptions | None

Options for delta loading, if applicable. When provided, uses delta loader instead of regular table read to perform incremental loading.

None
**kwargs Any

Additional keyword arguments to maintain compatibility with the base class method.

{}

Returns:

Type Description
DataFrame

The Spark DataFrame containing the read data.

Raises:

Type Description
ValueError

If the table_identifier is not provided, is not a string, or is not in the correct format.

ReadOperationFailedError

For delta load or table read failures.

Source code in src/cloe_nessy/integration/reader/catalog_reader.py
def read(
    self,
    table_identifier: str = "",
    *,
    options: dict[str, str] | None = None,
    delta_load_options: DeltaLoadOptions | None = None,
    **kwargs: Any,
) -> DataFrame:
    """Reads a table from the Unity Catalog.

    Args:
        table_identifier: The table identifier in the Unity Catalog in the format 'catalog.schema.table'.
        options: PySpark options for the read table operation.
        delta_load_options: Options for delta loading, if applicable. When provided, uses delta loader
            instead of regular table read to perform incremental loading.
        **kwargs: Additional keyword arguments to maintain compatibility with the base class method.

    Returns:
        The Spark DataFrame containing the read data.

    Raises:
        ValueError: If the table_identifier is not provided, is not a string, or is not in the correct format.
        ReadOperationFailedError: For delta load or table read failures.
    """
    if options is None:
        options = {}
    # Validate the identifier up front so callers get a precise error
    # instead of an opaque Spark AnalysisException.
    if not table_identifier:
        raise ValueError("table_identifier is required.")
    if not isinstance(table_identifier, str):
        raise ValueError("table_identifier must be a string.")
    if len(table_identifier.split(".")) != 3:
        raise ValueError("table_identifier must be in the format 'catalog.schema.table'.")

    try:
        if delta_load_options:
            # Incremental (delta) load path: delegate to a loader that performs
            # state-aware reading instead of a full table scan.
            self._console_logger.info(f"Performing delta load for table: {table_identifier}")
            delta_loader = DeltaLoaderFactory.create_loader(
                table_identifier=table_identifier,
                options=delta_load_options,
            )
            df = delta_loader.read_data(options=options)
            self._console_logger.info(f"Delta load completed for table: {table_identifier}")
            return df

        # Regular full table read.
        df = self._spark.read.table(table_identifier, **options)
        return df
    except AnalysisException as err:
        raise ValueError(f"Table not found: {table_identifier}") from err
    except Exception as err:
        raise ReadOperationFailedError(
            f"An error occurred while reading the table '{table_identifier}': {err}"
        ) from err

read_stream(table_identifier='', *, options=None, **kwargs)

Reads a streaming table from the Unity Catalog.

Parameters:

Name Type Description Default
table_identifier str

The table identifier in the Unity Catalog in the format 'catalog.schema.table'.

''
options dict[str, str] | None

PySpark options for the read stream operation.

None
**kwargs Any

Additional keyword arguments to maintain compatibility with the base class method.

{}

Returns:

Type Description
DataFrame

The Spark Streaming DataFrame containing the read data.

Raises:

Type Description
ValueError

If the table_identifier is not provided, is not a string, or is not in the correct format.

ReadOperationFailedError

For any other unexpected errors during the streaming read operation.

Source code in src/cloe_nessy/integration/reader/catalog_reader.py
def read_stream(
    self,
    table_identifier: str = "",
    *,
    options: dict[str, str] | None = None,
    **kwargs: Any,
) -> DataFrame:
    """Read a Unity Catalog table as a streaming DataFrame.

    Args:
        table_identifier: Fully qualified table name ('catalog.schema.table').
        options: PySpark options forwarded to the stream reader.
        **kwargs: Ignored; present for base-class compatibility.

    Returns:
        The streaming DataFrame for the requested table.

    Raises:
        ValueError: If table_identifier is missing, not a string, malformed,
            or the table is not found or not streamable.
        ReadOperationFailedError: For any other error during the read.
    """
    read_options = options if options is not None else {}

    # Guard clauses: reject bad identifiers before any Spark call.
    if not table_identifier:
        raise ValueError("table_identifier is required")
    if not isinstance(table_identifier, str):
        raise ValueError("table_identifier must be a string")
    name_parts = table_identifier.split(".")
    if len(name_parts) != 3:
        raise ValueError("table_identifier must be in the format 'catalog.schema.table'")

    try:
        return self._spark.readStream.table(table_identifier, **read_options)
    except AnalysisException as err:
        raise ValueError(f"Table not found or not streamable: {table_identifier}") from err
    except Exception as err:
        raise ReadOperationFailedError(
            f"An error occurred while reading the stream from table '{table_identifier}': {err}"
        ) from err

ExcelDataFrameReader

Bases: BaseReader

Utility class for reading an Excel file into a DataFrame.

This class uses the Pandas API on Spark to read Excel files to a DataFrame. More information can be found in the official documentation.

Source code in src/cloe_nessy/integration/reader/excel_reader.py
class ExcelDataFrameReader(BaseReader):
    """Utility class for reading an Excel file into a DataFrame.

    This class uses the Pandas API on Spark to read Excel files to a DataFrame.
    More information can be found in the [official
    documentation](https://spark.apache.org/docs/latest/api/python/reference/pyspark.pandas/index.html).
    """

    def __init__(self):
        """Initializes the ExcelDataFrameReader object."""
        super().__init__()

    def read_stream(self) -> DataFrame:
        """Currently not implemented.

        Raises:
            NotImplementedError: Always; streaming Excel reads are not supported.
        """
        raise NotImplementedError("Currently not implemented.")

    def read(
        self,
        location: str,
        sheet_name: str | int | list = 0,
        header: int | list[int] = 0,
        index_col: int | list[int] | None = None,
        usecols: int | str | list | Callable | None = None,
        true_values: list | None = None,
        false_values: list | None = None,
        nrows: int | None = None,
        na_values: list[str] | dict[str, list[str]] | None = None,
        keep_default_na: bool = True,
        parse_dates: bool | list | dict = False,
        date_parser: Callable | None = None,
        thousands: str | None = None,
        options: dict | None = None,
        load_as_strings: bool = False,
        add_metadata_column: bool = False,
        **_: Any,
    ) -> DataFrame:
        """Reads Excel file on specified location and returns DataFrame.

        Args:
            location: Location of files to read.
            sheet_name: Strings are used for sheet names.
                Integers are used in zero-indexed sheet positions. Lists of
                strings/integers are used to request multiple sheets. Specify None
                to get all sheets.
            header: Row to use for column labels. If a
                list of integers is passed those row positions will be combined. Use
                None if there is no header.
            index_col: Column to use as the row labels of the
                DataFrame. Pass None if there is no such column. If a list is
                passed, those columns will be combined.
            usecols: Return a subset of the columns. If
                None, then parse all columns. If str, then indicates comma separated
                list of Excel column letters and column ranges (e.g. “A:E” or
                “A,C,E:F”). Ranges are inclusive of both sides. If list of int,
                then indicates list of column numbers to be parsed. If list of
                string, then indicates list of column names to be parsed. If
                Callable, then evaluate each column name against it and parse the
                column if the Callable returns True.
            true_values: Values to consider as True.
            false_values: Values to consider as False.
            nrows: Number of rows to parse.
            na_values: Additional strings to recognize as
                NA/NaN. If dict passed, specific per-column NA values.
            keep_default_na: If na_values are specified and
                keep_default_na is False the default NaN values are overridden,
                otherwise they're appended to.
            parse_dates: The behavior is as follows:
                - bool. If True -> try parsing the index.
                - list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column.
                - list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column.
                - dict, e.g. {"foo": [1, 3]} -> parse columns 1, 3 as date and call result "foo"
                If a column or index contains an unparseable date, the entire column or index will be returned unaltered as an object data type.
            date_parser: Function to use for converting a sequence of
                string columns to an array of datetime instances. The default uses
                dateutil.parser.parser to do the conversion.
            thousands: Thousands separator for parsing string columns to
                numeric. Note that this parameter is only necessary for columns
                stored as TEXT in Excel, any numeric columns will automatically be
                parsed, regardless of display format.
            options: Optional keyword arguments passed to
                pyspark.pandas.read_excel and handed to TextFileReader.
            load_as_strings: If True, converts all columns to string type to avoid datatype conversion errors in Spark.
            add_metadata_column: If True, adds a metadata column containing the file location and sheet name.

        Raises:
            ValueError: If the location does not point to a file with an .xls/.xlsx extension.
            FileNotFoundError: If no file exists at the given location.
        """
        if options is None:
            options = {}
        # Substring check intentionally matches both '.xls' and '.xlsx'.
        if ".xls" not in location:
            raise ValueError(
                "The excel reader can only be used for files with extension .xls. Use FileReader or some other reader instead."
            )
        try:
            df = pd.read_excel(  # type: ignore
                location,
                sheet_name=sheet_name,
                header=header,
                index_col=index_col,
                usecols=usecols,
                true_values=true_values,
                false_values=false_values,
                nrows=nrows,
                na_values=na_values,
                keep_default_na=keep_default_na,
                parse_dates=parse_dates,
                date_parser=date_parser,
                thousands=thousands,
                dtype="string" if load_as_strings else None,
                **options,
            )
            if isinstance(df, dict):
                # in case pandas.read_excel returns a dict, union to single df
                df = pd.concat(list(df.values()), ignore_index=True)

        except FileNotFoundError:
            self._console_logger.error(f"No xls(x) file was found at the specified location [ '{location}' ].")
            raise
        except Exception as e:
            self._console_logger.error(f"read file [ '{location}' ] failed. Error: {e}")
            # Re-raise: without this, `df` would be unbound below and the real
            # error would be masked by an UnboundLocalError.
            raise
        else:
            self._console_logger.info(f"Read file [ '{location}' ] succeeded.")

        spark_df = self._spark.createDataFrame(df)
        if add_metadata_column:
            spark_df = self._add_metadata_column(df=spark_df, location=location, sheet_name=sheet_name)
        return spark_df

    def _add_metadata_column(self, df: DataFrame, location: str, sheet_name: str | int | list):
        """Adds a metadata column to a DataFrame.

        This method appends a column named `__metadata` to the given DataFrame, containing a map
        of metadata related to the Excel file read operation. The metadata includes the current
        timestamp, the location of the Excel file, and the sheet name(s) from which the data was read.

        Args:
            df: The DataFrame to which the metadata column will be added.
            location: The file path of the Excel file.
            sheet_name: The sheet name or sheet index used when reading the Excel file.

        Returns:
            DataFrame: The original DataFrame with an added `__metadata` column containing the Excel file metadata.
        """
        # Convert sheet_name to string if it is not already a string
        if isinstance(sheet_name, list):
            sheet_name = ", ".join(map(str, sheet_name))
        else:
            sheet_name = str(sheet_name)

        df = df.withColumn(
            "__metadata",
            F.create_map(
                F.lit("timestamp"),
                F.current_timestamp().cast("string"),
                F.lit("file_location"),
                F.lit(location),
                F.lit("sheet_name"),
                F.lit(sheet_name),
            ),
        )
        return df

__init__()

Initializes the ExcelDataFrameReader object.

Source code in src/cloe_nessy/integration/reader/excel_reader.py
def __init__(self):
    """Initializes the ExcelDataFrameReader object.

    No reader-specific state is set up here; initialization is delegated
    entirely to the parent class.
    """
    super().__init__()

read(location, sheet_name=0, header=0, index_col=None, usecols=None, true_values=None, false_values=None, nrows=None, na_values=None, keep_default_na=True, parse_dates=False, date_parser=None, thousands=None, options=None, load_as_strings=False, add_metadata_column=False, **_)

Reads Excel file on specified location and returns DataFrame.

Parameters:

Name Type Description Default
location str

Location of files to read.

required
sheet_name str | int | list

Strings are used for sheet names. Integers are used in zero-indexed sheet positions. Lists of strings/integers are used to request multiple sheets. Specify None to get all sheets.

0
header int | list[int]

Row to use for column labels. If a list of integers is passed those row positions will be combined. Use None if there is no header.

0
index_col int | list[int] | None

Column to use as the row labels of the DataFrame. Pass None if there is no such column. If a list is passed, those columns will be combined.

None
usecols int | str | list | Callable | None

Return a subset of the columns. If None, then parse all columns. If str, then indicates comma separated list of Excel column letters and column ranges (e.g. “A:E” or “A,C,E:F”). Ranges are inclusive of both sides. If list of int, then indicates list of column numbers to be parsed. If list of string, then indicates list of column names to be parsed. If Callable, then evaluate each column name against it and parse the column if the Callable returns True.

None
true_values list | None

Values to consider as True.

None
false_values list | None

Values to consider as False.

None
nrows int | None

Number of rows to parse.

None
na_values list[str] | dict[str, list[str]] | None

Additional strings to recognize as NA/NaN. If dict passed, specific per-column NA values.

None
keep_default_na bool

If na_values are specified and keep_default_na is False the default NaN values are overridden, otherwise they're appended to.

True
parse_dates bool | list | dict

The behavior is as follows: - bool. If True -> try parsing the index. - list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column. - list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column. - dict, e.g. {"foo": [1, 3]} -> parse columns 1, 3 as date and call result "foo". If a column or index contains an unparseable date, the entire column or index will be returned unaltered as an object data type.

False
date_parser Callable | None

Function to use for converting a sequence of string columns to an array of datetime instances. The default uses dateutil.parser.parser to do the conversion.

None
thousands str | None

Thousands separator for parsing string columns to numeric. Note that this parameter is only necessary for columns stored as TEXT in Excel, any numeric columns will automatically be parsed, regardless of display format.

None
options dict | None

Optional keyword arguments passed to pyspark.pandas.read_excel and handed to TextFileReader.

None
load_as_strings bool

If True, converts all columns to string type to avoid datatype conversion errors in Spark.

False
add_metadata_column bool

If True, adds a metadata column containing the file location and sheet name.

False
Source code in src/cloe_nessy/integration/reader/excel_reader.py
def read(
    self,
    location: str,
    sheet_name: str | int | list = 0,
    header: int | list[int] = 0,
    index_col: int | list[int] | None = None,
    usecols: int | str | list | Callable | None = None,
    true_values: list | None = None,
    false_values: list | None = None,
    nrows: int | None = None,
    na_values: list[str] | dict[str, list[str]] | None = None,
    keep_default_na: bool = True,
    parse_dates: bool | list | dict = False,
    date_parser: Callable | None = None,
    thousands: str | None = None,
    options: dict | None = None,
    load_as_strings: bool = False,
    add_metadata_column: bool = False,
    **_: Any,
) -> DataFrame:
    """Reads Excel file on specified location and returns DataFrame.

    Args:
        location: Location of files to read.
        sheet_name: Strings are used for sheet names.
            Integers are used in zero-indexed sheet positions. Lists of
            strings/integers are used to request multiple sheets. Specify None
            to get all sheets.
        header: Row to use for column labels. If a
            list of integers is passed those row positions will be combined. Use
            None if there is no header.
        index_col: Column to use as the row labels of the
            DataFrame. Pass None if there is no such column. If a list is
            passed, those columns will be combined.
        usecols: Return a subset of the columns. If
            None, then parse all columns. If str, then indicates comma separated
            list of Excel column letters and column ranges (e.g. “A:E” or
            “A,C,E:F”). Ranges are inclusive of both sides. If list of int,
            then indicates list of column numbers to be parsed. If list of
            string, then indicates list of column names to be parsed. If
            Callable, then evaluate each column name against it and parse the
            column if the Callable returns True.
        true_values: Values to consider as True.
        false_values: Values to consider as False.
        nrows: Number of rows to parse.
        na_values: Additional strings to recognize as
            NA/NaN. If dict passed, specific per-column NA values.
        keep_default_na: If na_values are specified and
            keep_default_na is False the default NaN values are overridden,
            otherwise they're appended to.
        parse_dates: The behavior is as follows:
            - bool. If True -> try parsing the index.
            - list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column.
            - list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column.
            - dict, e.g. {"foo": [1, 3]} -> parse columns 1, 3 as date and call result "foo"
            If a column or index contains an unparseable date, the entire column or index will be returned unaltered as an object data type.
        date_parser: Function to use for converting a sequence of
            string columns to an array of datetime instances. The default uses
            dateutil.parser.parser to do the conversion.
        thousands: Thousands separator for parsing string columns to
            numeric. Note that this parameter is only necessary for columns
            stored as TEXT in Excel, any numeric columns will automatically be
            parsed, regardless of display format.
        options: Optional keyword arguments passed to
            pyspark.pandas.read_excel and handed to TextFileReader.
        load_as_strings: If True, converts all columns to string type to avoid datatype conversion errors in Spark.
        add_metadata_column: If True, adds a metadata column containing the file location and sheet name.

    Raises:
        ValueError: If the location does not point to a file with an .xls/.xlsx extension.
        FileNotFoundError: If no file exists at the given location.
    """
    if options is None:
        options = {}
    # Substring check intentionally matches both '.xls' and '.xlsx'.
    if ".xls" not in location:
        raise ValueError(
            "The excel reader can only be used for files with extension .xls. Use FileReader or some other reader instead."
        )
    try:
        df = pd.read_excel(  # type: ignore
            location,
            sheet_name=sheet_name,
            header=header,
            index_col=index_col,
            usecols=usecols,
            true_values=true_values,
            false_values=false_values,
            nrows=nrows,
            na_values=na_values,
            keep_default_na=keep_default_na,
            parse_dates=parse_dates,
            date_parser=date_parser,
            thousands=thousands,
            dtype="string" if load_as_strings else None,
            **options,
        )
        if isinstance(df, dict):
            # in case pandas.read_excel returns a dict, union to single df
            df = pd.concat(list(df.values()), ignore_index=True)

    except FileNotFoundError:
        self._console_logger.error(f"No xls(x) file was found at the specified location [ '{location}' ].")
        raise
    except Exception as e:
        self._console_logger.error(f"read file [ '{location}' ] failed. Error: {e}")
        # Re-raise: without this, `df` would be unbound below and the real
        # error would be masked by an UnboundLocalError.
        raise
    else:
        self._console_logger.info(f"Read file [ '{location}' ] succeeded.")

    spark_df = self._spark.createDataFrame(df)
    if add_metadata_column:
        spark_df = self._add_metadata_column(df=spark_df, location=location, sheet_name=sheet_name)
    return spark_df

read_stream()

Currently not implemented.

Source code in src/cloe_nessy/integration/reader/excel_reader.py
def read_stream(self) -> DataFrame:
    """Currently not implemented.

    Raises:
        NotImplementedError: Always; streaming Excel reads are not supported.
    """
    raise NotImplementedError("Currently not implemented.")

FileReader

Bases: BaseReader

Utility class for reading a file into a DataFrame.

This class reads data from files and loads it into a Spark DataFrame.

Source code in src/cloe_nessy/integration/reader/file_reader.py
class FileReader(BaseReader):
    """Utility class for reading a file into a DataFrame.

    This class reads data from files and loads it into a Spark DataFrame.
    """

    def __init__(self):
        """Initializes the FileReader object."""
        super().__init__()

    def _get_reader(self) -> DataFrameReader:
        """Returns the batch DataFrameReader of the Spark session."""
        return self._spark.read

    def _get_stream_reader(self) -> DataStreamReader:
        """Returns the streaming DataStreamReader of the Spark session."""
        return self._spark.readStream

    def read(
        self,
        location: str,
        *,
        spark_format: str | None = None,
        extension: str | None = None,
        schema: str | None = None,
        search_subdirs: bool = True,
        options: dict | None = None,
        add_metadata_column: bool = False,
        delta_load_options: DeltaLoadOptions | None = None,
        **kwargs: Any,
    ) -> DataFrame:
        """Reads files from a specified location and returns a DataFrame.

        Arguments:
            location: Location of files to read.
            spark_format: Format of files to read. If not provided, it will be inferred from the extension.
            extension: File extension (csv, json, parquet, txt). Used if spark_format is not provided.
            schema: Schema of the file. If None, schema will be inferred.
            search_subdirs: Whether to include files in subdirectories.
            options: Spark DataFrame reader options.
            add_metadata_column: Whether to include __metadata column in the DataFrame.
            delta_load_options: Options for delta loading, if applicable. When provided and spark_format is 'delta',
                uses delta loader for incremental loading of Delta Lake tables.
            **kwargs: Additional keyword arguments to maintain compatibility with the base class method.

        Raises:
            ValueError: If neither spark_format nor extension is provided.
            ValueError: If the provided extension is not supported.
            Exception: If there is an error while reading the files.

        Note:
            - The `spark_format` parameter is used to specify the format of the files to be read.
            - If `spark_format` is not provided, the method will try to infer it from the `extension`.
            - The `extension` parameter is used to specify the file extension (e.g., 'csv', 'json', etc.).
            - If both `spark_format` and `extension` are provided, `spark_format` will take precedence.
            - The method will raise an error if neither `spark_format` nor `extension` is provided.

        Returns:
            A DataFrame containing the data from the files.
        """
        if options is None:
            options = {}

        if not spark_format and not extension:
            raise ValueError("Either spark_format or extension must be provided.")

        # Handle delta loading for Delta Lake tables
        if delta_load_options and (spark_format == "delta" or extension == "delta"):
            self._console_logger.info(f"Performing delta load for Delta table at: {location}")
            try:
                # For Delta tables, use location as table identifier for delta loader
                delta_loader = DeltaLoaderFactory.create_loader(
                    table_identifier=location,
                    options=delta_load_options,
                )
                # `options` is guaranteed to be a dict here (defaulted above).
                df = delta_loader.read_data(options=options)
                self._console_logger.info(f"Delta load completed for: {location}")
                return df
            except Exception as e:
                self._console_logger.error(f"Delta load failed for '{location}': {e}")
                raise

        self._console_logger.debug(f"Reading files from [ '{location}' ] ...")
        # Maps supported file extensions to the corresponding Spark formats.
        extension_to_datatype_dict = {
            "csv": "csv",
            "json": "json",
            "parquet": "parquet",
            "txt": "text",
            "xml": "xml",
            "delta": "delta",
        }

        if extension and not spark_format:
            if extension not in extension_to_datatype_dict:
                raise ValueError(f"Unsupported file extension: {extension}")
            spark_format = extension_to_datatype_dict[extension]
        self._console_logger.debug(f"Reading files with format: {spark_format}")
        if extension:
            file_paths = get_file_paths(location, extension, search_subdirs, onelake_relative_paths=True)
        else:
            file_paths = [location]
        self._console_logger.debug(f"Found {len(file_paths)} files to read")
        self._console_logger.debug(f"File paths: {file_paths}")
        # Type narrowing for checkers: one of the branches above has set it.
        assert spark_format is not None

        reader = self._get_reader().format(spark_format)
        if schema:
            # DataFrameReader.schema mutates the reader in place; the return
            # value does not need to be captured.
            reader.schema(schema)
        else:
            options["inferSchema"] = True

        self._console_logger.debug(f"Setting options: {options}")
        reader.options(**options)

        try:
            self._console_logger.debug("Loading files into DataFrame")
            df = reader.load([str(p) for p in file_paths])
            self._console_logger.debug("Successfully loaded files into DataFrame")
            if add_metadata_column:
                df = self._add_metadata_column(df)
        except Exception as e:
            self._console_logger.error(f"Failed to read files from [ '{location}' ]: {e}")
            raise
        else:
            self._console_logger.info(f"Successfully read files from [ '{location}' ]")
            return df

    def read_stream(
        self,
        location: str = "",
        schema: StructType | str | None = None,
        format: str = "delta",
        add_metadata_column: bool = False,
        options: dict[str, Any] | None = None,
        **_: Any,
    ) -> DataFrame:
        """Reads specified location as a stream and returns streaming DataFrame.

        Arguments:
            location: Location of files to read.
            format: Format of files to read.
            schema: Schema of the file.
            add_metadata_column: Whether to include __metadata column in the DataFrame.
            options: Spark DataFrame reader options.

        Raises:
            ValueError: If location is not provided.

        Returns:
            A Streaming DataFrame
        """
        if not location:
            raise ValueError("Location is required for streaming.")
        self._console_logger.debug(f"Reading files from [ '{location}' ] ...")
        try:
            if options is None:
                options = {}
            reader = self._get_stream_reader()
            reader.format(format)
            # Capture malformed records in a dedicated column instead of failing.
            reader.option("rescuedDataColumn", "_rescued_data")
            if schema is None:
                options["inferSchema"] = True
            else:
                reader.schema(schema)
            reader.options(**options)
            df = reader.load(location)
            if add_metadata_column:
                df = self._add_metadata_column(df)
        except Exception as e:
            self._console_logger.error(f"Failed to read files from [ '{location}' ]: {e}")
            raise
        else:
            self._console_logger.info(f"Successfully read files from [ '{location}' ]")
            return df

    def _add_metadata_column(self, df: DataFrame) -> DataFrame:
        """Add all metadata columns to the DataFrame."""
        metadata_columns = df.select("_metadata.*").columns

        # Cast all metadata values to strings to ensure type consistency in the map
        entries = [(F.lit(field), F.col(f"_metadata.{field}").cast("string")) for field in metadata_columns]
        flat_list = [item for tup in entries for item in tup]

        df = df.withColumn("__metadata", F.create_map(flat_list))

        return df

__init__()

Initializes the FileReader object.

Source code in src/cloe_nessy/integration/reader/file_reader.py
def __init__(self):
    """Initializes the FileReader object.

    No reader-specific state is set up here; initialization is delegated
    entirely to the parent class.
    """
    super().__init__()

read(location, *, spark_format=None, extension=None, schema=None, search_subdirs=True, options=None, add_metadata_column=False, delta_load_options=None, **kwargs)

Reads files from a specified location and returns a DataFrame.

Parameters:

Name Type Description Default
location str

Location of files to read.

required
spark_format str | None

Format of files to read. If not provided, it will be inferred from the extension.

None
extension str | None

File extension (csv, json, parquet, txt). Used if spark_format is not provided.

None
schema str | None

Schema of the file. If None, schema will be inferred.

None
search_subdirs bool

Whether to include files in subdirectories.

True
options dict | None

Spark DataFrame reader options.

None
add_metadata_column bool

Whether to include __metadata column in the DataFrame.

False
delta_load_options DeltaLoadOptions | None

Options for delta loading, if applicable. When provided and spark_format is 'delta', uses delta loader for incremental loading of Delta Lake tables.

None
**kwargs Any

Additional keyword arguments to maintain compatibility with the base class method.

{}

Raises:

Type Description
ValueError

If neither spark_format nor extension is provided.

ValueError

If the provided extension is not supported.

Exception

If there is an error while reading the files.

Note
  • The spark_format parameter is used to specify the format of the files to be read.
  • If spark_format is not provided, the method will try to infer it from the extension.
  • The extension parameter is used to specify the file extension (e.g., 'csv', 'json', etc.).
  • If both spark_format and extension are provided, spark_format will take precedence.
  • The method will raise an error if neither spark_format nor extension is provided.

Returns:

Type Description
DataFrame

A DataFrame containing the data from the files.

Source code in src/cloe_nessy/integration/reader/file_reader.py
def read(
    self,
    location: str,
    *,
    spark_format: str | None = None,
    extension: str | None = None,
    schema: str | None = None,
    search_subdirs: bool = True,
    options: dict | None = None,
    add_metadata_column: bool = False,
    delta_load_options: DeltaLoadOptions | None = None,
    **kwargs: Any,
) -> DataFrame:
    """Reads files from a specified location and returns a DataFrame.

    Arguments:
        location: Location of files to read.
        spark_format: Format of files to read. If not provided, it will be inferred from the extension.
        extension: File extension (csv, json, parquet, txt, xml, delta). Used if spark_format is not provided.
        schema: Schema of the file. If None, schema will be inferred.
        search_subdirs: Whether to include files in subdirectories.
        options: Spark DataFrame reader options.
        add_metadata_column: Whether to include __metadata column in the DataFrame.
        delta_load_options: Options for delta loading, if applicable. When provided and spark_format is 'delta',
            uses delta loader for incremental loading of Delta Lake tables.
        **kwargs: Additional keyword arguments to maintain compatibility with the base class method.

    Raises:
        ValueError: If neither spark_format nor extension is provided.
        ValueError: If the provided extension is not supported.
        Exception: If there is an error while reading the files.

    Note:
        - The `spark_format` parameter is used to specify the format of the files to be read.
        - If `spark_format` is not provided, the method will try to infer it from the `extension`.
        - The `extension` parameter is used to specify the file extension (e.g., 'csv', 'json', etc.).
        - If both `spark_format` and `extension` are provided, `spark_format` will take precedence.
        - The method will raise an error if neither `spark_format` nor `extension` is provided.

    Returns:
        A DataFrame containing the data from the files.
    """
    if options is None:
        options = {}

    if not spark_format and not extension:
        raise ValueError("Either spark_format or extension must be provided.")

    # Delta-load short-circuit: incremental reads of Delta Lake tables are
    # delegated to a dedicated loader instead of the plain Spark reader.
    if delta_load_options and (spark_format == "delta" or extension == "delta"):
        self._console_logger.info(f"Performing delta load for Delta table at: {location}")
        try:
            # For Delta tables, use location as table identifier for delta loader
            delta_loader = DeltaLoaderFactory.create_loader(
                table_identifier=location,
                options=delta_load_options,
            )
            # `options` is guaranteed to be a dict at this point, so no
            # `or {}` fallback is needed.
            df = delta_loader.read_data(options=options)
            self._console_logger.info(f"Delta load completed for: {location}")
            return df
        except Exception as e:
            self._console_logger.error(f"Delta load failed for '{location}': {e}")
            raise

    self._console_logger.debug(f"Reading files from [ '{location}' ] ...")
    # Maps a user-supplied file extension to the corresponding Spark format name.
    extension_to_datatype_dict = {
        "csv": "csv",
        "json": "json",
        "parquet": "parquet",
        "txt": "text",
        "xml": "xml",
        "delta": "delta",
    }

    if extension and not spark_format:
        if extension not in extension_to_datatype_dict:
            raise ValueError(f"Unsupported file extension: {extension}")
        spark_format = extension_to_datatype_dict[extension]
    self._console_logger.debug(f"Reading files with format: {spark_format}")
    if extension:
        file_paths = get_file_paths(location, extension, search_subdirs, onelake_relative_paths=True)
    else:
        file_paths = [location]
    self._console_logger.debug(f"Found {len(file_paths)} files to read")
    self._console_logger.debug(f"File paths: {file_paths}")
    if spark_format is None:
        # Unreachable given the guards above (it narrows the type for static
        # checkers); raised explicitly instead of using `assert` so the check
        # survives `python -O`, which strips assert statements.
        raise ValueError("Could not determine spark_format.")

    reader = self._get_reader().format(spark_format)
    if schema:
        reader.schema(schema)
    else:
        # No explicit schema: let Spark infer one from the data.
        options["inferSchema"] = True

    self._console_logger.debug(f"Setting options: {options}")
    reader.options(**options)

    try:
        self._console_logger.debug("Loading files into DataFrame")
        df = reader.load([str(p) for p in file_paths])
        self._console_logger.debug("Successfully loaded files into DataFrame")
        if add_metadata_column:
            df = self._add_metadata_column(df)
    except Exception as e:
        self._console_logger.error(f"Failed to read files from [ '{location}' ]: {e}")
        raise
    else:
        self._console_logger.info(f"Successfully read files from [ '{location}' ]")
        return df

read_stream(location='', schema=None, format='delta', add_metadata_column=False, options=None, **_)

Reads specified location as a stream and returns streaming DataFrame.

Parameters:

Name Type Description Default
location str

Location of files to read.

''
format str

Format of files to read.

'delta'
schema StructType | str | None

Schema of the file.

None
add_metadata_column bool

Whether to include __metadata column in the DataFrame.

False
options dict[str, Any] | None

Spark DataFrame reader options.

None

Raises:

Type Description
ValueError

If location is not provided.

Returns:

Type Description
DataFrame

A Streaming DataFrame

Source code in src/cloe_nessy/integration/reader/file_reader.py
def read_stream(
    self,
    location: str = "",
    schema: StructType | str | None = None,
    format: str = "delta",
    add_metadata_column: bool = False,
    options: dict[str, Any] | None = None,
    **_: Any,
) -> DataFrame:
    """Open the given location as a stream and return a streaming DataFrame.

    Arguments:
        location: Location of files to read.
        schema: Schema of the file.
        format: Format of files to read.
        add_metadata_column: Whether to include __metadata column in the DataFrame.
        options: Spark DataFrame reader options.

    Raises:
        ValueError: If location is not provided.

    Returns:
        A Streaming DataFrame
    """
    if not location:
        raise ValueError("Location is required for streaming.")
    self._console_logger.debug(f"Reading files from [ '{location}' ] ...")
    reader_options = {} if options is None else options
    try:
        stream_reader = self._get_stream_reader()
        stream_reader.format(format)
        # Capture malformed records in a dedicated column instead of failing.
        stream_reader.option("rescuedDataColumn", "_rescued_data")
        if schema is not None:
            stream_reader.schema(schema)
        else:
            # No explicit schema: ask Spark to infer one.
            reader_options["inferSchema"] = True
        stream_reader.options(**reader_options)
        stream_df = stream_reader.load(location)
        if add_metadata_column:
            stream_df = self._add_metadata_column(stream_df)
    except Exception as e:
        self._console_logger.error(f"Failed to read files from [ '{location}' ]: {e}")
        raise
    else:
        self._console_logger.info(f"Successfully read files from [ '{location}' ]")
        return stream_df

RequestSet

Bases: TypedDict

The format for dynamic requests.

Source code in src/cloe_nessy/integration/reader/api_reader.py
class RequestSet(TypedDict):
    """The format for dynamic requests.

    One entry describes a single API request to be issued by the reader.
    Note: a `| None` value type does not make the key optional — TypedDict
    keys are all required by default (total=True), so every key below must
    be present, with `None` marking an absent part.
    """

    # Target endpoint of the request.
    endpoint: str
    # Query parameters for the request.
    params: dict[str, Any]
    # Extra HTTP headers, or None when no headers are supplied.
    headers: dict[str, Any] | None
    # Request body payload, or None — presumably form data; confirm against the API client.
    data: dict[str, Any] | None
    # JSON body payload, or None when the request carries no JSON.
    json_body: dict[str, Any] | None