HfQueryAPI

tfbpapi.HfQueryAPI.HfQueryAPI

Bases: HfCacheManager

Minimal Hugging Face API client focused on metadata retrieval.
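
A minimal usage sketch (the repository and config names below are placeholders, not a real dataset):

    from tfbpapi.HfQueryAPI import HfQueryAPI

    # "user/dataset" and "hackett_2020" are hypothetical; substitute a real
    # HF repo and one of its config names.
    api = HfQueryAPI("user/dataset", repo_type="dataset")
    api.set_filter("hackett_2020", time=15, mechanism="ZEV")
    df = api.query("SELECT * FROM hackett_2020", "hackett_2020")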

Source code in tfbpapi/HfQueryAPI.py
class HfQueryAPI(HfCacheManager):
    """Minimal Hugging Face API client focused on metadata retrieval."""

    def __init__(
        self,
        repo_id: str,
        repo_type: Literal["model", "dataset", "space"] = "dataset",
        token: str | None = None,
        cache_dir: str | Path | None = None,
        duckdb_conn: duckdb.DuckDBPyConnection | None = None,
    ):
        """
        Initialize the minimal HF Query API client.

        :param repo_id: Repository identifier (e.g., "user/dataset")
        :param repo_type: Type of repository ("dataset", "model", "space")
        :param token: HuggingFace token for authentication
        :param cache_dir: HF cache_dir for downloads
        :param duckdb_conn: DuckDB connection to use; a new in-memory
            connection is created when one is not provided

        """
        # Avoid a shared default connection: create a fresh in-memory
        # connection per instance unless one is supplied.
        self._duckdb_conn = (
            duckdb_conn if duckdb_conn is not None else duckdb.connect(":memory:")
        )

        # Initialize parent with minimal setup
        super().__init__(
            repo_id=repo_id,
            duckdb_conn=self._duckdb_conn,
            token=token,
            logger=logging.getLogger(self.__class__.__name__),
        )

        # Store basic configuration
        self.repo_type = repo_type
        self.cache_dir = Path(cache_dir) if cache_dir is not None else CACHE_DIR

        # Filter storage system
        # dict structure:
        #   {config_name: "SQL WHERE clause", ...}
        self._table_filters: dict[str, str] = {}

    @property
    def cache_dir(self) -> Path:
        return self._cache_dir

    @cache_dir.setter
    def cache_dir(self, value: str | Path) -> None:
        """Set the cache directory for huggingface_hub downloads."""
        path = Path(value)
        if not path.exists():
            raise FileNotFoundError(f"Cache directory {path} does not exist")
        self._cache_dir = path

    def _get_explicit_metadata(self, config, table_name: str) -> pd.DataFrame:
        """Helper function to handle explicit metadata configurations."""
        sql = f"SELECT * FROM {table_name}"
        return self.duckdb_conn.execute(sql).fetchdf()

    def _get_embedded_metadata(self, config, table_name: str) -> pd.DataFrame:
        """Helper function to handle embedded metadata configurations."""
        if config.metadata_fields is None:
            raise ValueError(f"Config {config.config_name} has no metadata fields")
        fields = ", ".join(config.metadata_fields)
        where_clauses = " AND ".join(
            [f"{field} IS NOT NULL" for field in config.metadata_fields]
        )
        sql = f"""
            SELECT DISTINCT {fields}, COUNT(*) as count
            FROM {table_name}
            WHERE {where_clauses}
            GROUP BY {fields}
            ORDER BY count DESC
        """
        return self.duckdb_conn.execute(sql).fetchdf()

    def _validate_metadata_fields(
        self, config_name: str, field_names: list[str]
    ) -> None:
        """
        Validate that field names exist in the config's metadata columns.

        :param config_name: Configuration name to validate against
        :param field_names: List of field names to validate
        :raises InvalidFilterFieldError: If any fields don't exist in metadata
        """
        if not field_names:
            return

        try:
            metadata_df = self.get_metadata(config_name)
            if metadata_df.empty:
                raise InvalidFilterFieldError(
                    config_name=config_name,
                    invalid_fields=field_names,
                    available_fields=[],
                )

            available_fields = list(metadata_df.columns)
            invalid_fields = [
                field for field in field_names if field not in available_fields
            ]

            if invalid_fields:
                raise InvalidFilterFieldError(
                    config_name=config_name,
                    invalid_fields=invalid_fields,
                    available_fields=available_fields,
                )
        except Exception as e:
            if isinstance(e, InvalidFilterFieldError):
                raise
            # If metadata retrieval fails for other reasons, log warning but allow
            self.logger.warning(
                f"Could not validate filter fields for {config_name}: {e}"
            )

    def _extract_fields_from_sql(self, sql_where: str) -> list[str]:
        """
        Extract potential field names from SQL WHERE clause.

        Uses a token-based pass over the clause to identify likely column
        references while avoiding string literals used as values.

        :param sql_where: SQL WHERE clause (without 'WHERE' keyword)
        :return: List of potential field names found in the SQL
        """
        if not sql_where.strip():
            return []

        field_names = set()

        # Tokenize the SQL to better understand context
        # This regex splits on key tokens while preserving them
        tokens = re.findall(
            r"""
            \bIN\s*\([^)]+\)|                    # IN clauses with content
            \bBETWEEN\s+\S+\s+AND\s+\S+|        # BETWEEN clauses
            (?:'[^']*')|(?:"[^"]*")|             # Quoted strings
            \b(?:AND|OR|NOT|IS|NULL|LIKE|BETWEEN|IN)\b|  # SQL keywords
            [=!<>]+|                             # Comparison operators
            [(),]|                               # Delimiters
            \b[a-zA-Z_][a-zA-Z0-9_]*\b|          # Identifiers
            \S+                                  # Other tokens
        """,
            sql_where,
            re.VERBOSE | re.IGNORECASE,
        )

        # Track the context to determine if an identifier is a field name or value
        i = 0
        while i < len(tokens):
            token = tokens[i].strip()
            if not token:
                i += 1
                continue

            # Skip IN clauses entirely - they contain values, not field names
            if re.match(r"\bIN\s*\(", token, re.IGNORECASE):
                i += 1
                continue

            # Skip BETWEEN clauses entirely - they contain values, not field names
            if re.match(r"\bBETWEEN\b", token, re.IGNORECASE):
                i += 1
                continue

            # Handle quoted strings - could be identifiers or values depending on context
            if token.startswith(("'", '"')):
                # Extract the content inside quotes
                quoted_content = token[1:-1]

                # Find next significant token to determine context
                next_significant_token = None
                for j in range(i + 1, len(tokens)):
                    next_token = tokens[j].strip()
                    if next_token and next_token not in [" ", "\n", "\t"]:
                        next_significant_token = next_token
                        break

                # Check if this quoted string is a field name based on context
                is_quoted_field = False

                # Check what comes after this quoted string
                if next_significant_token:
                    # If followed by comparison operators or SQL keywords, it's a field name
                    if (
                        next_significant_token
                        in ["=", "!=", "<>", "<", ">", "<=", ">="]
                        or next_significant_token.upper() in ["IS", "LIKE", "NOT"]
                        or re.match(
                            r"\bBETWEEN\b", next_significant_token, re.IGNORECASE
                        )
                        or re.match(r"\bIN\s*\(", next_significant_token, re.IGNORECASE)
                    ):
                        is_quoted_field = True

                # Also check what comes before this quoted string
                if not is_quoted_field and i > 0:
                    # Find the previous significant token
                    prev_significant_token = None
                    for j in range(i - 1, -1, -1):
                        prev_token = tokens[j].strip()
                        if prev_token and prev_token not in [" ", "\n", "\t"]:
                            prev_significant_token = prev_token
                            break

                    # If preceded by a comparison operator, could be a field name
                    # But we need to be very careful not to treat string literals as field names
                    if prev_significant_token and prev_significant_token in [
                        "=",
                        "!=",
                        "<>",
                        "<",
                        ">",
                        "<=",
                        ">=",
                    ]:
                        # Only treat as field name if it looks like a database identifier
                        # AND doesn't look like a typical string value
                        if self._looks_like_identifier(
                            quoted_content
                        ) and self._looks_like_database_identifier(quoted_content):
                            is_quoted_field = True

                if is_quoted_field:
                    field_names.add(quoted_content)

                i += 1
                continue

            # Skip SQL keywords and operators
            if token.upper() in SQL_FILTER_KEYWORDS or token in [
                "=",
                "!=",
                "<>",
                "<",
                ">",
                "<=",
                ">=",
                "(",
                ")",
                ",",
            ]:
                i += 1
                continue

            # Skip numeric literals
            if re.match(r"^-?\d+(\.\d+)?$", token):
                i += 1
                continue

            # Check if this looks like an identifier (field name)
            if re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", token):
                # Check the context - if the next non-whitespace token is a comparison operator,
                # then this is likely a field name
                next_significant_token = None
                for j in range(i + 1, len(tokens)):
                    next_token = tokens[j].strip()
                    if next_token and next_token not in [" ", "\n", "\t"]:
                        next_significant_token = next_token
                        break

                # Check if followed by a comparison operator or SQL keyword that indicates a field
                is_field = False

                if next_significant_token:
                    # Direct comparison operators
                    if next_significant_token in [
                        "=",
                        "!=",
                        "<>",
                        "<",
                        ">",
                        "<=",
                        ">=",
                    ]:
                        is_field = True
                    # SQL keywords that follow field names
                    elif next_significant_token.upper() in ["IS", "LIKE", "NOT"]:
                        is_field = True
                    # BETWEEN clause (could be just 'BETWEEN' or 'BETWEEN ... AND ...')
                    elif next_significant_token.upper() == "BETWEEN" or re.match(
                        r"\bBETWEEN\b", next_significant_token, re.IGNORECASE
                    ):
                        is_field = True
                    # IN clause (could be just 'IN' or 'IN (...)')
                    elif next_significant_token.upper() == "IN" or re.match(
                        r"\bIN\s*\(", next_significant_token, re.IGNORECASE
                    ):
                        is_field = True

                # If not a field yet, check other contexts
                if not is_field and i > 0:
                    # Find the previous significant token
                    prev_significant_token = None
                    for j in range(i - 1, -1, -1):
                        prev_token = tokens[j].strip()
                        if prev_token and prev_token not in [" ", "\n", "\t"]:
                            prev_significant_token = prev_token
                            break

                    # Case 1: After AND/OR and before an operator (original logic)
                    if (
                        prev_significant_token
                        and prev_significant_token.upper() in ["AND", "OR"]
                        and next_significant_token
                    ):
                        # Same checks as above
                        if next_significant_token in [
                            "=",
                            "!=",
                            "<>",
                            "<",
                            ">",
                            "<=",
                            ">=",
                        ]:
                            is_field = True
                        elif next_significant_token.upper() in ["IS", "LIKE", "NOT"]:
                            is_field = True
                        elif next_significant_token.upper() == "BETWEEN" or re.match(
                            r"\bBETWEEN\b", next_significant_token, re.IGNORECASE
                        ):
                            is_field = True
                        elif next_significant_token.upper() == "IN" or re.match(
                            r"\bIN\s*\(", next_significant_token, re.IGNORECASE
                        ):
                            is_field = True

                    # Case 2: After a comparison operator (second operand)
                    elif prev_significant_token and prev_significant_token in [
                        "=",
                        "!=",
                        "<>",
                        "<",
                        ">",
                        "<=",
                        ">=",
                    ]:
                        # But exclude function names (identifiers followed by '(')
                        if next_significant_token != "(":
                            is_field = True

                    # Case 3: After opening parenthesis (function parameter)
                    elif prev_significant_token == "(":
                        is_field = True

                if is_field:
                    field_names.add(token)

            i += 1

        return list(field_names)

    def _looks_like_identifier(self, content: str) -> bool:
        """
        Determine if quoted content looks like an identifier rather than a string literal.

        :param content: The content inside quotes
        :return: True if it looks like an identifier, False if it looks like a string literal
        """
        if not content:
            return False

        # Basic identifier pattern: starts with letter/underscore, contains only alphanumeric/underscore
        if re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", content):
            return True

        # Extended identifier pattern: could contain spaces if it's a column name like "quoted field"
        # but not if it contains many special characters or looks like natural language
        if " " in content:
            # If it contains spaces, it should still look identifier-like
            # Allow simple cases like "quoted field" but not "this is a long string value"
            words = content.split()
            if len(words) <= 3 and all(
                re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", word) for word in words
            ):
                return True
            return False

        return False

    def _looks_like_database_identifier(self, content: str) -> bool:
        """
        Determine if content looks like a database identifier (field/table name).

        This is more strict than _looks_like_identifier and helps distinguish
        between quoted identifiers like "field_name" and string values like "value1".

        :param content: The content to check
        :return: True if it looks like a database identifier
        """
        if not content:
            return False

        # Database identifiers typically:
        # 1. Don't start with numbers (field names rarely start with numbers)
        # 2. Often contain underscores or descriptive words
        # 3. Don't look like simple values

        # Reject if starts with a number (like "value1", "123abc")
        if content[0].isdigit():
            return False

        # Handle short simple values that could be literals or field names
        if len(content) <= 6 and re.match(r"^[a-z]+\d*$", content.lower()):
            # Allow common field name prefixes
            field_prefixes = ["field", "col", "column", "attr", "prop"]
            if any(content.lower().startswith(prefix) for prefix in field_prefixes):
                return True  # It's a valid field name like "field1", "col2"
            else:
                return False  # It's likely a simple value like "value", "test"

        # Accept if it contains underscore (common in field names)
        if "_" in content:
            return True

        # Accept if it has multiple words (like "quoted field")
        if " " in content:
            return True

        # Accept if it's a longer descriptive name
        if len(content) > 8:
            return True

        # Reject otherwise (likely a simple value)
        return False

    def get_metadata(
        self, config_name: str, refresh_cache: bool = False
    ) -> pd.DataFrame:
        """
        Retrieve metadata as a DataFrame with actual metadata values for a specific config.

        Supports three types of metadata retrieval:
        1. Direct metadata configs: config_name is itself a metadata config
        2. Embedded metadata: config_name has metadata_fields defined
        3. Applied metadata: config_name appears in another metadata config's applies_to list

        For explicit metadata configs (types 1 & 3), returns all rows from metadata table.
        For embedded metadata (type 2), returns distinct combinations of metadata fields.

        :param config_name: Specific config name to retrieve metadata for
        :param refresh_cache: If True, force refresh from remote instead of using cache
        :return: DataFrame with metadata values for the specified config
        :raises ValueError: If config_name has no associated metadata
        :raises RuntimeError: If data loading fails for the config
        """
        # Get metadata relationships for this config
        relationships = self.get_metadata_relationships(refresh_cache=refresh_cache)

        relevant_relationships = None

        # First priority: data_config matches (config_name is a data config with metadata)
        data_config_matches = [r for r in relationships if r.data_config == config_name]

        if data_config_matches:
            relevant_relationships = data_config_matches
        else:
            # Second priority: metadata_config matches (config_name is itself a metadata config)
            metadata_config_matches = [
                r for r in relationships if r.metadata_config == config_name
            ]
            relevant_relationships = metadata_config_matches

        if not relevant_relationships:
            # Check what configs are available for helpful error message
            all_data_configs = {r.data_config for r in relationships}
            all_metadata_configs = {r.metadata_config for r in relationships}
            all_available = sorted(all_data_configs | all_metadata_configs)

            if not all_available:
                return pd.DataFrame()

            raise ValueError(
                f"Config '{config_name}' not found. "
                f"Available configs with metadata: {all_available}"
            )

        # Get the config object to process
        # For explicit relationships, use the metadata config
        # For embedded relationships, use the data config
        relationship = relevant_relationships[0]  # Use first relationship found

        if relationship.relationship_type == "explicit":
            # Find the metadata config
            if relationship.metadata_config == config_name:
                # config_name is itself a metadata config
                config = self.get_config(config_name)
            else:
                # config_name is a data config with metadata applied to it
                config = self.get_config(relationship.metadata_config)
        else:  # embedded
            # config_name is a data config with embedded metadata
            config = self.get_config(config_name)

        if not config:
            raise ValueError(f"Could not find config object for '{config_name}'")

        # Process the single configuration
        config_result = self._get_metadata_for_config(
            config, force_refresh=refresh_cache
        )

        if not config_result.get("success", False):
            raise RuntimeError(f"Failed to load data for config {config.config_name}")

        table_name = config_result.get("table_name")
        if not table_name:
            raise RuntimeError(f"No table name for config {config.config_name}")

        try:
            if relationship.relationship_type == "explicit":
                return self._get_explicit_metadata(config, table_name)
            else:  # embedded
                return self._get_embedded_metadata(config, table_name)
        except Exception as e:
            self.logger.error(f"Error querying metadata for {config.config_name}: {e}")
            raise

    def set_filter(self, config_name: str, **kwargs) -> None:
        """
        Set simple filters using keyword arguments.

        Converts keyword arguments to SQL WHERE clause and stores
        for automatic application. Validates that all filter fields
        exist in the config's metadata columns.

        :param config_name: Configuration name to apply filters to
        :param kwargs: Filter conditions as keyword arguments
            (e.g., time=15, mechanism="ZEV")
        :raises InvalidFilterFieldError: If any filter field doesn't exist
            in the metadata columns

        Example:
            api.set_filter("hackett_2020", time=15, mechanism="ZEV", restriction="P")
            # Equivalent to: WHERE time = 15 AND mechanism = 'ZEV' AND restriction = 'P'

        """
        if not kwargs:
            # If no kwargs provided, clear the filter
            self.clear_filter(config_name)
            return

        # Validate that all filter fields exist in metadata columns
        self._validate_metadata_fields(config_name, list(kwargs.keys()))

        # Convert kwargs to SQL WHERE clause
        conditions = []
        for key, value in kwargs.items():
            if isinstance(value, str):
                # String values need quotes
                conditions.append(f"{key} = '{value}'")
            elif value is None:
                # Handle NULL values
                conditions.append(f"{key} IS NULL")
            else:
                # Numeric/boolean values
                conditions.append(f"{key} = {value}")

        where_clause = " AND ".join(conditions)
        self._table_filters[config_name] = where_clause
        self.logger.info(f"Set filter for {config_name}: {where_clause}")

    def set_sql_filter(
        self, config_name: str, sql_where: str, validate_fields: bool = True
    ) -> None:
        """
        Set complex filters using SQL WHERE clause.

        Stores raw SQL WHERE clause for automatic application to queries.
        Validates that field references in the SQL exist in metadata columns
        unless validation is disabled.

        :param config_name: Configuration name to apply filters to
        :param sql_where: SQL WHERE clause (without the 'WHERE' keyword)
        :param validate_fields: Whether to validate field names (default: True)
        :raises InvalidFilterFieldError: If any field references don't exist
            in the metadata columns (when validate_fields=True)

        Example:
            api.set_sql_filter("hackett_2020", "time IN (15, 30) AND mechanism = 'ZEV'")
            # To skip validation for complex SQL:
            api.set_sql_filter("hackett_2020", "complex_expression(...)", validate_fields=False)

        """
        if not sql_where.strip():
            self.clear_filter(config_name)
            return

        # Validate fields if requested
        if validate_fields:
            extracted_fields = self._extract_fields_from_sql(sql_where)
            self._validate_metadata_fields(config_name, extracted_fields)

        self._table_filters[config_name] = sql_where.strip()
        self.logger.info(f"Set SQL filter for {config_name}: {sql_where}")

    def clear_filter(self, config_name: str) -> None:
        """
        Remove all filters for the specified configuration.

        :param config_name: Configuration name to clear filters for

        """
        if config_name in self._table_filters:
            del self._table_filters[config_name]
            self.logger.info(f"Cleared filter for {config_name}")

    def get_current_filter(self, config_name: str) -> str | None:
        """
        Get the current filter for the specified configuration.

        :param config_name: Configuration name to get filter for
        :return: Current SQL WHERE clause or None if no filter set

        """
        return self._table_filters.get(config_name)

    def query(
        self, sql: str, config_name: str, refresh_cache: bool = False
    ) -> pd.DataFrame:
        """
        Execute SQL query with automatic filter application.

        Loads the specified configuration, applies any stored filters,
        and executes the query.

        :param sql: SQL query to execute
        :param config_name: Configuration name to query (table will be loaded if needed)
        :param refresh_cache: If True, force refresh from remote instead of using cache
        :return: DataFrame with query results
        :raises ValueError: If config_name not found or query fails

        Example:
            api.set_filter("hackett_2020", time=15, mechanism="ZEV")
            df = api.query("SELECT regulator_locus_tag, target_locus_tag
                FROM hackett_2020", "hackett_2020")
            # Automatically applies: WHERE time = 15 AND mechanism = 'ZEV'

        """
        # Validate config exists
        if config_name not in [c.config_name for c in self.configs]:
            available_configs = [c.config_name for c in self.configs]
            raise ValueError(
                f"Config '{config_name}' not found. "
                f"Available configs: {available_configs}"
            )

        # Load the configuration data
        config = self.get_config(config_name)
        if not config:
            raise ValueError(f"Could not retrieve config '{config_name}'")

        config_result = self._get_metadata_for_config(
            config, force_refresh=refresh_cache
        )
        if not config_result.get("success", False):
            raise ValueError(
                f"Failed to load data for config '{config_name}': "
                f"{config_result.get('message', 'Unknown error')}"
            )

        table_name = config_result.get("table_name")
        if not table_name:
            raise ValueError(f"No table available for config '{config_name}'")

        # Replace config name with actual table name in SQL for user convenience
        sql_with_table = sql.replace(config_name, table_name)

        # Apply stored filters
        final_sql = self._apply_filter_to_sql(sql_with_table, config_name)

        try:
            return self.duckdb_conn.execute(final_sql).fetchdf()
        except Exception as e:
            self.logger.error(f"Query execution failed: {e}")
            self.logger.error(f"Final SQL: {final_sql}")
            raise ValueError(f"Query execution failed: {e}") from e

    def _apply_filter_to_sql(self, sql: str, config_name: str) -> str:
        """
        Apply stored filters to SQL query.

        Modifies the SQL query to include stored WHERE clause filters.

        :param sql: Original SQL query
        :param config_name: Configuration name to get filters for
        :return: Modified SQL query with filters applied

        """
        if config_name not in self._table_filters:
            return sql

        filter_clause = self._table_filters[config_name]
        sql_upper = sql.upper()

        if "WHERE" in sql_upper:
            # SQL already has WHERE clause, append with AND
            return f"{sql} AND ({filter_clause})"
        else:
            # Add WHERE clause
            # Find the position to insert WHERE (before ORDER BY, GROUP BY, LIMIT, etc.)
            insert_keywords = ["ORDER BY", "GROUP BY", "HAVING", "LIMIT", "OFFSET"]
            insert_position = len(sql)

            for keyword in insert_keywords:
                pos = sql_upper.find(keyword)
                if pos != -1 and pos < insert_position:
                    insert_position = pos

            if insert_position == len(sql):
                # No special clauses, append WHERE at the end
                return f"{sql} WHERE {filter_clause}"
            else:
                # Insert WHERE before the special clause
                return (
                    f"{sql[:insert_position].rstrip()} "
                    f"WHERE {filter_clause} {sql[insert_position:]}"
                )

__init__(repo_id, repo_type='dataset', token=None, cache_dir=None, duckdb_conn=None)

Initialize the minimal HF Query API client.

Parameters:

repo_id (str): Repository identifier (e.g., "user/dataset"). Required.
repo_type (Literal['model', 'dataset', 'space']): Type of repository ("dataset", "model", "space"). Default: 'dataset'.
token (str | None): HuggingFace token for authentication. Default: None.
cache_dir (str | Path | None): HF cache_dir for downloads. Default: None.
duckdb_conn (duckdb.DuckDBPyConnection | None): DuckDB connection to use; a new in-memory connection is created when one is not provided. Default: None.

Source code in tfbpapi/HfQueryAPI.py
def __init__(
    self,
    repo_id: str,
    repo_type: Literal["model", "dataset", "space"] = "dataset",
    token: str | None = None,
    cache_dir: str | Path | None = None,
    duckdb_conn: duckdb.DuckDBPyConnection | None = None,
):
    """
    Initialize the minimal HF Query API client.

    :param repo_id: Repository identifier (e.g., "user/dataset")
    :param repo_type: Type of repository ("dataset", "model", "space")
    :param token: HuggingFace token for authentication
    :param cache_dir: HF cache_dir for downloads
    :param duckdb_conn: DuckDB connection to use; a new in-memory
        connection is created when one is not provided

    """
    # Avoid a shared default connection: create a fresh in-memory
    # connection per instance unless one is supplied.
    self._duckdb_conn = (
        duckdb_conn if duckdb_conn is not None else duckdb.connect(":memory:")
    )

    # Initialize parent with minimal setup
    super().__init__(
        repo_id=repo_id,
        duckdb_conn=self._duckdb_conn,
        token=token,
        logger=logging.getLogger(self.__class__.__name__),
    )

    # Store basic configuration
    self.repo_type = repo_type
    self.cache_dir = Path(cache_dir) if cache_dir is not None else CACHE_DIR

    # Filter storage system
    # dict structure:
    #   {config_name: "SQL WHERE clause", ...}
    self._table_filters: dict[str, str] = {}
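
A sketch of two clients sharing one DuckDB connection, so tables loaded by either are queryable from both (the repo ids are placeholders):

    import duckdb

    from tfbpapi.HfQueryAPI import HfQueryAPI

    conn = duckdb.connect(":memory:")
    api_a = HfQueryAPI("user/dataset_a", duckdb_conn=conn)
    api_b = HfQueryAPI("user/dataset_b", duckdb_conn=conn)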

clear_filter(config_name)

Remove all filters for the specified configuration.

Parameters:

config_name (str): Configuration name to clear filters for. Required.

Source code in tfbpapi/HfQueryAPI.py
def clear_filter(self, config_name: str) -> None:
    """
    Remove all filters for the specified configuration.

    :param config_name: Configuration name to clear filters for

    """
    if config_name in self._table_filters:
        del self._table_filters[config_name]
        self.logger.info(f"Cleared filter for {config_name}")

get_current_filter(config_name)

Get the current filter for the specified configuration.

Parameters:

config_name (str): Configuration name to get filter for. Required.

Returns:

str | None: Current SQL WHERE clause, or None if no filter is set.

Source code in tfbpapi/HfQueryAPI.py
def get_current_filter(self, config_name: str) -> str | None:
    """
    Get the current filter for the specified configuration.

    :param config_name: Configuration name to get filter for
    :return: Current SQL WHERE clause or None if no filter set

    """
    return self._table_filters.get(config_name)
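
Together with set_filter and clear_filter this gives a simple filter lifecycle (the config name is illustrative):

    api.set_filter("hackett_2020", time=15)
    assert api.get_current_filter("hackett_2020") == "time = 15"
    api.clear_filter("hackett_2020")
    assert api.get_current_filter("hackett_2020") is None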

get_metadata(config_name, refresh_cache=False)

Retrieve metadata as a DataFrame with actual metadata values for a specific config.

Supports three types of metadata retrieval:

1. Direct metadata configs: config_name is itself a metadata config
2. Embedded metadata: config_name has metadata_fields defined
3. Applied metadata: config_name appears in another metadata config's applies_to list

For explicit metadata configs (types 1 & 3), returns all rows from metadata table. For embedded metadata (type 2), returns distinct combinations of metadata fields.

Parameters:

config_name (str): Specific config name to retrieve metadata for. Required.
refresh_cache (bool): If True, force refresh from remote instead of using cache. Default: False.

Returns:

DataFrame: Metadata values for the specified config.

Raises:

ValueError: If config_name has no associated metadata.
RuntimeError: If data loading fails for the config.

Source code in tfbpapi/HfQueryAPI.py
def get_metadata(
    self, config_name: str, refresh_cache: bool = False
) -> pd.DataFrame:
    """
    Retrieve metadata as a DataFrame with actual metadata values for a specific config.

    Supports three types of metadata retrieval:
    1. Direct metadata configs: config_name is itself a metadata config
    2. Embedded metadata: config_name has metadata_fields defined
    3. Applied metadata: config_name appears in another metadata config's applies_to list

    For explicit metadata configs (types 1 & 3), returns all rows from metadata table.
    For embedded metadata (type 2), returns distinct combinations of metadata fields.

    :param config_name: Specific config name to retrieve metadata for
    :param refresh_cache: If True, force refresh from remote instead of using cache
    :return: DataFrame with metadata values for the specified config
    :raises ValueError: If config_name has no associated metadata
    :raises RuntimeError: If data loading fails for the config
    """
    # Get metadata relationships for this config
    relationships = self.get_metadata_relationships(refresh_cache=refresh_cache)

    relevant_relationships = None

    # First priority: data_config matches (config_name is a data config with metadata)
    data_config_matches = [r for r in relationships if r.data_config == config_name]

    if data_config_matches:
        relevant_relationships = data_config_matches
    else:
        # Second priority: metadata_config matches (config_name is itself a metadata config)
        metadata_config_matches = [
            r for r in relationships if r.metadata_config == config_name
        ]
        relevant_relationships = metadata_config_matches

    if not relevant_relationships:
        # Check what configs are available for helpful error message
        all_data_configs = {r.data_config for r in relationships}
        all_metadata_configs = {r.metadata_config for r in relationships}
        all_available = sorted(all_data_configs | all_metadata_configs)

        if not all_available:
            return pd.DataFrame()

        raise ValueError(
            f"Config '{config_name}' not found. "
            f"Available configs with metadata: {all_available}"
        )

    # Get the config object to process
    # For explicit relationships, use the metadata config
    # For embedded relationships, use the data config
    relationship = relevant_relationships[0]  # Use first relationship found

    if relationship.relationship_type == "explicit":
        # Find the metadata config
        if relationship.metadata_config == config_name:
            # config_name is itself a metadata config
            config = self.get_config(config_name)
        else:
            # config_name is a data config with metadata applied to it
            config = self.get_config(relationship.metadata_config)
    else:  # embedded
        # config_name is a data config with embedded metadata
        config = self.get_config(config_name)

    if not config:
        raise ValueError(f"Could not find config object for '{config_name}'")

    # Process the single configuration
    config_result = self._get_metadata_for_config(
        config, force_refresh=refresh_cache
    )

    if not config_result.get("success", False):
        raise RuntimeError(f"Failed to load data for config {config.config_name}")

    table_name = config_result.get("table_name")
    if not table_name:
        raise RuntimeError(f"No table name for config {config.config_name}")

    try:
        if relationship.relationship_type == "explicit":
            return self._get_explicit_metadata(config, table_name)
        else:  # embedded
            return self._get_embedded_metadata(config, table_name)
    except Exception as e:
        self.logger.error(f"Error querying metadata for {config.config_name}: {e}")
        raise
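
A typical pattern is to inspect metadata before filtering; the returned columns are the fields that set_filter validates against (the config name is illustrative):

    meta = api.get_metadata("hackett_2020")
    print(meta.columns.tolist())  # fields available for filtering
    print(meta.head())            # metadata values / combinations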

query(sql, config_name, refresh_cache=False)

Execute SQL query with automatic filter application.

Loads the specified configuration, applies any stored filters, and executes the query.

Parameters:

sql (str): SQL query to execute. Required.
config_name (str): Configuration name to query (table will be loaded if needed). Required.
refresh_cache (bool): If True, force refresh from remote instead of using cache. Default: False.

Returns:

DataFrame: Query results.

Raises:

ValueError: If config_name is not found or the query fails.

Example:

    api.set_filter("hackett_2020", time=15, mechanism="ZEV")
    df = api.query("SELECT regulator_locus_tag, target_locus_tag FROM hackett_2020", "hackett_2020")
    # Automatically applies: WHERE time = 15 AND mechanism = 'ZEV'

Source code in tfbpapi/HfQueryAPI.py
def query(
    self, sql: str, config_name: str, refresh_cache: bool = False
) -> pd.DataFrame:
    """
    Execute SQL query with automatic filter application.

    Loads the specified configuration, applies any stored filters,
    and executes the query.

    :param sql: SQL query to execute
    :param config_name: Configuration name to query (table will be loaded if needed)
    :param refresh_cache: If True, force refresh from remote instead of using cache
    :return: DataFrame with query results
    :raises ValueError: If config_name not found or query fails

    Example:
        api.set_filter("hackett_2020", time=15, mechanism="ZEV")
        df = api.query("SELECT regulator_locus_tag, target_locus_tag
            FROM hackett_2020", "hackett_2020")
        # Automatically applies: WHERE time = 15 AND mechanism = 'ZEV'

    """
    # Validate config exists
    if config_name not in [c.config_name for c in self.configs]:
        available_configs = [c.config_name for c in self.configs]
        raise ValueError(
            f"Config '{config_name}' not found. "
            f"Available configs: {available_configs}"
        )

    # Load the configuration data
    config = self.get_config(config_name)
    if not config:
        raise ValueError(f"Could not retrieve config '{config_name}'")

    config_result = self._get_metadata_for_config(
        config, force_refresh=refresh_cache
    )
    if not config_result.get("success", False):
        raise ValueError(
            f"Failed to load data for config '{config_name}': "
            f"{config_result.get('message', 'Unknown error')}"
        )

    table_name = config_result.get("table_name")
    if not table_name:
        raise ValueError(f"No table available for config '{config_name}'")

    # Replace config name with actual table name in SQL for user convenience
    sql_with_table = sql.replace(config_name, table_name)

    # Apply stored filters
    final_sql = self._apply_filter_to_sql(sql_with_table, config_name)

    try:
        return self.duckdb_conn.execute(final_sql).fetchdf()
    except Exception as e:
        self.logger.error(f"Query execution failed: {e}")
        self.logger.error(f"Final SQL: {final_sql}")
        raise ValueError(f"Query execution failed: {e}") from e
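
Because stored filters are spliced in before trailing clauses (see _apply_filter_to_sql), queries ending in ORDER BY or LIMIT still work (names are illustrative):

    api.set_filter("hackett_2020", mechanism="ZEV")
    df = api.query("SELECT * FROM hackett_2020 ORDER BY time LIMIT 10", "hackett_2020")
    # Executed as: SELECT * FROM <table> WHERE mechanism = 'ZEV' ORDER BY time LIMIT 10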

set_filter(config_name, **kwargs)

Set simple filters using keyword arguments.

Converts keyword arguments to SQL WHERE clause and stores for automatic application. Validates that all filter fields exist in the config’s metadata columns.

Parameters:

config_name (str): Configuration name to apply filters to. Required.
kwargs: Filter conditions as keyword arguments (e.g., time=15, mechanism="ZEV"). Default: {}.

Raises:

InvalidFilterFieldError: If any filter field doesn't exist in the metadata columns.

Example:

    api.set_filter("hackett_2020", time=15, mechanism="ZEV", restriction="P")
    # Equivalent to: WHERE time = 15 AND mechanism = 'ZEV' AND restriction = 'P'

Source code in tfbpapi/HfQueryAPI.py
def set_filter(self, config_name: str, **kwargs) -> None:
    """
    Set simple filters using keyword arguments.

    Converts keyword arguments to SQL WHERE clause and stores
    for automatic application. Validates that all filter fields
    exist in the config's metadata columns.

    :param config_name: Configuration name to apply filters to
    :param kwargs: Filter conditions as keyword arguments
        (e.g., time=15, mechanism="ZEV")
    :raises InvalidFilterFieldError: If any filter field doesn't exist
        in the metadata columns

    Example:
        api.set_filter("hackett_2020", time=15, mechanism="ZEV", restriction="P")
        # Equivalent to: WHERE time = 15 AND mechanism = 'ZEV' AND restriction = 'P'

    """
    if not kwargs:
        # If no kwargs provided, clear the filter
        self.clear_filter(config_name)
        return

    # Validate that all filter fields exist in metadata columns
    self._validate_metadata_fields(config_name, list(kwargs.keys()))

    # Convert kwargs to SQL WHERE clause
    conditions = []
    for key, value in kwargs.items():
        if isinstance(value, str):
            # String values need quotes
            conditions.append(f"{key} = '{value}'")
        elif value is None:
            # Handle NULL values
            conditions.append(f"{key} IS NULL")
        else:
            # Numeric/boolean values
            conditions.append(f"{key} = {value}")

    where_clause = " AND ".join(conditions)
    self._table_filters[config_name] = where_clause
    self.logger.info(f"Set filter for {config_name}: {where_clause}")
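
Value handling in the generated clause: strings are quoted, None becomes IS NULL, and other values are interpolated as-is, so filter values should come from trusted input (names are illustrative):

    api.set_filter("hackett_2020", mechanism="ZEV", restriction=None, time=15)
    # Stored filter: mechanism = 'ZEV' AND restriction IS NULL AND time = 15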

set_sql_filter(config_name, sql_where, validate_fields=True)

Set complex filters using SQL WHERE clause.

Stores raw SQL WHERE clause for automatic application to queries. Validates that field references in the SQL exist in metadata columns unless validation is disabled.

Parameters:

config_name (str): Configuration name to apply filters to. Required.
sql_where (str): SQL WHERE clause (without the 'WHERE' keyword). Required.
validate_fields (bool): Whether to validate field names. Default: True.

Raises:

InvalidFilterFieldError: If any field reference doesn't exist in the metadata columns (when validate_fields=True).

Example:

    api.set_sql_filter("hackett_2020", "time IN (15, 30) AND mechanism = 'ZEV'")
    # To skip validation for complex SQL:
    api.set_sql_filter("hackett_2020", "complex_expression(...)", validate_fields=False)

Source code in tfbpapi/HfQueryAPI.py
def set_sql_filter(
    self, config_name: str, sql_where: str, validate_fields: bool = True
) -> None:
    """
    Set complex filters using SQL WHERE clause.

    Stores raw SQL WHERE clause for automatic application to queries.
    Validates that field references in the SQL exist in metadata columns
    unless validation is disabled.

    :param config_name: Configuration name to apply filters to
    :param sql_where: SQL WHERE clause (without the 'WHERE' keyword)
    :param validate_fields: Whether to validate field names (default: True)
    :raises InvalidFilterFieldError: If any field references don't exist
        in the metadata columns (when validate_fields=True)

    Example:
        api.set_sql_filter("hackett_2020", "time IN (15, 30) AND mechanism = 'ZEV'")
        # To skip validation for complex SQL:
        api.set_sql_filter("hackett_2020", "complex_expression(...)", validate_fields=False)

    """
    if not sql_where.strip():
        self.clear_filter(config_name)
        return

    # Validate fields if requested
    if validate_fields:
        extracted_fields = self._extract_fields_from_sql(sql_where)
        self._validate_metadata_fields(config_name, extracted_fields)

    self._table_filters[config_name] = sql_where.strip()
    self.logger.info(f"Set SQL filter for {config_name}: {sql_where}")
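
A sketch combining a SQL filter with a query (names are illustrative); field validation tokenizes the WHERE clause and checks the extracted names against the columns returned by get_metadata:

    api.set_sql_filter("hackett_2020", "time BETWEEN 10 AND 30 AND mechanism = 'ZEV'")
    df = api.query("SELECT * FROM hackett_2020", "hackett_2020")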