Skip to content

change_reports

CSV/HTML report generation and track data management for music library operations.

Handles change reports (genres, years, naming), track list persistence with synchronization, and HTML analytics with performance metrics. Uses CacheServiceProtocol for album year caching.

ChangeType

Enumeration of change types.

Key

Enumeration of key names for CSV fields.

Format

Enumeration of formatting constants.

Misc

Enumeration of miscellaneous constants.

save_html_report

save_html_report(
    events,
    call_counts,
    success_counts,
    decorator_overhead,
    config,
    console_logger=None,
    error_logger=None,
    group_successful_short_calls=False,
    force_mode=False,
)

Generate an HTML report from the provided analytics data.

Source code in src/metrics/html_reports.py
def save_html_report(
    events: list[dict[str, Any]],
    call_counts: dict[str, int],
    success_counts: dict[str, int],
    decorator_overhead: dict[str, float],
    config: AppConfig,
    console_logger: logging.Logger | None = None,
    error_logger: logging.Logger | None = None,
    group_successful_short_calls: bool = False,
    force_mode: bool = False,
) -> None:
    """Generate an HTML report from the provided analytics data."""
    # Fall back to the shared named loggers when none are injected.
    if console_logger is None:
        console_logger = logging.getLogger("console_logger")
    if error_logger is None:
        error_logger = logging.getLogger("error_logger")

    console_logger.info(
        "Starting HTML report generation with %d events, %d function counts",
        len(events),
        len(call_counts),
    )
    report_date = datetime.now(UTC).strftime("%Y-%m-%d")

    # Duration bucket boundaries come from the analytics configuration.
    threshold_cfg = config.analytics.duration_thresholds
    duration_thresholds = {
        "short_max": threshold_cfg.short_max,
        "medium_max": threshold_cfg.medium_max,
        "long_max": threshold_cfg.long_max,
    }

    # Ensure the default analytics directory exists up front.
    (Path(config.logs_base_dir) / "analytics").mkdir(parents=True, exist_ok=True)

    default_name = "analytics_full.html" if force_mode else "analytics_incremental.html"
    report_file = get_full_log_path(
        config,
        "analytics_html_report_file",
        str(Path("analytics") / default_name),
    )

    # With no data at all, emit the empty template and stop.
    if not (events or call_counts):
        console_logger.warning(
            "No analytics data available for report - creating empty template",
        )
        generate_empty_html_template(report_date, report_file, console_logger, error_logger)
        return

    # Split events into grouped short successes vs. everything worth detailing.
    grouped_short_success, big_or_fail_events = group_events_by_duration_and_success(
        events, duration_thresholds, group_successful_short_calls, error_logger
    )

    # Assemble the report from its individual HTML sections, in order.
    sections = [
        _generate_main_html_template(report_date, call_counts, success_counts, events, force_mode),
        generate_grouped_success_table(grouped_short_success, group_successful_short_calls),
        _generate_detailed_events_table_html(big_or_fail_events, duration_thresholds, error_logger),
        generate_summary_table_html(call_counts, success_counts, decorator_overhead),
    ]
    html_content = "".join(sections)

    # Persist the report; failures are logged rather than raised.
    try:
        target = Path(report_file)
        target.parent.mkdir(parents=True, exist_ok=True)
        with target.open("w", encoding="utf-8") as file:
            file.write(html_content)
        console_logger.info("Analytics HTML report saved to %s.", report_file)
    except (OSError, UnicodeError):
        error_logger.exception("Failed to save HTML report")

load_track_list

load_track_list(csv_path)

Load the track list from the CSV file into a dictionary.

The track ID is used as the key. Reads columns: id, name, artist, album, genre, date_added, track_status, year_before_mgu, year_set_by_mgu.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `csv_path` | `str` | Path to the CSV file. | *required* |

Returns:

| Type | Description |
| --- | --- |
| `dict[str, TrackDict]` | Dictionary of track dictionaries. |

Source code in src/metrics/track_sync.py
def load_track_list(csv_path: str) -> dict[str, TrackDict]:
    """Load the track list from the CSV file into a dictionary.

    The track ID is used as the key.
    Reads columns: id, name, artist, album, genre, date_added, track_status, year_before_mgu, year_set_by_mgu.

    Args:
        csv_path: Path to the CSV file.

    Returns:
        Dictionary of track dictionaries keyed by track ID. Empty when the
        file is missing, the header is invalid, or reading fails.

    """
    track_map: dict[str, TrackDict] = {}
    if not Path(csv_path).exists():
        return track_map

    logger = logging.getLogger("console_logger")
    expected_fieldnames = TRACK_FIELDNAMES

    try:
        with Path(csv_path).open(encoding="utf-8") as f:
            reader = csv.DictReader(f)
            fields_to_read = validate_csv_header(reader, expected_fieldnames, csv_path, logger)
            if not fields_to_read:
                return track_map

            for row in reader:
                track = create_track_from_row(row, fields_to_read, expected_fieldnames)
                # Skip rows that fail conversion or lack a track ID.
                if track and track.id:
                    track_map[track.id] = track

        # Log the actual source path (the old message hard-coded "track_list.csv").
        logger.info("Loaded %d tracks from %s.", len(track_map), csv_path)
    except (OSError, UnicodeError, csv.Error):
        logger.exception("Could not read CSV file: %s", csv_path)
    return track_map

save_track_map_to_csv

save_track_map_to_csv(
    track_map, csv_path, console_logger, error_logger
)

Persist the provided track map to CSV using standard field ordering.

Source code in src/metrics/track_sync.py
def save_track_map_to_csv(
    track_map: dict[str, TrackDict],
    csv_path: str,
    console_logger: logging.Logger,
    error_logger: logging.Logger,
) -> None:
    """Persist the provided track map to CSV using standard field ordering."""
    # Emit rows in a deterministic order, sorted by track ID.
    ordered_tracks = sorted(track_map.values(), key=lambda track: track.id)
    rows = [convert_track_to_csv_dict(track) for track in ordered_tracks]
    save_csv(rows, TRACK_FIELDNAMES, csv_path, console_logger, error_logger, "tracks")

sync_track_list_with_current async

sync_track_list_with_current(
    all_tracks,
    csv_path,
    cache_service,
    console_logger,
    error_logger,
    partial_sync=False,
    applescript_client=None,
)

Synchronize the current track list with the data in a CSV file.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `all_tracks` | `Sequence[TrackDict]` | List of track dictionaries to sync. | *required* |
| `csv_path` | `str` | Path to the CSV file. | *required* |
| `cache_service` | `CacheServiceProtocol` | Cache service protocol for album year caching. | *required* |
| `console_logger` | `Logger` | Logger for console output. | *required* |
| `error_logger` | `Logger` | Logger for error output. | *required* |
| `partial_sync` | `bool` | Whether to perform a partial sync (only update year_set_by_mgu if missing). | `False` |
| `applescript_client` | `AppleScriptClientProtocol \| None` | AppleScript client for fetching missing track fields. | `None` |
Source code in src/metrics/track_sync.py
async def sync_track_list_with_current(
    all_tracks: Sequence[TrackDict],
    csv_path: str,
    cache_service: CacheServiceProtocol,
    console_logger: logging.Logger,
    error_logger: logging.Logger,
    partial_sync: bool = False,
    applescript_client: AppleScriptClientProtocol | None = None,
) -> None:
    """Synchronize the current track list with the data in a CSV file.

    Args:
        all_tracks: List of track dictionaries to sync.
        csv_path: Path to the CSV file.
        cache_service: Cache service protocol for album year caching.
        console_logger: Logger for console output.
        error_logger: Logger for error output.
        partial_sync: Whether to perform a partial sync (only update year_set_by_mgu if missing).
        applescript_client: AppleScript client for fetching missing track fields.

    """
    console_logger.info(
        "Starting sync: fetched %s tracks; CSV file: %s",
        len(all_tracks),
        csv_path,
    )

    # Load the existing CSV into a map keyed by track ID.
    csv_map = load_track_list(csv_path)

    # Albums already processed drive the partial-sync decisions.
    processed_albums = get_processed_albums_from_csv(csv_map, cache_service)

    # Build the authoritative map of tracks currently in Music.app.
    musicapp_tracks = await build_musicapp_track_map(
        all_tracks,
        processed_albums,
        cache_service,
        partial_sync,
        error_logger,
    )

    # Fold the Music.app tracks into the CSV map.
    added_or_updated = merge_musicapp_into_csv(musicapp_tracks, csv_map)
    console_logger.info("Added/Updated %s tracks in CSV.", added_or_updated)

    # Drop CSV entries for tracks that no longer exist in Music.app.
    size_before = len(csv_map)
    csv_map = {tid: track for tid, track in csv_map.items() if tid in musicapp_tracks}
    removed_count = size_before - len(csv_map)
    if removed_count > 0:
        console_logger.info(
            "Removed %s tracks from CSV that no longer exist in Music.app",
            removed_count,
        )

    # Final row set to be written back out.
    final_list = list(csv_map.values())
    console_logger.info("Final CSV track count after sync: %s", len(final_list))

    fieldnames = TRACK_FIELDNAMES

    # Fetch any missing track fields via AppleScript before serializing.
    tracks_cache = await fetch_missing_track_fields_for_sync(final_list, applescript_client, console_logger)

    track_dicts: list[dict[str, str]] = []
    missing_fields_count = 0
    for track in final_list:
        update_track_with_cached_fields_for_sync(track, tracks_cache)

        # Count tracks whose date_added had to come from the AppleScript cache.
        cached_date = track.id and track.id in tracks_cache and tracks_cache[track.id]["date_added"]
        if not track.date_added and cached_date:
            missing_fields_count += 1

        track_dicts.append(convert_track_to_csv_dict(track))

    if missing_fields_count > 0:
        console_logger.info(
            "Filled missing fields for %d tracks via AppleScript cache",
            missing_fields_count,
        )
    save_csv(track_dicts, fieldnames, csv_path, console_logger, error_logger, "tracks")

save_to_csv

save_to_csv(
    tracks,
    file_path,
    console_logger=None,
    error_logger=None,
)

Save the list of track dictionaries to a CSV file.

Source code in src/metrics/change_reports.py
def save_to_csv(
    tracks: Sequence[TrackDict],
    file_path: str,
    console_logger: logging.Logger | None = None,
    error_logger: logging.Logger | None = None,
) -> None:
    """Save the list of track dictionaries to a CSV file."""
    if console_logger is None:
        console_logger = logging.getLogger("console_logger")
    if error_logger is None:
        error_logger = logging.getLogger("error_logger")

    def _as_row(track: TrackDict) -> dict[str, str]:
        # Flatten one TrackDict into a plain string-valued row; missing/None
        # field values become empty strings.
        return {field: str(track.get(field) or "") for field in TRACK_FIELDNAMES}

    _save_csv(
        [_as_row(track) for track in tracks],
        TRACK_FIELDNAMES,
        file_path,
        console_logger,
        error_logger,
        "tracks",
    )

save_unified_changes_report

save_unified_changes_report(
    changes,
    file_path,
    console_logger,
    error_logger,
    compact_mode=False,
)

Pretty-print change summary to console and optionally persist full CSV.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `changes` | `list[dict[str, Any]]` | List of change records. | *required* |
| `file_path` | `str \| None` | Path to save CSV report (None to skip file saving). | *required* |
| `console_logger` | `Logger` | Logger for console output. | *required* |
| `error_logger` | `Logger` | Logger for errors. | *required* |
| `compact_mode` | `bool` | If True, display changes in compact format with arrows. | `False` |
Source code in src/metrics/change_reports.py
def save_unified_changes_report(
    changes: list[dict[str, Any]],
    file_path: str | None,
    console_logger: logging.Logger,
    error_logger: logging.Logger,
    compact_mode: bool = False,
) -> None:
    """Pretty-print change summary to console and optionally persist full CSV.

    Args:
        changes: List of change records
        file_path: Path to save CSV report (None to skip file saving)
        console_logger: Logger for console output
        error_logger: Logger for errors
        compact_mode: If True, display changes in compact format with arrows

    """
    # Nothing to report at all.
    if not changes:
        _print_no_changes_summary()
        return

    ordered = _sort_changes_by_artist_album(changes)

    # Only records whose old and new values actually differ are shown/saved.
    real_changes = [record for record in ordered if _is_real_change(record, console_logger)]
    if not real_changes:
        # Everything was filtered out (e.g. force_update with no real changes):
        # print the empty summary and skip CSV creation entirely.
        _print_no_changes_summary()
        return

    grouped = _group_changes_by_type(real_changes)

    console = Console()
    console.print(f"\n{Misc.EMOJI_REPORT} [bold]Changes Summary:[/]")

    # Render each change-type group in the requested display mode.
    for change_type, records in grouped.items():
        if compact_mode:
            _render_compact_group(console, change_type, records)
        else:
            _render_change_group(console, change_type, records, console_logger)

    console.print("-" * Format.SEPARATOR_100)

    # Persist only the real changes when a target path was provided.
    if file_path:
        ensure_directory(str(Path(file_path).parent), error_logger)
        _save_csv(
            real_changes,
            _get_csv_fieldnames(),
            file_path,
            console_logger,
            error_logger,
            Misc.CHANGES_REPORT_TYPE,
        )

save_changes_report

save_changes_report(
    changes,
    file_path,
    console_logger=None,
    error_logger=None,
    add_timestamp=False,
    compact_mode=True,
)

Save the list of change dictionaries to a CSV file.

By default, it overwrites the specified file. If add_timestamp is True, it appends a timestamp to the filename to preserve previous reports.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `changes` | `Sequence[dict[str, Any] \| ChangeLogEntry]` | List of change dictionaries or ChangeLogEntry objects. | *required* |
| `file_path` | `str \| None` | Path to save the CSV file (None to skip file saving). | *required* |
| `console_logger` | `Logger \| None` | Logger for console output. | `None` |
| `error_logger` | `Logger \| None` | Logger for errors. | `None` |
| `add_timestamp` | `bool` | If True, add timestamp to filename. | `False` |
| `compact_mode` | `bool` | If True, display changes in compact format with yellow highlighting. | `True` |
Source code in src/metrics/change_reports.py
def save_changes_report(
    changes: Sequence[dict[str, Any] | ChangeLogEntry],
    file_path: str | None,
    console_logger: logging.Logger | None = None,
    error_logger: logging.Logger | None = None,
    add_timestamp: bool = False,
    compact_mode: bool = True,  # Default to compact mode for better visibility
) -> None:
    """Save the list of change dictionaries to a CSV file.

    By default the target file is overwritten; when `add_timestamp` is True
    a timestamp is appended to the filename so earlier reports are kept.

    Args:
        changes: List of change dictionaries or ChangeLogEntry objects
        file_path: Path to save the CSV file (None to skip file saving)
        console_logger: Logger for console output
        error_logger: Logger for errors
        add_timestamp: If True, add timestamp to filename
        compact_mode: If True, display changes in compact format with yellow highlighting

    """
    if console_logger is None:
        console_logger = logging.getLogger("console_logger")
    if error_logger is None:
        error_logger = logging.getLogger("error_logger")

    # Normalize every entry to a plain dict before further processing.
    normalized = [_convert_changelog_to_dict(entry) for entry in changes]

    # Derive a change_type where missing and map fields to the unified names.
    for record in normalized:
        if "change_type" not in record:
            record["change_type"] = _determine_change_type(record)
        _normalize_field_mappings(record)

    # Optionally timestamp the filename to preserve previous reports.
    target_path = file_path
    if add_timestamp and file_path:
        target_path = _add_timestamp_to_filename(file_path)

    save_unified_changes_report(
        normalized,
        target_path,
        console_logger,
        error_logger,
        compact_mode,
    )