Last active
March 23, 2026 07:52
-
-
Save flodolo/21e66cc03bc5e8ddcc8275db1375a26a to your computer and use it in GitHub Desktop.
Benchmark save translations
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import time | |
| from django.conf import settings | |
| from django.db import connection, transaction | |
| from django.db.models import F | |
| from pontoon.base.models import Resource, Translation, TranslatedResource | |
# Turn on DEBUG so connection.queries records every SQL statement executed.
settings.DEBUG = True

SAMPLES = 50  # number of random translations to benchmark
LARGEST = 5   # sample only from the N largest resources

# Primary keys of the LARGEST resources with the most strings.
top_resources = list(
    Resource.objects.order_by("-total_strings").values_list("pk", flat=True)[:LARGEST]
)

# Random sample of non-obsolete translations drawn from those resources.
translations = list(
    Translation.objects.filter(
        entity__obsolete=False,
        entity__resource__in=top_resources,
    )
    .select_related("entity__resource", "locale")
    .order_by("?")[:SAMPLES]
)
def get_stats(entity, locale):
    """Tally translation statuses for one entity/locale, old-save-path style.

    Walks every translation of *entity* in *locale* and counts how many are
    approved, pretranslated, failing checks (errors), passing with warnings,
    or unreviewed. Returns the tallies as a dict keyed by those names.

    NOTE(review): ``.exists()`` issues its own query even though errors and
    warnings are prefetched — that mirrors the legacy implementation being
    benchmarked, so the extra queries here are intentional; do not "fix" it.
    """
    counts = {
        "approved": 0,
        "pretranslated": 0,
        "errors": 0,
        "warnings": 0,
        "unreviewed": 0,
    }

    candidates = entity.translation_set.filter(locale=locale).prefetch_related(
        "errors", "warnings"
    )
    for translation in candidates:
        # A translation "counts" toward errors/warnings only when it is in an
        # active state (approved, pretranslated, or fuzzy).
        active = (
            translation.approved or translation.pretranslated or translation.fuzzy
        )
        if translation.errors.exists():
            if active:
                counts["errors"] += 1
        elif translation.warnings.exists():
            if active:
                counts["warnings"] += 1
        elif translation.approved:
            counts["approved"] += 1
        elif translation.pretranslated:
            counts["pretranslated"] += 1
        # Unreviewed: neither in an active state nor explicitly rejected.
        if not (active or translation.rejected):
            counts["unreviewed"] += 1

    return counts
def adjust_stats(translatedresource, before, after, tr_created):
    """Apply stat deltas to a TranslatedResource, mirroring the old save path.

    Each ``*_strings`` counter is bumped by ``after[key] - before[key]`` via
    an F() expression, so the arithmetic happens in SQL against the current
    row value rather than a possibly stale in-memory one.

    Args:
        translatedresource: the TranslatedResource row to update.
        before: stats dict (as returned by get_stats) taken before the save.
        after: stats dict taken after the save.
        tr_created: True when the row was just created; total_strings is then
            seeded from the parent resource.
    """
    # (model field, key in the before/after stats dicts)
    delta_fields = [
        ("approved_strings", "approved"),
        ("pretranslated_strings", "pretranslated"),
        ("strings_with_errors", "errors"),
        ("strings_with_warnings", "warnings"),
        ("unreviewed_strings", "unreviewed"),
    ]

    if tr_created:
        translatedresource.total_strings = translatedresource.resource.total_strings

    for field, key in delta_fields:
        setattr(
            translatedresource,
            field,
            F(field) + after[key] - before[key],
        )

    translatedresource.save(
        update_fields=["total_strings"] + [field for field, _ in delta_fields]
    )
def benchmark_old(entity, locale):
    """Time the legacy save path: two get_stats() passes plus adjust_stats().

    Returns a ``(query_count, elapsed_seconds)`` tuple; query_count is read
    from connection.queries, which only records when settings.DEBUG is True.
    """
    queries_at_start = len(connection.queries)
    started = time.perf_counter()

    before = get_stats(entity, locale)
    after = get_stats(entity, locale)
    translatedresource, created = TranslatedResource.objects.get_or_create(
        resource=entity.resource, locale=locale
    )
    adjust_stats(translatedresource, before, after, created)

    elapsed = time.perf_counter() - started
    return len(connection.queries) - queries_at_start, elapsed
def benchmark_new(entity, locale):
    """Time the new save path: a single calculate_stats() call.

    Returns a ``(query_count, elapsed_seconds)`` tuple; query_count is read
    from connection.queries, which only records when settings.DEBUG is True.
    """
    queries_at_start = len(connection.queries)
    started = time.perf_counter()

    translatedresource, _ = TranslatedResource.objects.get_or_create(
        resource=entity.resource, locale=locale
    )
    translatedresource.calculate_stats()

    elapsed = time.perf_counter() - started
    return len(connection.queries) - queries_at_start, elapsed
# Per-sample measurements for both implementations.
old_query_counts = []
old_times = []
new_query_counts = []
new_times = []

# Each benchmark runs inside an atomic block that is forced to roll back,
# so every iteration (and every re-run) sees the same database state.
for translation in translations:
    entity = translation.entity
    locale = translation.locale

    with transaction.atomic():
        count, elapsed = benchmark_old(entity, locale)
        transaction.set_rollback(True)
    old_query_counts.append(count)
    old_times.append(elapsed)

    with transaction.atomic():
        count, elapsed = benchmark_new(entity, locale)
        transaction.set_rollback(True)
    new_query_counts.append(count)
    new_times.append(elapsed)

# Averages; times converted from seconds to milliseconds.
avg_old_q = sum(old_query_counts) / len(old_query_counts)
avg_new_q = sum(new_query_counts) / len(new_query_counts)
avg_old_t = sum(old_times) / len(old_times) * 1000
avg_new_t = sum(new_times) / len(new_times) * 1000

output = [
    f"Benchmarking with {len(translations)} sample translations...\n",
    " Old approach (get_stats x2 + adjust_stats):",
    f" Avg queries : {avg_old_q:.1f}",
    f" Avg time : {avg_old_t:.2f} ms\n",
    " New approach (calculate_stats):",
    f" Avg queries : {avg_new_q:.1f}",
    f" Avg time : {avg_new_t:.2f} ms\n",
    f" Query reduction : {avg_old_q - avg_new_q:.1f} fewer queries per save"
    f" ({(1 - avg_new_q / avg_old_q) * 100:.0f}%)",
    f" Time change : {avg_new_t - avg_old_t:.2f} ms per save"
    f" ({((avg_new_t - avg_old_t) / avg_old_t) * 100:.0f}%)",
]
print("\n".join(output))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment