diff --git a/Jenkinsfile b/Jenkinsfile index 24ac047eb..23c99d3ad 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -17,7 +17,7 @@ pipeline { ES_EN_TN_CACHE='/home/jenkins/TestData/text_norm/ci/grammars/08-30-24-0' FR_TN_CACHE='/home/jenkins/TestData/text_norm/ci/grammars/04-07-25-0' HU_TN_CACHE='/home/jenkins/TestData/text_norm/ci/grammars/07-16-24-0' - PT_TN_CACHE='/home/jenkins/TestData/text_norm/ci/grammars/06-08-23-0' + PT_TN_CACHE='/home/jenkins/TestData/text_norm/ci/grammars/05-01-26-1' RU_TN_CACHE='/home/jenkins/TestData/text_norm/ci/grammars/06-08-23-0' VI_TN_CACHE='/home/jenkins/TestData/text_norm/ci/grammars/10-29-25-0' SV_TN_CACHE='/home/jenkins/TestData/text_norm/ci/grammars/06-08-23-0' @@ -242,16 +242,16 @@ pipeline { sh 'CUDA_VISIBLE_DEVICES="" python nemo_text_processing/text_normalization/normalize.py --lang=sv --text="100" --cache_dir ${SV_TN_CACHE}' } } - // stage('L0: SV ITN grammars') { - // steps { - // sh 'CUDA_VISIBLE_DEVICES="" python nemo_text_processing/inverse_text_normalization/inverse_normalize.py --lang=sv --text="hundra " --cache_dir ${SV_TN_CACHE}' - // } - // } - // stage('L0: PT TN grammars') { - // steps { - // sh 'CUDA_VISIBLE_DEVICES="" python nemo_text_processing/text_normalization/normalize.py --lang=pt --text="2" --cache_dir ${DEFAULT_TN_CACHE}' - // } - // } + // stage('L0: SV ITN grammars') { + // steps { + // sh 'CUDA_VISIBLE_DEVICES="" python nemo_text_processing/inverse_text_normalization/inverse_normalize.py --lang=sv --text="hundra " --cache_dir ${SV_TN_CACHE}' + // } + // } + stage('L0: PT TN grammars') { + steps { + sh 'CUDA_VISIBLE_DEVICES="" python nemo_text_processing/text_normalization/normalize.py --lang=pt --text="2" --cache_dir ${PT_TN_CACHE}' + } + } stage('L0: PT ITN grammars') { steps { sh 'CUDA_VISIBLE_DEVICES="" python nemo_text_processing/inverse_text_normalization/inverse_normalize.py --lang=pt --text="dez " --cache_dir ${PT_TN_CACHE}' diff --git a/nemo_text_processing/text_normalization/normalize.py 
b/nemo_text_processing/text_normalization/normalize.py index 5e2f9ebb5..d8ebf2f4d 100644 --- a/nemo_text_processing/text_normalization/normalize.py +++ b/nemo_text_processing/text_normalization/normalize.py @@ -185,6 +185,9 @@ def __init__( if post_process: self.post_processor = PostProcessingFst(cache_dir=cache_dir, overwrite_cache=overwrite_cache) + elif lang == 'pt': + from nemo_text_processing.text_normalization.pt.taggers.tokenize_and_classify import ClassifyFst + from nemo_text_processing.text_normalization.pt.verbalizers.verbalize_final import VerbalizeFinalFst elif lang == 'ko': from nemo_text_processing.text_normalization.ko.taggers.tokenize_and_classify import ClassifyFst from nemo_text_processing.text_normalization.ko.verbalizers.verbalize_final import VerbalizeFinalFst @@ -734,7 +737,7 @@ def parse_args(): parser.add_argument( "--language", help="language", - choices=["en", "de", "es", "fr", "hu", "sv", "zh", "ar", "it", "hy", "ja", "hi", "ko", "vi"], + choices=["en", "de", "es", "fr", "hu", "sv", "zh", "ar", "it", "hy", "ja", "hi", "ko", "vi", "pt"], default="en", type=str, ) diff --git a/nemo_text_processing/text_normalization/pt/__init__.py b/nemo_text_processing/text_normalization/pt/__init__.py new file mode 100644 index 000000000..ffd13e2d6 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/nemo_text_processing/text_normalization/pt/data/__init__.py b/nemo_text_processing/text_normalization/pt/data/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nemo_text_processing/text_normalization/pt/data/date/__init__.py b/nemo_text_processing/text_normalization/pt/data/date/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/date/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/nemo_text_processing/text_normalization/pt/data/date/months.tsv b/nemo_text_processing/text_normalization/pt/data/date/months.tsv new file mode 100644 index 000000000..6713229bd --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/date/months.tsv @@ -0,0 +1,21 @@ +1 janeiro +01 janeiro +2 fevereiro +02 fevereiro +3 março +03 março +4 abril +04 abril +5 maio +05 maio +6 junho +06 junho +7 julho +07 julho +8 agosto +08 agosto +9 setembro +09 setembro +10 outubro +11 novembro +12 dezembro diff --git a/nemo_text_processing/text_normalization/pt/data/date/numeric_separators.tsv b/nemo_text_processing/text_normalization/pt/data/date/numeric_separators.tsv new file mode 100644 index 000000000..ee24567bc --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/date/numeric_separators.tsv @@ -0,0 +1,3 @@ +/ +. +- diff --git a/nemo_text_processing/text_normalization/pt/data/date/verbal_phrases.tsv b/nemo_text_processing/text_normalization/pt/data/date/verbal_phrases.tsv new file mode 100644 index 000000000..d04c0fa50 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/date/verbal_phrases.tsv @@ -0,0 +1 @@ +preposition de diff --git a/nemo_text_processing/text_normalization/pt/data/electronic/__init__.py b/nemo_text_processing/text_normalization/pt/data/electronic/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/electronic/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nemo_text_processing/text_normalization/pt/data/electronic/domain.tsv b/nemo_text_processing/text_normalization/pt/data/electronic/domain.tsv new file mode 100644 index 000000000..b9daa19a5 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/electronic/domain.tsv @@ -0,0 +1,7 @@ +.com ponto com +.com.br ponto com ponto br +.gov.br ponto gov ponto br +.org ponto org +.net ponto net +.edu ponto edu +.br ponto br diff --git a/nemo_text_processing/text_normalization/pt/data/electronic/electronic_spoken_unit.tsv b/nemo_text_processing/text_normalization/pt/data/electronic/electronic_spoken_unit.tsv new file mode 100644 index 000000000..698bc2773 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/electronic/electronic_spoken_unit.tsv @@ -0,0 +1,16 @@ +google google +usuario usuario +microsoft microsoft +amazon amazon +facebook facebook +meta meta +netflix netflix +spotify spotify +samsung samsung +apple apple +linkedin linkedin +instagram instagram +whatsapp whatsapp +oracle oracle +adobe adobe +paypal paypal diff --git a/nemo_text_processing/text_normalization/pt/data/electronic/server_name.tsv b/nemo_text_processing/text_normalization/pt/data/electronic/server_name.tsv new file mode 100644 index 000000000..50b4eeb65 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/electronic/server_name.tsv @@ -0,0 +1,6 @@ +gmail +nvidia +outlook +hotmail +yahoo +live diff --git a/nemo_text_processing/text_normalization/pt/data/electronic/symbols.tsv 
b/nemo_text_processing/text_normalization/pt/data/electronic/symbols.tsv new file mode 100644 index 000000000..85c8a1b10 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/electronic/symbols.tsv @@ -0,0 +1,22 @@ +. ponto +- traço +_ underscore +! exclamação +# cerquilha +$ dólar +% por cento +& e comercial +' apóstrofo +* asterisco ++ mais +/ barra += igual +? interrogação +^ acento circunflexo +` crase +{ chave esquerda +| barra vertical +} chave direita +~ til +, vírgula +: dois pontos diff --git a/nemo_text_processing/text_normalization/pt/data/fractions/__init__.py b/nemo_text_processing/text_normalization/pt/data/fractions/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/fractions/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/nemo_text_processing/text_normalization/pt/data/fractions/ordinal_exceptions.tsv b/nemo_text_processing/text_normalization/pt/data/fractions/ordinal_exceptions.tsv new file mode 100644 index 000000000..22c9ed16c --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/fractions/ordinal_exceptions.tsv @@ -0,0 +1,2 @@ +segundo meio +terceiro terço diff --git a/nemo_text_processing/text_normalization/pt/data/fractions/powers_of_ten.tsv b/nemo_text_processing/text_normalization/pt/data/fractions/powers_of_ten.tsv new file mode 100644 index 000000000..b19c44364 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/fractions/powers_of_ten.tsv @@ -0,0 +1 @@ +mil milésimo diff --git a/nemo_text_processing/text_normalization/pt/data/fractions/specials.tsv b/nemo_text_processing/text_normalization/pt/data/fractions/specials.tsv new file mode 100644 index 000000000..7efeba5e6 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/fractions/specials.tsv @@ -0,0 +1,5 @@ +connector e +minus menos +plural_suffix s +avos_suffix avos +avos_between diff --git a/nemo_text_processing/text_normalization/pt/data/measure/__init__.py b/nemo_text_processing/text_normalization/pt/data/measure/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/measure/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nemo_text_processing/text_normalization/pt/data/measure/measurements_plural.tsv b/nemo_text_processing/text_normalization/pt/data/measure/measurements_plural.tsv new file mode 100755 index 000000000..d9cbfb87f --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/measure/measurements_plural.tsv @@ -0,0 +1,64 @@ +h horas +min minutos +s segundos +ms milissegundos +ns nanossegundos +μs microssegundos +t toneladas +kg quilos +kg quilogramas +g gramas +mg miligramas +μm micrômetros +nm nanômetros +mm milímetros +cm centímetros +cm² centímetros quadrados +cm³ centímetros cúbicos +m metros +m² metros quadrados +m³ metros cúbicos +km quilômetros +km² quilômetros quadrados +ha hectares +kph quilômetros por hora +mph milhas por hora +m/s metros por segundo +l litros +ml mililitros +kgf quilogramas força +% por cento +°F fahrenheit +°F graus fahrenheit +°C graus celsius +Hz hertz +kHz quilo hertz +MHz mega hertz +GHz giga hertz +W watts +kW quilowatts +MW megawatts +GW gigawatts +Wh watts hora +kWh quilowatts hora +MWh megawatts hora +GWh gigawatts hora +kV quilovolts +V volts +mV milivolts +A amperes +mA miliamperes +rpm rotações por minuto +db decibéis +cal calorias +kcal quilocalorias +G gramas +KG quilos +KG quilogramas +KM quilômetros +M metros +L litros +ML mililitros +M2 metros quadrados +M^2 metros quadrados +C graus celsius diff --git a/nemo_text_processing/text_normalization/pt/data/measure/measurements_singular.tsv b/nemo_text_processing/text_normalization/pt/data/measure/measurements_singular.tsv new file mode 100755 index 000000000..242aba14e --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/measure/measurements_singular.tsv @@ -0,0 +1,65 @@ +h hora +min minuto +s segundo +ms milissegundo +ns nanossegundo +μs microssegundo +t tonelada +kg quilo +kg quilograma +g grama +mg miligrama +μm micrômetro +nm nanômetro +mm 
milímetro +cm centímetro +cm² centímetro quadrado +cm³ centímetro cúbico +m metro +m² metro quadrado +m³ metro cúbico +km quilômetro +km² quilômetro quadrado +ha hectare +kph quilômetro por hora +mph milha por hora +m/s metro por segundo +l litro +ml mililitro +kgf quilograma força +% por cento +°F fahrenheit +°C celsius +°F grau fahrenheit +°C grau celsius +Hz hertz +kHz quilo hertz +MHz mega hertz +GHz giga hertz +W watt +kW quilowatt +MW megawatt +GW gigawatt +Wh watt hora +kWh quilowatt hora +MWh megawatt hora +GWh gigawatt hora +kV quilovolt +V volt +mV milivolt +A ampere +mA miliampere +rpm rotação por minuto +db decibel +cal caloria +kcal quilocaloria +G grama +KG quilo +KG quilograma +KM quilômetro +M metro +L litro +ML mililitro +M2 metro quadrado +M^2 metro quadrado +C celsius diff --git a/nemo_text_processing/text_normalization/pt/data/money/__init__.py b/nemo_text_processing/text_normalization/pt/data/money/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/money/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/nemo_text_processing/text_normalization/pt/data/money/currency_major.tsv b/nemo_text_processing/text_normalization/pt/data/money/currency_major.tsv new file mode 100644 index 000000000..b23d1bcce --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/money/currency_major.tsv @@ -0,0 +1,5 @@ +US$ dólar americano +R$ real +€ euro +£ libra esterlina +$ dólar diff --git a/nemo_text_processing/text_normalization/pt/data/money/currency_major_plural.tsv b/nemo_text_processing/text_normalization/pt/data/money/currency_major_plural.tsv new file mode 100644 index 000000000..feca270ff --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/money/currency_major_plural.tsv @@ -0,0 +1,5 @@ +dólar americano dólares americanos +real reais +euro euros +libra esterlina libras esterlinas +dólar dólares diff --git a/nemo_text_processing/text_normalization/pt/data/money/currency_minor.tsv b/nemo_text_processing/text_normalization/pt/data/money/currency_minor.tsv new file mode 100644 index 000000000..4e9a95d66 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/money/currency_minor.tsv @@ -0,0 +1,5 @@ +US$ centavo +R$ centavo +€ centavo +£ centavo +$ centavo diff --git a/nemo_text_processing/text_normalization/pt/data/money/currency_minor_plural.tsv b/nemo_text_processing/text_normalization/pt/data/money/currency_minor_plural.tsv new file mode 100644 index 000000000..2ac61dff7 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/money/currency_minor_plural.tsv @@ -0,0 +1 @@ +centavo centavos diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/__init__.py b/nemo_text_processing/text_normalization/pt/data/numbers/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/cardinal_specials.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/cardinal_specials.tsv new file mode 100644 index 000000000..04ea91ee4 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/cardinal_specials.tsv @@ -0,0 +1,4 @@ +connector e +thousand mil +hundred_100 cem +hundred_1 cento diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/decimal_fractional_specials.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/decimal_fractional_specials.tsv new file mode 100644 index 000000000..c84a95f53 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/decimal_fractional_specials.tsv @@ -0,0 +1,3 @@ +001 mil e um +010 mil e dez +100 mil e cem diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/decimal_specials.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/decimal_specials.tsv new file mode 100644 index 000000000..f6257d9d1 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/decimal_specials.tsv @@ -0,0 +1,2 @@ +separator vírgula +minus menos diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/digit.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/digit.tsv new file mode 100644 index 000000000..1859416c8 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/digit.tsv 
@@ -0,0 +1,9 @@ +1 um +2 dois +3 três +4 quatro +5 cinco +6 seis +7 sete +8 oito +9 nove diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/hundreds.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/hundreds.tsv new file mode 100644 index 000000000..620f512b3 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/hundreds.tsv @@ -0,0 +1,8 @@ +2 duzentos +3 trezentos +4 quatrocentos +5 quinhentos +6 seiscentos +7 setecentos +8 oitocentos +9 novecentos diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/quantity_words.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/quantity_words.tsv new file mode 100644 index 000000000..a94cbd553 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/quantity_words.tsv @@ -0,0 +1,9 @@ +mil +milhão +milhões +bilhão +bilhões +trilhão +trilhões +quatrilhão +quatrilhões diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/scales.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/scales.tsv new file mode 100644 index 000000000..a3dffe4e0 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/scales.tsv @@ -0,0 +1,4 @@ +one_label plural_suffix magnitude_zeros +um milhão milhões 6 +um bilhão bilhões 9 +um trilhão trilhões 12 diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/teens.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/teens.tsv new file mode 100644 index 000000000..50c4e0b8d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/teens.tsv @@ -0,0 +1,10 @@ +10 dez +11 onze +12 doze +13 treze +14 catorze +15 quinze +16 dezesseis +17 dezessete +18 dezoito +19 dezenove diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/tens.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/tens.tsv new file mode 100644 index 000000000..43c4a8bc6 --- /dev/null +++ 
b/nemo_text_processing/text_normalization/pt/data/numbers/tens.tsv @@ -0,0 +1,8 @@ +2 vinte +3 trinta +4 quarenta +5 cinquenta +6 sessenta +7 setenta +8 oitenta +9 noventa diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/zero.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/zero.tsv new file mode 100644 index 000000000..29be0f38b --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/zero.tsv @@ -0,0 +1 @@ +0 zero diff --git a/nemo_text_processing/text_normalization/pt/data/ordinals/__init__.py b/nemo_text_processing/text_normalization/pt/data/ordinals/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/ordinals/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/nemo_text_processing/text_normalization/pt/data/ordinals/digit.tsv b/nemo_text_processing/text_normalization/pt/data/ordinals/digit.tsv new file mode 100644 index 000000000..5fefbc3b8 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/ordinals/digit.tsv @@ -0,0 +1,10 @@ +primeiro um +segundo dois +terceiro três +quarto quatro +quinto cinco +sexto seis +sétimo sete +oitavo oito +nono nove +décimo dez diff --git a/nemo_text_processing/text_normalization/pt/data/ordinals/feminine.tsv b/nemo_text_processing/text_normalization/pt/data/ordinals/feminine.tsv new file mode 100644 index 000000000..c75ae5ed0 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/ordinals/feminine.tsv @@ -0,0 +1,11 @@ +primeiro primeira +segundo segunda +terceiro terceira +quarto quarta +quinto quinta +sexto sexta +sétimo sétima +oitavo oitava +nono nona +décimo décima +ésimo ésima diff --git a/nemo_text_processing/text_normalization/pt/data/ordinals/hundreds.tsv b/nemo_text_processing/text_normalization/pt/data/ordinals/hundreds.tsv new file mode 100644 index 000000000..6d919a86c --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/ordinals/hundreds.tsv @@ -0,0 +1,10 @@ +centésimo cem +centésimo cento +ducentésimo duzentos +trecentésimo trezentos +quadringentésimo quatrocentos +quincentésimo quinhentos +sexcentésimo seiscentos +septingentésimo setecentos +octingentésimo oitocentos +noningentésimo novecentos diff --git a/nemo_text_processing/text_normalization/pt/data/ordinals/specials.tsv b/nemo_text_processing/text_normalization/pt/data/ordinals/specials.tsv new file mode 100644 index 000000000..bb6933fe6 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/ordinals/specials.tsv @@ -0,0 +1,2 @@ +connector_in e +connector_out diff --git a/nemo_text_processing/text_normalization/pt/data/ordinals/teen.tsv b/nemo_text_processing/text_normalization/pt/data/ordinals/teen.tsv new file mode 100644 index 000000000..1b1e191c9 
--- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/ordinals/teen.tsv @@ -0,0 +1,9 @@ +décimo primeiro onze +décimo segundo doze +décimo terceiro treze +décimo quarto catorze +décimo quinto quinze +décimo sexto dezesseis +décimo sétimo dezessete +décimo oitavo dezoito +décimo nono dezenove diff --git a/nemo_text_processing/text_normalization/pt/data/ordinals/ties.tsv b/nemo_text_processing/text_normalization/pt/data/ordinals/ties.tsv new file mode 100644 index 000000000..f40700034 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/ordinals/ties.tsv @@ -0,0 +1,8 @@ +vigésimo vinte +trigésimo trinta +quadragésimo quarenta +quinquagésimo cinquenta +sexagésimo sessenta +septuagésimo setenta +octogésimo oitenta +nonagésimo noventa diff --git a/nemo_text_processing/text_normalization/pt/data/telephone/__init__.py b/nemo_text_processing/text_normalization/pt/data/telephone/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/telephone/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/nemo_text_processing/text_normalization/pt/data/telephone/ip_prompt.tsv b/nemo_text_processing/text_normalization/pt/data/telephone/ip_prompt.tsv new file mode 100644 index 000000000..7d59e35eb --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/telephone/ip_prompt.tsv @@ -0,0 +1,2 @@ +ip ip +endereço de ip endereço de i p diff --git a/nemo_text_processing/text_normalization/pt/data/telephone/telephone_prompt.tsv b/nemo_text_processing/text_normalization/pt/data/telephone/telephone_prompt.tsv new file mode 100644 index 000000000..62efeccd5 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/telephone/telephone_prompt.tsv @@ -0,0 +1,4 @@ +ligue para +telefone +celular +meu número é diff --git a/nemo_text_processing/text_normalization/pt/data/time/__init__.py b/nemo_text_processing/text_normalization/pt/data/time/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/time/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/nemo_text_processing/text_normalization/pt/data/time/day_period_suffix.tsv b/nemo_text_processing/text_normalization/pt/data/time/day_period_suffix.tsv new file mode 100644 index 000000000..a942795f4 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/time/day_period_suffix.tsv @@ -0,0 +1,4 @@ +manhã da manhã 6 11 +tarde da tarde 12 17 +noite da noite 18 23 +madrugada da madrugada 0 5 diff --git a/nemo_text_processing/text_normalization/pt/data/whitelist/__init__.py b/nemo_text_processing/text_normalization/pt/data/whitelist/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/whitelist/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/nemo_text_processing/text_normalization/pt/data/whitelist/ipa_symbols.tsv b/nemo_text_processing/text_normalization/pt/data/whitelist/ipa_symbols.tsv new file mode 100644 index 000000000..f5559c711 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/whitelist/ipa_symbols.tsv @@ -0,0 +1,521 @@ +a +aoj +aəj +aː +aːʲ +aː͡j +aː͡ɨ̯ +aˤ +aˤː +a̠ +a̠ː +a̰ +a͡e +a͡i +a͡iː +a͡i̯ +a͡j +a͡o +a͡u +a͡uː +a͡u̯ +a͡w +a͡ə +a͡ɨ̯ +a͡ɪ +a͡ʊ +b +bʱ +bʲ +bː +b̥ +c +cʰ +cː +ç +d +dʲ +dː +d̥ +d̪ +d̪ʱ +d͡z +d͡zʷ +d͡zː +d͡ʑ +d͡ʒ +d͡ʒʱ +d͡ʒʲ +d͡ʒː +e +eː +eːʲ +eː͡j +ẽː +ẽ͡j̃ +e̞ +e̞ː +e̯ +e͡i +e͡iː +e͡ɨ̯ +f +fʲ +fː +h +hː +i +iəj +iəw +iʲ +iː +iːʲ +ĩː +i̥ +i̯ +i͡u +i͡ə +i͡ɛ +j +jː +j̃ +k +kʰ +kʰː +kʲ +kʲʼ +kʷ +kʷʼ +kʼ +kː +k̚ +k̚ʲ +k̟̚ +k͈ +k͡p̚ +l +lʲ +lː +l̥ +l̩ +m +mʲ +mʲː +mː +m̥ +m̩ +n +nʲ +nː +n̥ +n̩ +o +oʲ +oː +oːʲ +ò +õ͡j̃ +õ͡w̃ +o̝ +o̞ +o̞ː +o̯ +o̰ +o͡u +o͡uː +p +pʰ +pʰː +pʲ +pʷʼ +pʼ +pː +p̚ +p̚ʲ +p͈ +p͜f +p͡f +q +qʷ +qʼ +r +rʲ +rː +r̂ +r̂ː +r̥ +r̩ +s +sʰ +sʲ +sʼ +sː +s͈ +t +tʰ +tʰː +tʲ +tʷʼ +tʼ +tː +t̚ +t̪ +t̪ʰ +t͈ +t͜s +t͡s +t͡sʰ +t͡sʰː +t͡sʲ +t͡sʷ +t͡sʼ +t͡sː +t͡ɕ +t͡ɕʰ +t͡ɕ͈ +t͡ʂ +t͡ʂʼ +t͡ʃ +t͡ʃʰ +t͡ʃʰː +t͡ʃʲ +t͡ʃʷ +t͡ʃʼ +t͡ʃː +u +uəj +uʲ +uː +uːʲ +ũː +ũ͡j̃ +u̯ +u͡e +u͡i +u͡j +u͡ɔ +u͡ə +v +vʲ +vː +w +w̃ +x +xʷ +xː +y +yː +yːʲ +y̯ +z +zʲ +zː +z̥ +à +àː +á +áː +â +âː +ã +ã̠ +æ +æː +æ̀ +æ̀ː +æ̂ +æ̂ː +æ͡ɪ +æ͡ʉ +ç +è +èː +é +éː +ê +êː +ì +ìː +í +íː +î +îː +ï +ð +ò +òː +ó +óː +ô +ôː +õ +õː +õ̞ +ø +øː +øːʲ +ø̯ +ù +ùː +ú +úː +û +ûː +ā +āː +ē +ēː +ĕ +ĕ͡ə +ě +ěː +ħ +ĩ +ĩː +ī +īː +ŋ +ŋʲ +ŋ̊ +ŋ̍ +ŋ̟ +ŋ̩ +ŋ͡m +ō +ŏ +ŏ͡ə +œ +œː +œ̃ +œ͡i +œ͡iː +œ͡ʏ +ř +řː +ũ +ũː +ū +ūː +ŭ +ŭ͡ə +ǎ +ǎː +ǐ +ǐː +ǒ +ǒː +ǔ +ǔː +ǣ +ǣː +ɐ +ɐː +ɐ̃ +ɐ̃͡j̃ +ɐ̃͡w̃ +ɐ̯ +ɐ̯̯ +ɑ +ɑː +ɑ̃ +ɑ̃ː +ɒ +ɒʲ +ɒː +ɓ +ɔ +ɔː +ɔˤː +ɔ̀ +ɔ̀ː +ɔ́ +ɔ́ː +ɔ̃ +ɔ̃ː +ɔ̰ +ɔ͡i̯ +ɔ͡ə +ɔ͡ɨ̯ +ɔ͡ɪ +ɔ͡ʊ +ɕ +ɕʰ +ɕː +ɕ͈ +ɖ +ɖʱ +ɗ +ɘ +ɘː +ə +əː +əˤ +ə̀ +ə́ +ə̃ +ə̯ +ə͡u̯ +ə͡w +ə͡ɨ +ə͡ɨ̯ +ɚ +ɛ +ɛʲ +ɛː +ɛˤː +ɛ̀ +ɛ̀ː +ɛ́ +ɛ́ː +ɛ̂ +ɛ̂ː +ɛ̃ +ɛ̃ː +ɛ̄ +ɛ̄ː +ɛ̰ +ɛ͡i +ɛ͡i̯ +ɛ͡u +ɛ͡u̯ +ɛ͡ɪ +ɛ͡ʊ +ɜ +ɜː +ɝ +ɝː +ɟ +ɟː +ɟ͡ʝ +ɡ +ɡʱ +ɡʲ +ɡʷ +ɡː 
+ɡ̊ +ɣ +ɤ +ɥ +ɦ +ɨ +ɨəj +ɨː +ɨ̃ᵝ +ɨ̞ +ɨ̥ᵝ +ɨ̯ +ɨ͡u̯ +ɨ͡w +ɨ͡ə +ɨᵝ +ɨᵝː +ɪ +ɪː +ɪ̀ +ɪ́ +ɪ̃ +ɪ̯ +ɪ̰ +ɪ͡u̯ +ɪ͡ʊ +ɫ +ɫː +ɬ +ɬʼ +ɭ +ɮ +ɯ +ɯː +ɯ̟̃ᵝ +ɯ̟̊ᵝ +ɯ̟ᵝ +ɯ̟ᵝː +ɰ +ɰ̃ +ɰᵝ +ɱ +ɱ̩ +ɲ +ɲː +ɲ̊ +ɲ̟ +ɳ +ɴ +ɸ +ɸʷ +ɹ +ɻ +ɽ +ɽʱ +ɾ +ɾʲ +ɾː +ɾ̝̊ +ʀ +ʁ +ʁʷ +ʁː +ʂ +ʂʷ +ʃ +ʃʰ +ʃʲ +ʃʷ +ʃʷʼ +ʃʼ +ʃː +ʈ +ʈʰ +ʉ +ʉː +ʊ +ʊ̀ +ʊ́ +ʊ̃ +ʊ̯ +ʊ̯͡i +ʊ̯͡ɨ +ʊ̰ +ʋ +ʌ +ʌ̹ +ʍ +ʎ +ʏ +ʏː +ʏ̯ +ʐ +ʐʷ +ʑ +ʒ +ʒʲ +ʒʷ +ʒː +ʔ +ʔʲ +ʔʷ +ʝ +˦ˀ˥ +˦˥ +˦˧˥ +˦˩ +˧ˀ˨ +˧˦ +˧˧ +˧˨ +˧˩ +˨˩ +˨˩˦ +˨˩˨ +β +θ +χ +χʷ +χː +ḛ +ḭ +ṵ +ẽ +ẽː +ẽ̞ +‿ \ No newline at end of file diff --git a/nemo_text_processing/text_normalization/pt/data/whitelist/symbol.tsv b/nemo_text_processing/text_normalization/pt/data/whitelist/symbol.tsv new file mode 100644 index 000000000..7f7b525e3 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/whitelist/symbol.tsv @@ -0,0 +1,23 @@ +& e comercial +# cerquilha +@ arroba +§ parágrafo +™ marca comercial +® marca registrada +© direitos autorais +_ sublinhado +% por cento +* asterisco ++ mais +/ barra += igual +^ acento circunflexo +| barra vertical +~ til +$ dólar +£ libra esterlina +€ euro +₩ won +¥ ienes +° grau +º ordinal masculino diff --git a/nemo_text_processing/text_normalization/pt/data/whitelist/tts.tsv b/nemo_text_processing/text_normalization/pt/data/whitelist/tts.tsv new file mode 100644 index 000000000..e69de29bb diff --git a/nemo_text_processing/text_normalization/pt/graph_utils.py b/nemo_text_processing/text_normalization/pt/graph_utils.py new file mode 100644 index 000000000..1d548deb5 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/graph_utils.py @@ -0,0 +1,193 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use it except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +Portuguese (PT) text normalization graph utilities. + +Self-contained module with no dependency on en.graph_utils. Provides character/digit +symbols (NEMO_*), space helpers (delete_space, insert_space, delete_extra_space), +GraphFst base class, generator_main for FAR export, and PT-specific helpers +(filter_cardinal_punctuation, shift_cardinal_gender_pt). +""" + +import os +import string +from pathlib import Path +from typing import Dict + +import pynini +from pynini import Far +from pynini.export import export +from pynini.lib import byte, pynutil, utf8 + +from nemo_text_processing.utils.logging import logger + +# ---- Character/digit symbols (same semantics as EN) ---- +NEMO_CHAR = utf8.VALID_UTF8_CHAR +NEMO_DIGIT = byte.DIGIT +NEMO_LOWER = pynini.union(*string.ascii_lowercase).optimize() +NEMO_UPPER = pynini.union(*string.ascii_uppercase).optimize() +NEMO_ALPHA = pynini.union(NEMO_LOWER, NEMO_UPPER).optimize() +NEMO_SPACE = " " +NEMO_NON_BREAKING_SPACE = "\u00a0" +NEMO_WHITE_SPACE = pynini.union(" ", "\t", "\n", "\r", "\u00a0").optimize() +NEMO_NOT_QUOTE = pynini.difference(NEMO_CHAR, pynini.accep('"')).optimize() +NEMO_SIGMA = pynini.closure(NEMO_CHAR) +NEMO_NOT_SPACE = pynini.difference(NEMO_CHAR, NEMO_WHITE_SPACE).optimize() + +MIN_NEG_WEIGHT = -0.0001 +INPUT_CASED = "cased" +INPUT_LOWER_CASED = "lower_cased" + +delete_space = pynutil.delete(pynini.closure(NEMO_WHITE_SPACE)) +insert_space = pynutil.insert(" ") +delete_extra_space = pynini.cross(pynini.closure(NEMO_WHITE_SPACE, 1), " ").optimize() + +delete_preserve_order = 
pynini.closure(pynutil.delete(" preserve_order: true")) + + +def generator_main(file_name: str, graphs: Dict[str, "pynini.FstLike"]) -> None: + """ + Export one or more graphs to an OpenFst Finite State Archive (FAR) file. + + Args: + file_name: path to the output .far file. + graphs: mapping of rule names to FST graphs to export. + """ + exporter = export.Exporter(file_name) + for rule, graph in graphs.items(): + exporter[rule] = graph.optimize() + exporter.close() + logger.info(f"Created {file_name}") + + +class GraphFst: + """ + Base class for all Portuguese text normalization grammar FSTs. + + Args: + name: name of the grammar (e.g. "cardinal", "decimal"). + kind: either "classify" or "verbalize". + deterministic: if True will provide a single transduction option, + for False multiple options (used for audio-based normalization). + """ + + def __init__(self, name: str, kind: str, deterministic: bool = True): + self.name = name + self.kind = kind + self._fst = None + self.deterministic = deterministic + + self.far_path = Path(os.path.dirname(os.path.abspath(__file__)) + "/grammars/" + kind + "/" + name + ".far") + if self.far_exist(): + self._fst = Far(self.far_path, mode="r", arc_type="standard", far_type="default").get_fst() + + def far_exist(self) -> bool: + return self.far_path.exists() + + @property + def fst(self) -> "pynini.FstLike": + return self._fst + + @fst.setter + def fst(self, fst): + self._fst = fst + + def add_tokens(self, fst) -> "pynini.FstLike": + return pynutil.insert(f"{self.name} {{ ") + fst + pynutil.insert(" }") + + def delete_tokens(self, fst) -> "pynini.FstLike": + res = ( + pynutil.delete(f"{self.name}") + + delete_space + + pynutil.delete("{") + + delete_space + + fst + + delete_space + + pynutil.delete("}") + ) + return res @ pynini.cdrewrite(pynini.cross("\u00a0", " "), "", "", NEMO_SIGMA) + + +# ---- PT-specific (Brazilian: 1.000.000 or 1 000 000) ---- +cardinal_separator = pynini.string_map([".", " "]) + + +def 
filter_cardinal_punctuation(fst: "pynini.FstLike") -> "pynini.FstLike": + """ + Parse digit groups separated by cardinal_separator (e.g. 1.000.000) then apply fst. + + Args: + fst: FST that maps digit string to verbalized cardinal. + + Returns: + Composed FST that accepts digit strings with optional thousand separators. + """ + exactly_three = NEMO_DIGIT**3 + up_to_three = pynini.closure(NEMO_DIGIT, 1, 3) + cardinal_string = pynini.closure(NEMO_DIGIT, 1) + cardinal_string |= ( + up_to_three + + pynutil.delete(cardinal_separator) + + pynini.closure(exactly_three + pynutil.delete(cardinal_separator)) + + exactly_three + ) + return cardinal_string @ fst + + +def shift_cardinal_gender_pt(fst: "pynini.FstLike") -> "pynini.FstLike": + """ + Apply Portuguese masculine-to-feminine conversion for cardinal strings, e.g. + "um" -> "uma", "dois" -> "duas", "duzentos" -> "duzentas". + + Args: + fst: FST producing masculine cardinal verbalization. + + Returns: + FST that produces feminine form when composed with the same input. + """ + fem_ones = pynini.cdrewrite( + pynini.cross("um", "uma"), + "", + pynini.union(NEMO_SPACE, pynini.accep("[EOS]"), pynini.accep('"')), + NEMO_SIGMA, + ) + fem_twos = pynini.cdrewrite( + pynini.cross("dois", "duas"), + "", + pynini.union(NEMO_SPACE, pynini.accep("[EOS]"), pynini.accep('"')), + NEMO_SIGMA, + ) + fem_hundreds = pynini.cdrewrite( + pynini.cross("entos", "entas"), + pynini.union("duz", "trez", "quatroc", "quinh", "seisc", "setec", "oitoc", "novec"), + pynini.union(NEMO_SPACE, pynini.accep("[EOS]"), pynini.accep('"')), + NEMO_SIGMA, + ) + return fst @ fem_ones @ fem_twos @ fem_hundreds + + +def convert_space(fst) -> "pynini.FstLike": + """ + Converts space to nonbreaking space. + Used only in tagger grammars for transducing token values within quotes, e.g. name: "hello kitty" + This is making transducer significantly slower, so only use when there could be potential spaces within quotes, otherwise leave it. 
+ + Args: + fst: input fst + + Returns output fst where breaking spaces are converted to non breaking spaces + """ + return fst @ pynini.cdrewrite(pynini.cross(NEMO_SPACE, NEMO_NON_BREAKING_SPACE), "", "", NEMO_SIGMA) diff --git a/nemo_text_processing/text_normalization/pt/taggers/__init__.py b/nemo_text_processing/text_normalization/pt/taggers/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/taggers/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nemo_text_processing/text_normalization/pt/taggers/cardinal.py b/nemo_text_processing/text_normalization/pt/taggers/cardinal.py new file mode 100644 index 000000000..0f14f3b46 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/taggers/cardinal.py @@ -0,0 +1,303 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +from functools import reduce + +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import ( + NEMO_ALPHA, + NEMO_DIGIT, + NEMO_SIGMA, + NEMO_SPACE, + NEMO_WHITE_SPACE, + GraphFst, + delete_space, + filter_cardinal_punctuation, + insert_space, +) +from nemo_text_processing.text_normalization.pt.utils import get_abs_path, load_labels + + +class CardinalFst(GraphFst): + """ + Finite state transducer for classifying Portuguese cardinals, e.g. + "1000" -> cardinal { integer: "mil" } + "2.000.000" -> cardinal { integer: "dois milhões" } + "-5" -> cardinal { negative: "true" integer: "cinco" } + + Args: + deterministic: if True will provide a single transduction option, + for False multiple options (used for audio-based normalization) + """ + + def __init__(self, deterministic: bool = True): + super().__init__(name="cardinal", kind="classify", deterministic=deterministic) + + specials = { + row[0]: row[1] for row in load_labels(get_abs_path("data/numbers/cardinal_specials.tsv")) if len(row) >= 2 + } + connector_e = insert_space + pynutil.insert(specials["connector"]) + insert_space + thousand = specials["thousand"] + hundred_100 = specials["hundred_100"] + hundred_1 = specials["hundred_1"] + + scale_rows = load_labels(get_abs_path("data/numbers/scales.tsv")) + scales = [(row[0], row[1], int(row[2])) for row in scale_rows if len(row) >= 3 and row[2].strip().isdigit()] + + _num = lambda p: pynini.string_file(get_abs_path(f"data/numbers/{p}")) + zero, digit, teens, tens, hundreds = ( + _num("zero.tsv"), + _num("digit.tsv"), + _num("teens.tsv"), + _num("tens.tsv"), + _num("hundreds.tsv"), + ) + digits_no_one = (NEMO_DIGIT - "1") @ digit + + graph_tens = teens | (tens + (pynutil.delete("0") | (connector_e + digit))) + self.tens = graph_tens.optimize() + self.two_digit_non_zero = pynini.union(digit, graph_tens, 
(pynini.cross("0", NEMO_SPACE) + digit)).optimize() + + # After "X00" hundreds (oitocentos, …), suffix "01"-"09" needs leading zero stripped + # (graph_tens has no path for "09"; connector+digit only consumes one digit). + graph_hundreds = hundreds + pynini.union( + pynutil.delete("00"), + (connector_e + graph_tens), + (connector_e + pynutil.delete("0") + digit), + (connector_e + digit), + ) + # "100" -> cem only (cross("1", cento)+delete("00") would also match "100" but + # yields "cento"; OpenFst vs pynini top_rewrite can disagree on ties — Sparrowhawk). + graph_hundreds |= pynini.cross("100", hundred_100) + graph_hundreds |= pynini.cross("1", hundred_1) + pynini.union( + (connector_e + graph_tens), + (connector_e + pynutil.delete("0") + digit), + ) + self.hundreds = graph_hundreds.optimize() + + h_comp_base = pynini.union(graph_hundreds, pynutil.delete("0") + graph_tens) + h_comp = h_comp_base | (pynutil.delete("00") + digit) + h_comp_no_one = h_comp_base | (pynutil.delete("00") + digits_no_one) + + pure_tens_input = pynini.union(*[pynini.accep(str(d * 10)) for d in range(1, 10)]) + graph_pure_tens_only = pure_tens_input @ graph_tens + graph_compound_tens = (pynini.closure(NEMO_DIGIT, 2, 2) - pure_tens_input) @ graph_tens + + graph_pure_components = pynini.union( + pynutil.delete("0") + graph_pure_tens_only, + pynutil.delete("00") + digit, + hundreds + pynutil.delete("00"), + pynini.cross("100", hundred_100), + ) + graph_compound_hundreds = pynini.union( + pynini.cross("1", hundred_1) + + pynini.union( + (connector_e + graph_tens), + (connector_e + pynutil.delete("0") + digit), + ), + hundreds + + pynini.union( + (connector_e + graph_tens), + (connector_e + pynutil.delete("0") + digit), + (connector_e + digit), + ), + ) + + suffix_after_mil = pynini.union( + pynutil.delete("000"), + (connector_e + graph_pure_components), + (insert_space + graph_compound_hundreds), + # Use connector_e so "2024" -> dois mil e vinte e quatro (not dois mil vinte e quatro). 
+ (connector_e + pynutil.delete("0") + graph_compound_tens), + ) + + t_comp = pynini.union( + pynutil.delete("000") + h_comp, + h_comp_no_one + insert_space + pynutil.insert(thousand) + suffix_after_mil, + pynini.cross("001", thousand) + suffix_after_mil, + ) + t_comp_no_one = pynini.union( + pynutil.delete("000") + h_comp_no_one, + h_comp_no_one + + insert_space + + pynutil.insert(thousand) + + ((insert_space + h_comp) | pynutil.delete("000")), + pynini.cross("001", thousand) + ((insert_space + h_comp) | pynutil.delete("000")), + ) + + graph_large_scales = pynini.accep("") + for one_label, plural_suffix, _ in reversed(scales): + g = pynutil.add_weight(pynini.cross("000001", one_label), -0.001) + g |= t_comp_no_one + pynutil.insert(plural_suffix) + g |= pynutil.delete("000000") + g += insert_space + graph_large_scales += g + + # 9/12-digit: scale block + trailing (million+thousands, billion+9digits) + scale_3_mil = self._scale_block_3(scales[0][0], scales[0][1], h_comp_no_one) + scale_3_bi = self._scale_block_3(scales[1][0], scales[1][1], h_comp_no_one) + graph_9 = self._build_scale_trailing_graph(scale_3_mil, t_comp, 6, 9) + graph_12 = self._build_scale_trailing_graph(scale_3_bi, graph_9, 9, 12) + pure_9, pure_12 = self._pure_inputs(9), self._pure_inputs(12) + trail_9 = (pure_9 @ graph_9, (NEMO_DIGIT**9 - pure_9) @ graph_9) + trail_12 = (pure_12 @ graph_12, (NEMO_DIGIT**12 - pure_12) @ graph_12) + + # Units 6 (u6): pure get "e" after scale; compound no "e" + u6_one = pynini.cross("000001", "1") @ digit + u6_pure = pynini.union( + u6_one, + pynini.cross("001000", thousand), + pynini.cross("000010", "10") @ graph_tens, + pynini.cross("000100", hundred_100), + (pynini.cross("010000", "10") @ graph_tens) + insert_space + pynutil.insert(thousand), + pynini.cross("100000", hundred_100) + insert_space + pynutil.insert(thousand), + ) + u6_compound = (NEMO_DIGIT**6 - self._pure_inputs(6)) @ t_comp + u6 = u6_pure | u6_compound + z18 = pynini.accep("0" * 18) # 18 zeros: 
branch no "e" + smaller_e = (connector_e + u6_pure) | u6_compound | pynutil.delete("0" * 6) + smaller = u6 | pynutil.delete("0" * 6) + graph_24 = (((NEMO_DIGIT**18 - z18) + NEMO_DIGIT**6) @ (graph_large_scales + smaller_e)) | ( + (z18 + NEMO_DIGIT**6) @ (pynutil.delete(z18) + smaller) + ) + + trail_by_z = {9: trail_9, 12: trail_12} + magnitude_patterns = [ + self._build_magnitude_pattern( + one_label, + plural_suffix, + magnitude_zeros, + trail_by_z.get(magnitude_zeros), + connector_e, + insert_space, + digit, + graph_tens, + graph_hundreds, + ) + for one_label, plural_suffix, magnitude_zeros in scales + if magnitude_zeros > 0 + ] + + pad = (NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT, 0) + pad = pad @ pynini.cdrewrite(pynini.closure(pynutil.insert("0")), "[BOS]", "", NEMO_SIGMA) @ NEMO_DIGIT**24 + norm = pynini.cdrewrite(delete_space, "[BOS]", "", NEMO_SIGMA) @ pynini.cdrewrite( + delete_space, "", "[EOS]", NEMO_SIGMA + ) + norm = norm @ pynini.cdrewrite( + pynini.cross(pynini.closure(NEMO_WHITE_SPACE, 2), NEMO_SPACE), NEMO_ALPHA, NEMO_ALPHA, NEMO_SIGMA + ) + self.graph = reduce(lambda a, b: a | b, magnitude_patterns, pad @ graph_24 @ norm) | zero + self.graph = filter_cardinal_punctuation(self.graph).optimize() + + optional_minus_graph = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", "\"true\" "), 0, 1) + final_graph = optional_minus_graph + pynutil.insert("integer: \"") + self.graph + pynutil.insert("\"") + final_graph = self.add_tokens(final_graph) + self.fst = final_graph.optimize() + + def _scale_block_3(self, one_label, plural_suffix, component_no_one): + """001->one_label, 000->'', else component+plural.""" + return pynini.union( + pynini.cross("001", one_label), + pynini.cross("000", ""), + (NEMO_DIGIT**3 - pynini.accep("001") - pynini.accep("000")) + @ (component_no_one + insert_space + pynutil.insert(plural_suffix)), + ) + + def _build_scale_trailing_graph(self, scale_3, sub_graph, trailing_len, total_len): + """total_len digits = 
scale_3 + trailing; no trailing space when trailing all zeros.""" + zt, ztotal = "0" * trailing_len, "0" * total_len + scale_nonzero = NEMO_DIGIT**3 - pynini.accep("000") + branches = [ + (pynini.accep("000") + NEMO_DIGIT**trailing_len) @ (pynutil.delete("000") + sub_graph), + (scale_nonzero + (NEMO_DIGIT**trailing_len - pynini.accep(zt))) @ (scale_3 + insert_space + sub_graph), + (scale_nonzero + pynini.accep(zt)) @ (scale_3 + pynutil.delete(zt)), + (pynini.accep("000") + pynini.accep(zt)) @ pynutil.delete(ztotal), + ] + return pynini.union(*branches) + + @staticmethod + def _pure_inputs(num_digits): + """Inputs 1, 10, 100, ... as num_digits-digit strings.""" + return pynini.union(*[pynini.accep(str(10**k).zfill(num_digits)) for k in range(0, num_digits)]) + + def _magnitude_graph( + self, + one_word, + plural_suffix, + zero_count, + graph_digit, + graph_tens, + graph_hundreds, + connector_e, + insert_space, + trailing_pair=None, + ): + """Round (1–3 digit + scale + zeros); optional trailing (e + pure | space + compound).""" + zeros = "0" * zero_count + round_pats = [] + trail_pats = [] if trailing_pair else None + for L in (1, 2, 3): + total = zero_count + L + if L == 1: + lead = pynini.cross("1", one_word) | ((NEMO_DIGIT - "1") @ graph_digit + pynutil.insert(plural_suffix)) + else: + lead = pynini.closure(NEMO_DIGIT, L, L) @ (graph_tens if L == 2 else graph_hundreds) + pynutil.insert( + plural_suffix + ) + lead_fst = NEMO_DIGIT**L @ lead + round_pats.append(pynini.closure(NEMO_DIGIT, total, total) @ (lead_fst + pynutil.delete(zeros))) + if trailing_pair: + pure, compound = trailing_pair + trail_part = NEMO_DIGIT**zero_count @ (connector_e + pure) | NEMO_DIGIT**zero_count @ ( + insert_space + compound + ) + trail_pats.append(pynini.closure(NEMO_DIGIT, total, total) @ (lead_fst + trail_part)) + graph_round = pynini.union(*round_pats) + graph_trail = pynini.union(*trail_pats) if trail_pats else None + return graph_round, graph_trail + + def 
_build_magnitude_pattern( + self, + one_label, + plural_suffix, + magnitude_zeros, + trailing_pair, + connector_e, + insert_space, + graph_digit, + graph_tens, + graph_hundreds, + ): + """Restrict length; round + optional non-zero trailing.""" + restrict = (NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT, magnitude_zeros, magnitude_zeros + 2) + graph_round, graph_trail = self._magnitude_graph( + one_label, + plural_suffix, + magnitude_zeros, + graph_digit, + graph_tens, + graph_hundreds, + connector_e, + insert_space, + trailing_pair, + ) + if graph_trail is None: + return pynutil.add_weight(restrict @ graph_round, -1.0) + non_zero_trail = pynini.union( + *[NEMO_DIGIT**n + (NEMO_DIGIT**magnitude_zeros - pynini.accep("0" * magnitude_zeros)) for n in (1, 2, 3)] + ) + return pynutil.add_weight(restrict @ (graph_round | (non_zero_trail @ graph_trail)), -1.0) diff --git a/nemo_text_processing/text_normalization/pt/taggers/date.py b/nemo_text_processing/text_normalization/pt/taggers/date.py new file mode 100644 index 000000000..8f12677ab --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/taggers/date.py @@ -0,0 +1,151 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import ( + NEMO_DIGIT, + NEMO_SIGMA, + GraphFst, + delete_space, + insert_space, +) +from nemo_text_processing.text_normalization.pt.utils import get_abs_path, load_labels + + +class DateFst(GraphFst): + """ + Finite state transducer for classifying Portuguese (Brazilian) dates, e.g. + 15/03/2024 -> date { day: "quinze" month: "março" year: "dois mil e vinte e quatro" preserve_order: true } + 15 de março de 2024 -> date { day: "quinze" month: "março" year: "dois mil e vinte e quatro" preserve_order: true } + 2024-03-15 -> date { day: "quinze" month: "março" year: "dois mil e vinte e quatro" preserve_order: true } + 03/15/2024 -> date { day: "quinze" month: "março" year: "dois mil e vinte e quatro" preserve_order: true } + """ + + def __init__(self, cardinal: GraphFst, deterministic: bool = True): + super().__init__(name="date", kind="classify", deterministic=deterministic) + numbers = cardinal.graph + + month_rows = load_labels(get_abs_path("data/date/months.tsv")) + month_pairs = [(r[0], r[1]) for r in month_rows if len(r) >= 2] + month_to_word = pynini.string_map(month_pairs).optimize() + + day_10_31 = ((NEMO_DIGIT - "0") + NEMO_DIGIT) @ pynini.union(*[str(x) for x in range(10, 32)]) @ numbers + day_02_09 = pynutil.delete("0") + (pynini.union(*[str(x) for x in range(2, 10)]) @ numbers) + day_2_9 = pynini.union(*[str(x) for x in range(2, 10)]) @ numbers + day_inner = pynini.union( + pynini.cross("01", "primeiro"), + day_10_31, + day_02_09, + day_2_9, + pynini.cross("1", "primeiro"), + ).optimize() + day_part = pynutil.insert('day: "') + day_inner + pynutil.insert('"') + + month_digits = ( + pynini.union("10", "11", "12") + | pynutil.delete("0") + pynini.union(*[str(x) for x in range(1, 10)]) + | pynini.union(*[str(x) for x in range(1, 10)]) + ) + month_num = month_digits @ month_to_word + month_part = pynutil.insert('month: "') + month_num + 
pynutil.insert('"') + + year_num = ((NEMO_DIGIT - "0") + NEMO_DIGIT**3) @ numbers + year_part = pynutil.insert('year: "') + year_num + pynutil.insert('"') + + preserve = pynutil.insert(" preserve_order: true") + + delete_de = delete_space + pynutil.delete("de") + delete_space + month_names = sorted({r[1] for r in month_rows if len(r) >= 2}, key=len, reverse=True) + text_pairs = [] + for name in month_names: + text_pairs.append((name, name)) + if name and name[0].islower(): + text_pairs.append((name[0].upper() + name[1:], name)) + month_written = pynutil.insert('month: "') + pynini.string_map(text_pairs).optimize() + pynutil.insert('"') + graph_text = day_part + delete_de + month_written + delete_de + year_part + preserve + + sep_path = get_abs_path("data/date/numeric_separators.tsv") + separators = [r[0].strip() for r in load_labels(sep_path) if r and r[0].strip()] + + one_or_two_digits = pynini.closure(NEMO_DIGIT, 1, 2) + year_four = (NEMO_DIGIT - "0") + NEMO_DIGIT**3 + _mdy_weight = 0.05 + + months_spoken = sorted({r[1] for r in month_rows if len(r) >= 2}) + day_spokens = set() + for n in range(1, 32): + for key in (str(n), f"{n:02d}"): + dstr = pynini.shortestpath(pynini.compose(pynini.accep(key), day_inner.optimize())).string() + day_spokens.add(dstr) + + _preserve_tail = " preserve_order: true" + + ymd_to_dmy_graph = None + mdy_to_dmy_graph = None + for month in months_spoken: + for day in day_spokens: + # After year: + sigma (year value + quotes), delete month/day and trailing preserve + # so the input is fully consumed (mdy_to_dmy does not need this: sigma eats the tail). 
+ ymd_curr = ( + pynutil.insert('day: "' + day + '" month: "' + month + '" ') + + pynini.accep("year:") + + NEMO_SIGMA + + pynutil.delete(' month: "' + month + '" day: "' + day + '"' + _preserve_tail) + ) + ymd_to_dmy_graph = ymd_curr if ymd_to_dmy_graph is None else pynini.union(ymd_to_dmy_graph, ymd_curr) + + mdy_curr = ( + pynutil.insert('day: "' + day + '" month: "' + month + '" ') + + pynutil.delete('month: "' + month + '" day: "' + day + '" ') + + pynini.accep("year:") + + NEMO_SIGMA + ) + mdy_to_dmy_graph = mdy_curr if mdy_to_dmy_graph is None else pynini.union(mdy_to_dmy_graph, mdy_curr) + + ymd_to_dmy_graph = ymd_to_dmy_graph.optimize() + mdy_to_dmy_graph = mdy_to_dmy_graph.optimize() + + patterns = [graph_text] + for sep in separators: + sep_accep = pynini.accep(pynini.escape(sep)) + del_sep = pynutil.delete(sep_accep) + + dmy_core = day_part + del_sep + insert_space + month_part + del_sep + insert_space + year_part + preserve + iso_core = year_part + del_sep + insert_space + month_part + del_sep + insert_space + day_part + preserve + mdy_core = month_part + del_sep + insert_space + day_part + del_sep + insert_space + year_part + preserve + + lhs_dmy = one_or_two_digits + sep_accep + one_or_two_digits + sep_accep + year_four + lhs_iso = year_four + sep_accep + one_or_two_digits + sep_accep + one_or_two_digits + lhs_mdy = one_or_two_digits + sep_accep + one_or_two_digits + sep_accep + year_four + + patterns.append(pynini.compose(lhs_dmy, dmy_core)) + patterns.append( + pynutil.add_weight( + pynini.compose( + pynini.compose(lhs_mdy, mdy_core), + mdy_to_dmy_graph, + ), + _mdy_weight, + ) + ) + patterns.append( + pynini.compose( + pynini.compose(lhs_iso, iso_core), + ymd_to_dmy_graph, + ) + ) + + self.fst = self.add_tokens(pynini.union(*patterns).optimize()).optimize() diff --git a/nemo_text_processing/text_normalization/pt/taggers/decimal.py b/nemo_text_processing/text_normalization/pt/taggers/decimal.py new file mode 100644 index 000000000..d9d5d6094 --- 
/dev/null +++ b/nemo_text_processing/text_normalization/pt/taggers/decimal.py @@ -0,0 +1,91 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import NEMO_DIGIT, GraphFst, delete_space, insert_space +from nemo_text_processing.text_normalization.pt.utils import get_abs_path + + +class DecimalFst(GraphFst): + """ + Finite state transducer for classifying Portuguese decimal numbers, e.g. + "1,26" -> decimal { integer_part: "um" fractional_part: "dois seis" } + "0,01" -> decimal { integer_part: "zero" fractional_part: "zero um" } + "-1,26" -> decimal { negative: "true" ... } + "1,33 milhões" / "1 milhão" -> decimal { ... quantity: "milhões" / "milhão" } + + The fractional mantissa (after the comma) is always read digit-by-digit (0–9), + including leading zeros. Integer part and quantities still use cardinals. + + Args: + cardinal: CardinalFst instance for integer verbalization in tags. 
+ deterministic: if True will provide a single transduction option, + for False multiple options (used for audio-based normalization) + """ + + def __init__(self, cardinal: GraphFst, deterministic: bool = True): + super().__init__(name="decimal", kind="classify", deterministic=deterministic) + cardinal_graph = cardinal.graph + _num = lambda name: pynini.string_file(get_abs_path(f"data/numbers/{name}")).optimize() + + comma = pynutil.delete(",") + quantity_words = _num("quantity_words.tsv") + digit = _num("digit.tsv") + zero = _num("zero.tsv") + graph_digit_or_zero = pynini.union(digit, zero) + digit_by_digit = (graph_digit_or_zero + pynini.closure(insert_space + graph_digit_or_zero)).optimize() + + fractional_digits = pynini.closure(NEMO_DIGIT, 1, 15) + graph_fractional = ( + pynutil.insert('fractional_part: "') + (fractional_digits @ digit_by_digit) + pynutil.insert('"') + ) + + non_zero_lead = pynini.difference(NEMO_DIGIT, pynini.accep("0")) + + graph_integer_zero = ( + pynutil.insert('integer_part: "') + pynini.cross("0", "zero") + pynutil.insert('"') + insert_space + ) + decimal_when_zero = graph_integer_zero + comma + insert_space + graph_fractional + + graph_integer_pos = ( + pynutil.insert('integer_part: "') + + (non_zero_lead + pynini.closure(NEMO_DIGIT, 0, 11)) @ cardinal_graph + + pynutil.insert('"') + + insert_space + ) + decimal_when_pos = graph_integer_pos + comma + insert_space + graph_fractional + + decimal_core = pynini.union(decimal_when_zero, decimal_when_pos) + integer_quantity = ( + pynutil.insert('integer_part: "') + + (pynini.closure(NEMO_DIGIT, 1, 12) @ cardinal_graph) + + pynutil.insert('"') + + insert_space + + delete_space + + pynutil.insert('quantity: "') + + quantity_words + + pynutil.insert('"') + ) + decimal_quantity = ( + decimal_core + delete_space + pynutil.insert('quantity: "') + quantity_words + pynutil.insert('"') + ) + final_graph_wo_sign = pynini.union(decimal_core, integer_quantity, decimal_quantity) + 
self.final_graph_wo_negative = final_graph_wo_sign.optimize() + optional_minus = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", '"true" '), 0, 1) + final_graph = optional_minus + final_graph_wo_sign + + self.fst = self.add_tokens(final_graph).optimize() diff --git a/nemo_text_processing/text_normalization/pt/taggers/electronic.py b/nemo_text_processing/text_normalization/pt/taggers/electronic.py new file mode 100644 index 000000000..ddd89f4e0 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/taggers/electronic.py @@ -0,0 +1,82 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import NEMO_ALPHA, NEMO_DIGIT, NEMO_SPACE, GraphFst +from nemo_text_processing.text_normalization.pt.utils import get_abs_path, load_labels + + +class ElectronicFst(GraphFst): + """ + Finite state transducer for classifying electronic strings in pt-BR: + abc@hotmail.com -> electronic { username: "abc" domain: "hotmail.com" preserve_order: true } + https://www.abc.com -> electronic { protocol: "https://www." 
domain: "abc.com" preserve_order: true } + """ + + def __init__(self, deterministic: bool = True): + super().__init__(name="electronic", kind="classify", deterministic=deterministic) + + full_stop = pynini.accep(".") + at_symbol = "@" + protocol_string = "protocol" + domain_string = "domain" + username_string = "username" + http = "http" + https = "https" + www = "www" + + symbols = [x[0] for x in load_labels(get_abs_path("data/electronic/symbols.tsv"))] + symbols = pynini.union(*symbols) + symbols_no_full_stop = pynini.difference(symbols, full_stop) + accepted_characters = pynini.closure((NEMO_ALPHA | NEMO_DIGIT | symbols_no_full_stop), 1) + all_characters = pynini.closure((NEMO_ALPHA | NEMO_DIGIT | symbols), 1) + + domain_component = full_stop + accepted_characters + domain_graph = ( + pynutil.insert(domain_string + ': "') + + (accepted_characters + pynini.closure(domain_component, 1)) + + pynutil.insert('"') + ) + + username = ( + pynutil.insert(username_string + ': "') + + all_characters + + pynutil.insert('"') + + pynini.cross(at_symbol, NEMO_SPACE) + ) + email = username + domain_graph + + social_tag = ( + pynini.cross(at_symbol, "") + + pynutil.insert(username_string + ': "') + + (accepted_characters | (accepted_characters + pynini.closure(domain_component, 1))) + + pynutil.insert('"') + ) + + protocol_start = pynini.accep(https + "://") | pynini.accep(http + "://") + protocol_end = pynini.accep(www + ".") + if not deterministic: + protocol_end |= pynini.cross(www + ".", "dáblio dáblio dáblio.") + + protocol = protocol_start | protocol_end | (protocol_start + protocol_end) + protocol = pynutil.insert(protocol_string + ': "') + protocol + pynutil.insert('"') + url = protocol + pynutil.insert(NEMO_SPACE) + domain_graph + + graph = url | domain_graph | email | social_tag + self.graph = graph + + final_graph = self.add_tokens(self.graph + pynutil.insert(" preserve_order: true")) + self.fst = final_graph.optimize() diff --git 
a/nemo_text_processing/text_normalization/pt/taggers/fraction.py b/nemo_text_processing/text_normalization/pt/taggers/fraction.py new file mode 100644 index 000000000..b5a206ff0 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/taggers/fraction.py @@ -0,0 +1,117 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import ( + NEMO_DIGIT, + NEMO_SIGMA, + NEMO_WHITE_SPACE, + GraphFst, + insert_space, +) +from nemo_text_processing.text_normalization.pt.utils import get_abs_path, load_labels + + +class FractionFst(GraphFst): + """ + Finite state transducer for classifying Portuguese fraction numbers, e.g. + "1/2" -> fraction { numerator: "um" denominator: "meio" morphosyntactic_features: "ordinal" } + "2 3/4" -> fraction { integer_part: "dois" numerator: "três" denominator: "quarto" ... } + "2/11" -> fraction { numerator: "dois" denominator: "onze" morphosyntactic_features: "avos" } + + Args: + cardinal: CardinalFst instance for number parts. + ordinal: OrdinalFst instance for denominator 2-10 and exceptions. 
    def __init__(self, cardinal: GraphFst, ordinal: GraphFst, deterministic: bool = True):
        """Build the fraction classifier FST.

        Args:
            cardinal: CardinalFst whose ``graph`` verbalizes digit strings.
            ordinal: OrdinalFst instance. NOTE(review): currently unused in this body —
                ordinal denominators are rebuilt from the TSV files below; kept for
                interface compatibility (confirm before removing).
            deterministic: if True will provide a single transduction option,
                for False multiple options (used for audio-based normalization).
        """
        super().__init__(name="fraction", kind="classify", deterministic=deterministic)
        cardinal_graph = cardinal.graph

        # Denominators 2–10 use ordinal form (no data file: fixed set).
        two_to_ten = pynini.union(*[pynini.accep(str(d)) for d in range(2, 11)]).optimize()

        # data/ordinals/*.tsv rows are (ordinal_word, cardinal_word); mapping is inverted
        # here to build a cardinal-word -> ordinal-word FST.
        ord_digit_rows = load_labels(get_abs_path("data/ordinals/digit.tsv"))
        ordinal_digit = pynini.string_map([(r[1], r[0]) for r in ord_digit_rows if len(r) >= 2]).optimize()

        # Exceptions applied after the ordinal rewrite (e.g. irregular denominator forms).
        ord_exc_rows = load_labels(get_abs_path("data/fractions/ordinal_exceptions.tsv"))
        ordinal_exceptions = pynini.string_map([(r[0], r[1]) for r in ord_exc_rows if len(r) >= 2]).optimize()

        ord_hundreds_rows = load_labels(get_abs_path("data/ordinals/hundreds.tsv"))
        ordinal_hundreds = pynini.string_map([(r[1], r[0]) for r in ord_hundreds_rows if len(r) >= 2]).optimize()

        powers_rows = load_labels(get_abs_path("data/fractions/powers_of_ten.tsv"))
        powers_of_ten = pynini.string_map([(r[0], r[1]) for r in powers_rows if len(r) >= 2]).optimize()

        # digits 2-10 -> cardinal word -> ordinal word, then irregular-form rewrite.
        denom_ordinal_form = two_to_ten @ cardinal_graph @ ordinal_digit
        denom_ordinal_form = denom_ordinal_form @ pynini.cdrewrite(ordinal_exceptions, "", "", NEMO_SIGMA)
        denom_ordinal = (
            pynutil.insert('denominator: "')
            + denom_ordinal_form
            + pynutil.insert('" morphosyntactic_features: "ordinal"')
        )

        # 100 and 1000 also take the ordinal feature, via dedicated lookup tables.
        denom_100 = (
            pynutil.insert('denominator: "')
            + (pynini.accep("100") @ cardinal_graph @ ordinal_hundreds)
            + pynutil.insert('" morphosyntactic_features: "ordinal"')
        )
        denom_1000 = (
            pynutil.insert('denominator: "')
            + (pynini.accep("1000") @ cardinal_graph @ powers_of_ten)
            + pynutil.insert('" morphosyntactic_features: "ordinal"')
        )

        denom_ordinal_2_10_100_1000 = pynini.union(denom_ordinal, denom_100, denom_1000)
        digit_plus = pynini.closure(NEMO_DIGIT, 1)
        # Every other denominator is read as a cardinal with "avos" (e.g. "2/11" -> "dois onze avos").
        denom_avos_input = pynini.difference(
            digit_plus,
            pynini.union(
                two_to_ten,
                pynini.accep("100"),
                pynini.accep("1000"),
            ),
        )
        denom_avos = (
            pynutil.insert('denominator: "')
            + (denom_avos_input @ cardinal_graph)
            + pynutil.insert('" morphosyntactic_features: "avos"')
        )

        denominator = pynini.union(denom_ordinal_2_10_100_1000, denom_avos)

        # Slash variants: ASCII /, Unicode ⁄ (U+2044), ∕ (U+2215); with or without spaces.
        # Each also closes the numerator's quoted value ('" ').
        slash_or_space_slash = pynini.union(
            pynini.cross("/", '" '),
            pynini.cross(" / ", '" '),
            pynini.cross("\u2044", '" '),  # fraction slash ⁄
            pynini.cross(" \u2044 ", '" '),
            pynini.cross("\u2215", '" '),  # division slash ∕
            pynini.cross(" \u2215 ", '" '),
        )
        numerator = pynutil.insert('numerator: "') + cardinal_graph + slash_or_space_slash
        fraction_core = numerator + denominator

        integer_part = pynutil.insert('integer_part: "') + cardinal_graph + pynutil.insert('"') + insert_space

        optional_minus = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", '"true" '), 0, 1)

        # Mixed number: integer, whitespace, then the fraction (the whitespace is accepted, not deleted).
        mixed = integer_part + pynini.closure(NEMO_WHITE_SPACE, 1) + fraction_core
        graph = optional_minus + pynini.union(mixed, fraction_core)

        self.fst = self.add_tokens(graph).optimize()
# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pynini
from pynini.lib import pynutil

from nemo_text_processing.text_normalization.pt.graph_utils import (
    NEMO_SIGMA,
    NEMO_SPACE,
    GraphFst,
    convert_space,
    delete_space,
    insert_space,
)
from nemo_text_processing.text_normalization.pt.utils import get_abs_path


class MeasureFst(GraphFst):
    """
    Finite state transducer for classifying measure (pt-BR), e.g.
        200 g -> measure { cardinal { integer: "duzentos" } units: "gramas" }
        1 kg -> measure { cardinal { integer: "um" } units: "quilo" }
        2,4 g -> measure { decimal { ... } units: "gramas" }
        1/2 l -> measure { fraction { ... } units: "litros" }

    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
        fraction: FractionFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, decimal: GraphFst, fraction: GraphFst, deterministic: bool = True):
        super().__init__(name="measure", kind="classify", deterministic=deterministic)

        unit_singular = pynini.string_file(get_abs_path("data/measure/measurements_singular.tsv"))
        unit_plural = pynini.string_file(get_abs_path("data/measure/measurements_plural.tsv"))

        graph_unit_singular = convert_space(unit_singular)
        graph_unit_plural = convert_space(unit_plural)

        optional_graph_negative = pynini.closure(pynini.accep("-"), 0, 1)

        unit_plural = pynutil.insert('units: "') + graph_unit_plural + pynutil.insert('"')
        unit_singular_graph = pynutil.insert('units: "') + graph_unit_singular + pynutil.insert('"')

        # FIX: the decimal branch previously used pynini.closure(NEMO_SPACE, 0, 1), which
        # accepts (and keeps) the input space; combined with the preceding insert_space this
        # produced a double space between the number and the units for inputs like "2,4 g".
        # Deleting the optional input space matches the cardinal and fraction branches below.
        subgraph_decimal = decimal.fst + insert_space + pynini.closure(delete_space, 0, 1) + unit_plural

        # Any quantity other than exactly "1" takes the plural unit form.
        subgraph_cardinal = (
            (optional_graph_negative + (NEMO_SIGMA - "1")) @ cardinal.fst
            + insert_space
            + pynini.closure(delete_space, 0, 1)
            + unit_plural
        )

        # Exactly "1" (optionally negative) takes the singular unit form.
        subgraph_cardinal |= (
            (optional_graph_negative + pynini.accep("1")) @ cardinal.fst
            + insert_space
            + pynini.closure(delete_space, 0, 1)
            + unit_singular_graph
        )

        # Fractions always take the plural unit form (e.g. "1/2 l" -> "litros").
        subgraph_fraction = fraction.fst + insert_space + pynini.closure(delete_space, 0, 1) + unit_plural

        final_graph = subgraph_decimal | subgraph_cardinal | subgraph_fraction
        self.fst = self.add_tokens(final_graph).optimize()
import pynini
from pynini.lib import pynutil

from nemo_text_processing.text_normalization.pt.graph_utils import (
    NEMO_ALPHA,
    NEMO_DIGIT,
    NEMO_SIGMA,
    NEMO_SPACE,
    GraphFst,
    delete_space,
    insert_space,
)
from nemo_text_processing.text_normalization.pt.utils import get_abs_path, load_labels


class MoneyFst(GraphFst):
    """
    Finite state transducer for classifying money (pt-BR), e.g.
        R$ 12 -> money { currency_maj: "reais" integer_part: "doze" }
        R$ 12,05 -> money { currency_maj: "reais" integer_part: "doze" fractional_part: "cinco"
                            currency_min: "centavos" preserve_order: true }
        R$ 0,20 -> money { fractional_part: "vinte" currency_min: "centavos" preserve_order: true }

    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, decimal: GraphFst, deterministic: bool = True):
        super().__init__(name="money", kind="classify", deterministic=deterministic)

        decimal_separator = pynini.accep(",")
        # Currency TSVs map symbol -> singular word; the *_plural files map singular -> plural.
        maj_singular = pynini.string_file(get_abs_path("data/money/currency_major.tsv"))
        maj_plural_map = pynini.string_file(get_abs_path("data/money/currency_major_plural.tsv"))
        maj_plural_graph = maj_singular @ maj_plural_map
        min_singular = pynini.string_file(get_abs_path("data/money/currency_minor.tsv"))
        min_plural_map = pynini.string_file(get_abs_path("data/money/currency_minor_plural.tsv"))
        min_plural_graph = min_singular @ min_plural_map

        cardinal_graph = cardinal.graph
        # Decimal tagger output without the sign, reused here for "R$ 1,5 milhão"-style amounts.
        graph_decimal_final = decimal.final_graph_wo_negative

        graph_maj_singular = pynutil.insert('currency_maj: "') + maj_singular + pynutil.insert('"')
        graph_maj_plural = pynutil.insert('currency_maj: "') + maj_plural_graph + pynutil.insert('"')

        graph_integer_one = (
            pynutil.insert('integer_part: "') + (pynini.accep("1") @ cardinal_graph) + pynutil.insert('"')
        )

        # Requires a trailing alphabetic char, i.e. a decimal followed by a quantity word.
        decimal_with_quantity = (NEMO_SIGMA + NEMO_ALPHA) @ graph_decimal_final

        # Currency symbol may precede or follow the amount; plural form for amounts != 1.
        graph_decimal_plural = pynini.union(
            graph_maj_plural + pynini.closure(delete_space, 0, 1) + insert_space + graph_decimal_final,
            graph_decimal_final + pynini.closure(delete_space, 0, 1) + insert_space + graph_maj_plural,
        )
        graph_decimal_plural = ((NEMO_SIGMA - "1") + decimal_separator + NEMO_SIGMA) @ graph_decimal_plural

        graph_decimal_singular = pynini.union(
            graph_maj_singular + pynini.closure(delete_space, 0, 1) + insert_space + graph_decimal_final,
            graph_decimal_final + pynini.closure(delete_space, 0, 1) + insert_space + graph_maj_singular,
        )
        graph_decimal_singular = (pynini.accep("1") + decimal_separator + NEMO_SIGMA) @ graph_decimal_singular

        graph_decimal = pynini.union(
            graph_decimal_singular,
            graph_decimal_plural,
            graph_maj_plural + pynini.closure(delete_space, 0, 1) + insert_space + decimal_with_quantity,
        )

        graph_integer = pynutil.insert('integer_part: "') + ((NEMO_SIGMA - "1") @ cardinal_graph) + pynutil.insert('"')

        # Integer-only amounts: singular currency with "1", plural otherwise; symbol on either side.
        graph_integer_only = pynini.union(
            graph_maj_singular + pynini.closure(delete_space, 0, 1) + insert_space + graph_integer_one,
            graph_integer_one + pynini.closure(delete_space, 0, 1) + insert_space + graph_maj_singular,
        )
        graph_integer_only |= pynini.union(
            graph_maj_plural + pynini.closure(delete_space, 0, 1) + insert_space + graph_integer,
            graph_integer + pynini.closure(delete_space, 0, 1) + insert_space + graph_maj_plural,
        )

        graph = graph_integer_only | graph_decimal

        # Normalizes a cents field to its two-digit value, e.g. "2" -> "20", "05" -> "5",
        # "20" (with trailing zeros stripped first) -> "20".
        two_digits_fractional_part = (
            pynini.closure(NEMO_DIGIT) + (NEMO_DIGIT - "0") + pynini.closure(pynutil.delete("0"))
        ) @ (
            (pynutil.delete("0") + (NEMO_DIGIT - "0"))
            | ((NEMO_DIGIT - "0") + pynutil.insert("0"))
            | ((NEMO_DIGIT - "0") + NEMO_DIGIT)
        )

        graph_min_singular = pynutil.insert('currency_min: "') + min_singular + pynutil.insert('"')
        graph_min_plural = pynutil.insert('currency_min: "') + min_plural_graph + pynutil.insert('"')

        # Build a major+minor ("reais ... centavos") branch per currency symbol, so the
        # symbol can be deleted from the input while the matching words are inserted.
        maj_singular_labels = load_labels(get_abs_path("data/money/currency_major.tsv"))
        decimal_graph_with_minor = None
        for curr_symbol, _ in maj_singular_labels:
            preserve_order = pynutil.insert(" preserve_order: true")

            integer_plus_maj = pynini.union(
                graph_integer + insert_space + pynutil.insert(curr_symbol) @ graph_maj_plural,
                graph_integer_one + insert_space + pynutil.insert(curr_symbol) @ graph_maj_singular,
            )
            # Reject an integer part of exactly "0" (handled by the "0,xx" branch below).
            integer_plus_maj = (pynini.closure(NEMO_DIGIT) - "0") @ integer_plus_maj

            graph_fractional_one = (
                pynutil.insert('fractional_part: "')
                + (two_digits_fractional_part @ pynini.cross("1", "um"))
                + pynutil.insert('"')
            )

            graph_fractional = (
                two_digits_fractional_part @ (pynini.closure(NEMO_DIGIT, 1, 2) - "1") @ cardinal.two_digit_non_zero
            )
            graph_fractional = pynutil.insert('fractional_part: "') + graph_fractional + pynutil.insert('"')

            fractional_plus_min = pynini.union(
                graph_fractional + insert_space + pynutil.insert(curr_symbol) @ graph_min_plural,
                graph_fractional_one + insert_space + pynutil.insert(curr_symbol) @ graph_min_singular,
            )

            decimal_graph_with_minor_curr = (
                integer_plus_maj + pynini.cross(decimal_separator, NEMO_SPACE) + fractional_plus_min
            )
            if not deterministic:
                # Non-deterministic alternative: read the cents as a plain two-digit cardinal
                # without the minor-currency word (lightly penalized).
                decimal_graph_with_minor_curr |= pynutil.add_weight(
                    integer_plus_maj
                    + pynini.cross(decimal_separator, NEMO_SPACE)
                    + pynutil.insert('fractional_part: "')
                    + two_digits_fractional_part @ cardinal.two_digit_non_zero
                    + pynutil.insert('"'),
                    weight=0.0001,
                )

            # "0,xx" amounts: drop the zero integer part entirely (e.g. R$ 0,20 -> "vinte centavos").
            decimal_graph_with_minor_curr |= pynutil.delete("0,") + fractional_plus_min
            decimal_graph_with_minor_curr = pynini.union(
                pynutil.delete(curr_symbol)
                + pynini.closure(delete_space, 0, 1)
                + decimal_graph_with_minor_curr
                + preserve_order,
                decimal_graph_with_minor_curr
                + preserve_order
                + pynini.closure(delete_space, 0, 1)
                + pynutil.delete(curr_symbol),
            )

            decimal_graph_with_minor = (
                decimal_graph_with_minor_curr
                if decimal_graph_with_minor is None
                else pynini.union(decimal_graph_with_minor, decimal_graph_with_minor_curr)
            )

        # Negative weight prefers the major+minor reading over the plain decimal reading.
        final_graph = graph | pynutil.add_weight(decimal_graph_with_minor, -0.001)

        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
import pynini
from pynini.lib import pynutil

from nemo_text_processing.text_normalization.pt.graph_utils import NEMO_DIGIT, NEMO_SPACE, GraphFst
from nemo_text_processing.text_normalization.pt.utils import get_abs_path, load_labels


class OrdinalFst(GraphFst):
    """
    Finite state transducer for classifying Portuguese ordinals, e.g.
        "1º" / "1ª" -> ordinal { integer: "primeiro" / "primeira" morphosyntactic_features: "gender_masc" / "gender_fem" }
        "21º" -> ordinal { integer: "vigésimo primeiro" morphosyntactic_features: "gender_masc" }

    Args:
        cardinal: CardinalFst instance for composing compound ordinals.
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, deterministic: bool = True):
        super().__init__(name="ordinal", kind="classify", deterministic=deterministic)
        cardinal_graph = cardinal.graph

        # specials.tsv supplies the connector spelling between ordinal words
        # (defaults: cardinal-side " e " rewritten to a single space).
        spec_rows = load_labels(get_abs_path("data/ordinals/specials.tsv"))
        spec = {r[0]: r[1] for r in spec_rows if len(r) >= 2}
        conn_in = spec.get("connector_in", " e ")
        conn_out = spec.get("connector_out", NEMO_SPACE)
        conn = pynini.cross(conn_in, conn_out)

        # Data rows are (ordinal_word, cardinal_word); inverted here to map
        # cardinal words to their ordinal forms.
        digit_rows = load_labels(get_abs_path("data/ordinals/digit.tsv"))
        graph_digit = pynini.string_map([(r[1], r[0]) for r in digit_rows if len(r) >= 2]).optimize()
        teen_rows = load_labels(get_abs_path("data/ordinals/teen.tsv"))
        graph_teens = pynini.string_map([(r[1], r[0]) for r in teen_rows if len(r) >= 2]).optimize()
        ties_rows = load_labels(get_abs_path("data/ordinals/ties.tsv"))
        graph_ties = pynini.string_map([(r[1], r[0]) for r in ties_rows if len(r) >= 2]).optimize()
        hundreds_rows = load_labels(get_abs_path("data/ordinals/hundreds.tsv"))
        graph_hundreds = pynini.string_map([(r[1], r[0]) for r in hundreds_rows if len(r) >= 2]).optimize()

        # Compose tens/hundreds components with an optional connector + lower component,
        # mirroring how the cardinal spells compound numbers.
        graph_tens = pynini.union(
            graph_teens,
            graph_ties + pynini.closure(conn + graph_digit, 0, 1),
        )
        graph_hundred_component = pynini.union(
            graph_hundreds + pynini.closure(conn + pynini.union(graph_tens, graph_digit), 0, 1),
            graph_tens,
            graph_digit,
        )
        ordinal_rewrite = graph_hundred_component.optimize()
        # digits -> cardinal words -> ordinal words.
        ordinal_inner = cardinal_graph @ ordinal_rewrite

        # Ordinal suffixes: optional "." before º (or degree sign °) / ª; suffix is deleted.
        opt_dot = pynini.closure(pynutil.delete("."), 0, 1)
        suffix_masc = opt_dot + pynutil.delete(pynini.union("º", "°"))
        suffix_fem = opt_dot + pynutil.delete("ª")
        # Up to three digits (1–999) are supported.
        digit_block = pynini.closure(NEMO_DIGIT, 1, 3)

        to_ordinal_masc = (digit_block + suffix_masc) @ ordinal_inner
        to_ordinal_fem = (digit_block + suffix_fem) @ ordinal_inner

        graph_masc = (
            pynutil.insert('integer: "')
            + to_ordinal_masc
            + pynutil.insert('" morphosyntactic_features: "gender_masc"')
        )
        graph_fem = (
            pynutil.insert('integer: "') + to_ordinal_fem + pynutil.insert('" morphosyntactic_features: "gender_fem"')
        )
        self.fst = self.add_tokens(pynini.union(graph_masc, graph_fem)).optimize()
+ +import os +import sys +from unicodedata import category + +import pynini +from pynini.examples import plurals +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import NEMO_NOT_SPACE, NEMO_SIGMA, GraphFst +from nemo_text_processing.text_normalization.pt.utils import get_abs_path, load_labels + + +class PunctuationFst(GraphFst): + """ + Finite state transducer for classifying punctuation (pt-BR pipeline). + Mirrors the English punctuation tagger; whitelist symbols are excluded via data/whitelist/symbol.tsv. + """ + + def __init__(self, deterministic: bool = True): + super().__init__(name="punctuation", kind="classify", deterministic=deterministic) + s = "!#%&\'()*+,-./:;<=>?@^_`{|}~\"" + + punct_symbols_to_exclude = ["[", "]"] + punct_unicode = [ + chr(i) + for i in range(sys.maxunicode) + if category(chr(i)).startswith("P") and chr(i) not in punct_symbols_to_exclude + ] + [r"\[", r"\]"] + + symbol_path = get_abs_path("data/whitelist/symbol.tsv") + whitelist_rows = load_labels(symbol_path) if os.path.isfile(symbol_path) else [] + whitelist_symbols = [x[0] for x in whitelist_rows if x] + self.punct_marks = [p for p in punct_unicode + list(s) if p not in whitelist_symbols] + + punct = pynini.union(*self.punct_marks) + punct = pynini.closure(punct, 1) + + emphasis = ( + pynini.accep("<") + + ( + (pynini.closure(NEMO_NOT_SPACE - pynini.union("<", ">"), 1) + pynini.closure(pynini.accep("/"), 0, 1)) + | (pynini.accep("/") + pynini.closure(NEMO_NOT_SPACE - pynini.union("<", ">"), 1)) + ) + + pynini.accep(">") + ) + punct = plurals._priority_union(emphasis, punct, NEMO_SIGMA) + + self.graph = punct + self.fst = (pynutil.insert("name: \"") + self.graph + pynutil.insert("\"")).optimize() diff --git a/nemo_text_processing/text_normalization/pt/taggers/telephone.py b/nemo_text_processing/text_normalization/pt/taggers/telephone.py new file mode 100644 index 000000000..a24e30c67 --- /dev/null +++ 
import pynini
from pynini.lib import pynutil

from nemo_text_processing.text_normalization.pt.graph_utils import NEMO_SPACE, NEMO_WHITE_SPACE, GraphFst, insert_space
from nemo_text_processing.text_normalization.pt.utils import get_abs_path


class TelephoneFst(GraphFst):
    """
    Finite state transducer for classifying pt-BR telephone and IP formats, e.g.
        (11) 99999-8888 -> telephone { number_part: "um um nove nove nove nove nove oito oito oito oito" }
        +55 11 3333-4444 -> telephone { country_code: "mais cinco cinco" number_part: "um um três três três três quatro quatro quatro quatro" }
        192.168.1.1 -> telephone { number_part: "um nove dois ponto um seis oito ponto um ponto um" }
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="telephone", kind="classify", deterministic=deterministic)

        digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv")).optimize()
        zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv")).optimize()
        single_digits = (digit | zero).optimize()

        # Only strip grouping punctuation between digit blocks; do not delete spaces
        # (deleting spaces would glue spoken digit words together in the output).
        separators = pynini.union("-", ".")
        # Optional separator after country / prompt (still allow +55 11 …).
        delete_optional_sep = pynini.closure(pynutil.delete(separators), 0, 1)
        # Require an explicit separator between local digit blocks so plain long cardinals
        # (e.g. 3022110709) are not misclassified as telephone.
        delete_required_separator = pynutil.delete(separators)
        delete_optional_spaces = pynini.closure(pynutil.delete(NEMO_WHITE_SPACE), 0)

        def n_digits(n: int):
            # Exactly n spoken digits, space-separated.
            return pynini.closure(single_digits + insert_space, n - 1, n - 1) + single_digits

        country_digits = n_digits(1) | n_digits(2) | n_digits(3)
        # "+" is read as "mais" before the country digits.
        country_code = pynini.cross("+", "mais ") + country_digits

        ip_prompts = pynini.string_file(get_abs_path("data/telephone/ip_prompt.tsv"))
        telephone_prompts = pynini.string_file(get_abs_path("data/telephone/telephone_prompt.tsv"))
        tel_prompt_sequence = telephone_prompts + NEMO_SPACE + pynini.closure(country_code, 0, 1)

        country_code_graph = (
            pynutil.insert('country_code: "')
            + (country_code | ip_prompts | tel_prompt_sequence)
            + delete_optional_sep
            + pynutil.insert('"')
        )

        # Two-digit area code, with or without parentheses (which are deleted).
        area_code = (pynutil.delete("(") + n_digits(2) + pynutil.delete(")")) | n_digits(2)

        # Brazilian formats: 11 digits (mobile: AA NNNNN-NNNN), 10 digits (landline:
        # AA NNNN-NNNN), plus local-only 9/8/7-digit variants.
        eleven_digit_graph = (
            area_code
            + delete_optional_spaces
            + insert_space
            + n_digits(5)
            + delete_required_separator
            + insert_space
            + n_digits(4)
        )
        ten_digit_graph = (
            area_code
            + delete_optional_spaces
            + insert_space
            + n_digits(4)
            + delete_required_separator
            + insert_space
            + n_digits(4)
        )
        nine_digit_graph = n_digits(5) + delete_required_separator + insert_space + n_digits(4)
        eight_digit_graph = n_digits(4) + delete_required_separator + insert_space + n_digits(4)
        seven_digit_graph = n_digits(3) + delete_required_separator + insert_space + n_digits(4)

        # IPv4-style: four 1-3 digit octets joined by ".", read as "ponto".
        digit_to_str_graph = single_digits + pynini.closure(pynutil.insert(" ") + single_digits, 0, 2)
        ip_graph = digit_to_str_graph + (pynini.cross(".", " ponto ") + digit_to_str_graph) ** 3

        number_part = (
            eleven_digit_graph
            | ten_digit_graph
            | nine_digit_graph
            | eight_digit_graph
            | seven_digit_graph
            | pynutil.add_weight(ip_graph, 0.01)  # slightly penalized vs phone formats
        )
        number_part = pynutil.insert('number_part: "') + number_part + pynutil.insert('"')

        # "ramal" -> spoken "ramal …"; "extensão" / "ext." -> spoken "extensão …" (not "ext." letter-by-letter).
        ext_core = n_digits(1) + pynini.closure(insert_space + n_digits(1), 0, 3)
        extension_intro = delete_optional_spaces + (
            (pynutil.delete("ramal") + delete_optional_spaces + pynutil.insert("ramal "))
            | (
                (pynutil.delete("extensão") | pynutil.delete("ext."))
                + delete_optional_spaces
                + pynutil.insert("extensão ")
            )
        )
        ext_graph = pynutil.insert('extension: "') + extension_intro + ext_core + pynutil.insert('"')

        # country code and extension are both optional around the mandatory number part.
        graph = (
            pynini.closure(country_code_graph + delete_optional_spaces + insert_space, 0, 1)
            + number_part
            + pynini.closure(delete_optional_spaces + insert_space + ext_graph, 0, 1)
        )

        self.fst = self.add_tokens(graph).optimize()
import pynini
from pynini.lib import pynutil

from nemo_text_processing.text_normalization.pt.graph_utils import GraphFst, delete_space, insert_space
from nemo_text_processing.text_normalization.pt.utils import get_abs_path, load_labels


class TimeFst(GraphFst):
    """
    Finite state transducer for classifying Portuguese (Brazilian) time, e.g.
    14:30 -> time { hours: "catorze" minutes: "trinta" preserve_order: true }
    14:30:05 -> time { hours: "catorze" minutes: "trinta" seconds: "cinco" preserve_order: true }
    09:00:31 -> time { hours: "nove" minutes: "zero" seconds: "trinta e um" preserve_order: true }
    12:00 -> time { hours: "doze" preserve_order: true }
    11:00 da manhã -> time { hours: "onze" suffix: "da manhã" preserve_order: true }
    16:00 da tarde -> time { hours: "quatro" suffix: "da tarde" preserve_order: true }
    23:18 da tarde -> time { hours: "vinte e três" ... suffix: "da tarde" preserve_order: true }

    Args:
        cardinal: CardinalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, deterministic: bool = True):
        super().__init__(name="time", kind="classify", deterministic=deterministic)
        cardinal_graph = cardinal.graph.optimize()

        # Precompute the spoken word for every hour 0..23 by composing the digit
        # string with the cardinal grammar and taking the single shortest path.
        hour_words = []
        for h in range(24):
            key = str(h)
            comp = pynini.compose(pynini.accep(key), cardinal_graph).optimize()
            hour_words.append(pynini.shortestpath(comp, nshortest=1, unique=True).string())

        # Deleters for the written hour: single-digit hours also accept a leading zero ("09").
        hour_delete_fsts = []
        for h in range(24):
            if h < 10:
                hour_delete_fsts.append(pynini.union(pynutil.delete(str(h)), pynutil.delete(f"0{h}")).optimize())
            else:
                hour_delete_fsts.append(pynutil.delete(str(h)))

        labels_minute_single = [str(x) for x in range(1, 10)]
        labels_minute_double = [str(x) for x in range(10, 60)]

        graph_minute_single = pynini.union(*labels_minute_single) @ cardinal_graph
        graph_minute_double = pynini.union(*labels_minute_double) @ cardinal_graph
        final_graph_minute = (
            pynutil.insert('minutes: "')
            + (pynutil.delete("0") + graph_minute_single | graph_minute_double)
            + pynutil.insert('"')
        )

        final_graph_second = (
            pynutil.insert('seconds: "')
            + (pynutil.delete("0") + graph_minute_single | graph_minute_double)
            + pynutil.insert('"')
        )

        # HMS verbalizer always expects ``minutes`` and ``seconds`` tags; bare ``delete("00")`` omits them.
        zero_word = hour_words[0]
        minutes_zero = (
            pynutil.delete("00") + pynutil.insert('minutes: "') + pynutil.insert(zero_word) + pynutil.insert('"')
        )
        seconds_zero = (
            pynutil.delete("00") + pynutil.insert('seconds: "') + pynutil.insert(zero_word) + pynutil.insert('"')
        )

        delete_h = pynini.union(
            pynutil.delete(pynini.accep(pynini.escape("h"))),
            pynutil.delete(pynini.accep(pynini.escape("H"))),
        )

        time_delim = pynini.union(
            pynini.accep(pynini.escape(":")),
            pynini.accep(pynini.escape(".")),
        )

        # day_period_suffix.tsv rows: (tail, tag, hour_min, hour_max); the hour range controls
        # whether a 12h conversion is applied when the period suffix is present.
        period_rows = load_labels(get_abs_path("data/time/day_period_suffix.tsv"))
        period_meta = []
        for row in period_rows:
            if len(row) < 2 or not row[0].strip():
                continue
            tail, tag_val = row[0].strip(), row[1].strip()
            if len(row) < 4 or not row[2].strip().isdigit() or not row[3].strip().isdigit():
                raise ValueError(
                    f"day_period_suffix.tsv row must have 4 columns (tail, tag, hour_min, hour_max): {row!r}"
                )
            h0, h1 = int(row[2].strip()), int(row[3].strip())
            allowed = frozenset(range(h0, h1 + 1))
            suf_fst = insert_space + delete_space + pynutil.delete("da") + delete_space + pynutil.delete(tail)
            period_meta.append((tag_val, allowed, suf_fst, tail))

        preserve = pynutil.insert(" preserve_order: true")

        mid_hm = pynutil.delete(time_delim) + (pynutil.delete("00") | insert_space + final_graph_minute)
        mid_h_minute = delete_h + (pynutil.delete("00") | insert_space + final_graph_minute)
        mid_h_only = delete_h
        # BUG FIX: the original placed ``insert_space`` only on the non-zero alternatives, so
        # "09:00:31" produced the fused token string ``hours: "nove"minutes: "zero" ...``.
        # Hoisting ``insert_space`` before the union gives every alternative its separator.
        mid_hms = (
            pynutil.delete(time_delim)
            + insert_space
            + (minutes_zero | final_graph_minute)
            + pynutil.delete(time_delim)
            + insert_space
            + (seconds_zero | final_graph_second)
        )

        graph_chunks = []
        for mid_after_hour in (mid_hm, mid_h_minute, mid_h_only, mid_hms):
            branches = []
            for h in range(24):
                hd = hour_delete_fsts[h]
                hw24 = hour_words[h]
                hour_tok_24 = pynutil.insert('hours: "') + pynutil.insert(hw24) + pynutil.insert('"')
                branches.append(hd + hour_tok_24 + mid_after_hour + preserve)
                for tag_val, allowed, suf, tail in period_meta:
                    keep_suffix, hour_idx = TimeFst._resolve_suffix_hour(h, tail, allowed)
                    hw_suf = hour_words[hour_idx]
                    hour_tok_suf = pynutil.insert('hours: "') + pynutil.insert(hw_suf) + pynutil.insert('"')
                    if keep_suffix:
                        branches.append(
                            hd
                            + hour_tok_suf
                            + mid_after_hour
                            + suf
                            + pynutil.insert(f' suffix: "{tag_val}"')
                            + preserve
                        )
                    else:
                        # User wrote a period: always emit ``suffix:`` so TN does not drop it from speech
                        # (hours stay 24h when the period does not match the clock policy).
                        branches.append(
                            hd
                            + hour_tok_24
                            + mid_after_hour
                            + suf
                            + pynutil.insert(f' suffix: "{tag_val}"')
                            + preserve
                        )
            graph_chunks.append(pynini.union(*branches).optimize())

        final_graph = pynini.union(*graph_chunks).optimize()
        self.fst = self.add_tokens(final_graph).optimize()

    @staticmethod
    def _resolve_suffix_hour(h: int, period_tail: str, allowed: frozenset) -> tuple[bool, int]:
        """Return (keep_suffix, hour_index) for ``hour_words[hour_index]`` when a day-period applies."""
        if period_tail == "manhã":
            allowed_m = allowed | frozenset({1, 2, 3, 4, 5})
            if h not in allowed_m:
                return False, h
            return True, h
        if period_tail == "tarde":
            if h in allowed:
                return True, 12 if h == 12 else h - 12
            if 1 <= h <= 5 and (h + 12) in allowed:
                return True, h
            return False, h
        if period_tail == "noite":
            if h in allowed:
                # NOTE(review): assumes the "noite" range in the TSV only contains h >= 12;
                # if it ever includes 0..11, ``h - 12`` silently wraps via negative indexing
                # into ``hour_words`` — confirm against data/time/day_period_suffix.tsv.
                return True, h - 12
            if 6 <= h <= 11 and (h + 12) in allowed:
                return True, h
            return False, h
        if period_tail == "madrugada":
            if h in allowed:
                return True, h
            return False, h
        return False, h
import os

import pynini
from pynini.lib import pynutil

from nemo_text_processing.text_normalization.pt.graph_utils import (
    NEMO_WHITE_SPACE,
    GraphFst,
    delete_extra_space,
    delete_space,
    generator_main,
)
from nemo_text_processing.text_normalization.pt.taggers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.pt.taggers.date import DateFst
from nemo_text_processing.text_normalization.pt.taggers.decimal import DecimalFst
from nemo_text_processing.text_normalization.pt.taggers.electronic import ElectronicFst
from nemo_text_processing.text_normalization.pt.taggers.fraction import FractionFst
from nemo_text_processing.text_normalization.pt.taggers.measure import MeasureFst
from nemo_text_processing.text_normalization.pt.taggers.money import MoneyFst
from nemo_text_processing.text_normalization.pt.taggers.ordinal import OrdinalFst
from nemo_text_processing.text_normalization.pt.taggers.punctuation import PunctuationFst
from nemo_text_processing.text_normalization.pt.taggers.telephone import TelephoneFst
from nemo_text_processing.text_normalization.pt.taggers.time import TimeFst
from nemo_text_processing.text_normalization.pt.taggers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.pt.taggers.word import WordFst
from nemo_text_processing.utils.logging import logger


class ClassifyFst(GraphFst):
    """
    Final class that composes all Portuguese classification grammars. This class can process an
    entire sentence (lower cased). For deployment, this grammar will be compiled and exported to
    an OpenFst Finite State Archive (FAR) file.
    More details on deployment at NeMo/tools/text_processing_deployment.

    Args:
        input_case: accepting either "lower_cased" or "cased" input.
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files.
        whitelist: path to a file with whitelist replacements.
    """

    def __init__(
        self,
        input_case: str,
        deterministic: bool = False,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        whitelist: str = None,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify", deterministic=deterministic)
        far_file = None
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            # Only the basename of the whitelist file participates in the cache key.
            whitelist_file = os.path.basename(whitelist) if whitelist else ""
            far_file = os.path.join(
                cache_dir,
                f"_{input_case}_pt_tn_{deterministic}_deterministic{whitelist_file}.far",
            )
        if not overwrite_cache and far_file and os.path.exists(far_file):
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            logger.info(f"ClassifyFst.fst was restored from {far_file}.")
        else:
            # Plain string: the original used an f-string with no placeholders.
            logger.info("Creating ClassifyFst grammars. This might take some time...")

            # Initialize Portuguese taggers
            cardinal = CardinalFst(deterministic=deterministic)
            ordinal = OrdinalFst(cardinal, deterministic=deterministic)
            fraction = FractionFst(cardinal, ordinal, deterministic=deterministic)
            decimal = DecimalFst(cardinal, deterministic=deterministic)
            measure = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction, deterministic=deterministic)
            money = MoneyFst(cardinal=cardinal, decimal=decimal, deterministic=deterministic)
            date = DateFst(cardinal, deterministic=deterministic)
            time = TimeFst(cardinal, deterministic=deterministic)
            telephone = TelephoneFst(deterministic=deterministic)
            electronic = ElectronicFst(deterministic=deterministic)

            punctuation = PunctuationFst(deterministic=deterministic)
            word_graph = WordFst(punctuation=punctuation, deterministic=deterministic).fst
            # FIX: do not rebind the ``whitelist`` parameter (a file path) to the grammar object;
            # keep them in distinct names so later reads of the path stay valid.
            whitelist_fst = WhiteListFst(input_case=input_case, deterministic=deterministic, input_file=whitelist)

            # Lower weight = higher priority; the plain-word fallback is heavily penalized.
            classify = (
                pynutil.add_weight(whitelist_fst.fst, 1.01)
                | pynutil.add_weight(date.fst, 1.1)
                | pynutil.add_weight(time.fst, 1.1)
                | pynutil.add_weight(measure.fst, 1.1)
                | pynutil.add_weight(fraction.fst, 1.1)
                | pynutil.add_weight(decimal.fst, 1.1)
                | pynutil.add_weight(ordinal.fst, 1.1)
                | pynutil.add_weight(cardinal.fst, 1.1)
                | pynutil.add_weight(money.fst, 1.1)
                | pynutil.add_weight(telephone.fst, 1.11)
                | pynutil.add_weight(electronic.fst, 1.11)
                | pynutil.add_weight(word_graph, 100)
            )

            # Wrap tokens properly
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            punct_graph = (
                pynutil.insert("tokens { ") + pynutil.add_weight(punctuation.fst, weight=2.1) + pynutil.insert(" }")
            )

            # Simple graph structure: a token, then whitespace-separated tokens.
            graph = token + pynini.closure(
                pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space) + token
            )

            # Allow punctuation
            graph |= punct_graph

            self.fst = delete_space + graph + delete_space

            if far_file:
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logger.info(f"ClassifyFst grammars are saved to {far_file}.")


if __name__ == "__main__":
    ClassifyFst(input_case="cased", deterministic=False)
import os

import pynini
from pynini.lib import pynutil

from nemo_text_processing.text_normalization.en.utils import augment_labels_with_punct_at_end
from nemo_text_processing.text_normalization.pt.graph_utils import (
    INPUT_CASED,
    INPUT_LOWER_CASED,
    NEMO_SIGMA,
    NEMO_UPPER,
    GraphFst,
    convert_space,
)
from nemo_text_processing.text_normalization.pt.utils import get_abs_path, load_labels


def _empty_fst() -> "pynini.FstLike":
    """FST that accepts nothing (no whitelist rows).

    Intersecting two disjoint single-string acceptors yields an FST with an
    empty language, used as a neutral element when a data file is missing/empty.
    """
    return pynini.intersect(pynini.accep("a"), pynini.accep("b")).optimize()


def get_formats(input_f, input_case=INPUT_CASED, is_default=True):
    """Abbreviation format variants (same idea as EN whitelist).

    For each (written, spoken) row, also generate the dotted form ("abbr."),
    the capitalized form, and the capitalized dotted form.

    NOTE(review): assumes every TSV row has exactly two columns — a short row
    would raise on the ``for x, y in multiple_formats`` unpack; confirm the
    alternatives_all_format.tsv schema.
    """
    multiple_formats = load_labels(input_f)
    if not multiple_formats:
        return _empty_fst()
    additional_options = []
    for x, y in multiple_formats:
        if input_case == INPUT_LOWER_CASED:
            x = x.lower()
        additional_options.append((f"{x}.", y))
        additional_options.append((f"{x[0].upper() + x[1:]}", f"{y[0].upper() + y[1:]}"))
        additional_options.append((f"{x[0].upper() + x[1:]}.", f"{y[0].upper() + y[1:]}"))
    multiple_formats.extend(additional_options)

    if not is_default:
        # Non-default mode tags each pair with raw/norm markers for downstream alignment.
        multiple_formats = [(x, f"|raw_start|{x}|raw_end||norm_start|{y}|norm_end|") for (x, y) in multiple_formats]

    return pynini.string_map(multiple_formats)


class WhiteListFst(GraphFst):
    """
    Whitelist classifier for pt-BR TN. Data lives under pt/data/whitelist/ (may be empty).
    """

    def __init__(self, input_case: str, deterministic: bool = True, input_file: str = None):
        super().__init__(name="whitelist", kind="classify", deterministic=deterministic)

        def _get_whitelist_graph(input_case, file, keep_punct_add_end: bool = False):
            # Missing or empty files map to the empty-language FST instead of raising.
            whitelist = load_labels(file) if os.path.isfile(file) else []
            if not whitelist:
                return _empty_fst()
            if input_case == INPUT_LOWER_CASED:
                whitelist = [[x.lower(), y] for x, y in whitelist]
            else:
                whitelist = [[x, y] for x, y in whitelist]

            if keep_punct_add_end:
                # Also match entries followed by trailing punctuation.
                whitelist.extend(augment_labels_with_punct_at_end(whitelist))

            return pynini.string_map(whitelist)

        graph = _get_whitelist_graph(input_case, get_abs_path("data/whitelist/tts.tsv"))

        symbol_path = get_abs_path("data/whitelist/symbol.tsv")
        if os.path.isfile(symbol_path) and load_labels(symbol_path):
            # Symbols apply only to inputs not containing "/" (composition filters them out).
            graph |= pynini.compose(
                pynini.difference(NEMO_SIGMA, pynini.accep("/")).optimize(),
                _get_whitelist_graph(input_case, symbol_path),
            ).optimize()

        # Collapse dotted uppercase acronyms ("U.S.A." / "U. S. A.") to their bare letters.
        for x in [".", ". "]:
            graph |= (
                NEMO_UPPER
                + pynini.closure(pynutil.delete(x) + NEMO_UPPER, 2)
                + pynini.closure(pynutil.delete("."), 0, 1)
            )

        if not deterministic:
            # Extra alternatives are only added in non-deterministic (audio-based) mode.
            alt_path = get_abs_path("data/whitelist/alternatives.tsv")
            if os.path.isfile(alt_path) and load_labels(alt_path):
                graph |= _get_whitelist_graph(input_case, alt_path, keep_punct_add_end=True)
            fmt_path = get_abs_path("data/whitelist/alternatives_all_format.tsv")
            if os.path.isfile(fmt_path) and load_labels(fmt_path):
                graph |= get_formats(fmt_path, input_case=input_case)

        if input_file:
            # A user-supplied whitelist REPLACES the built-in data in deterministic mode,
            # but is merged with it in non-deterministic mode.
            whitelist_provided = _get_whitelist_graph(input_case, input_file)
            if not deterministic:
                graph |= whitelist_provided
            else:
                graph = whitelist_provided

        self.graph = convert_space(graph).optimize()
        self.fst = (pynutil.insert("name: \"") + self.graph + pynutil.insert("\"")).optimize()
import pynini
from pynini.examples import plurals
from pynini.lib import pynutil

from nemo_text_processing.text_normalization.pt.graph_utils import (
    MIN_NEG_WEIGHT,
    NEMO_ALPHA,
    NEMO_DIGIT,
    NEMO_NOT_SPACE,
    NEMO_SIGMA,
    GraphFst,
    convert_space,
)
from nemo_text_processing.text_normalization.pt.utils import get_abs_path


class WordFst(GraphFst):
    """
    Finite state transducer for classifying words (pt-BR pipeline).
    Same structure as the English word tagger; uses PT data paths and the passed PunctuationFst.

    Args:
        punctuation: PunctuationFst providing ``graph`` and ``punct_marks``.
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, punctuation: GraphFst, deterministic: bool = True):
        super().__init__(name="word", kind="classify", deterministic=deterministic)

        punct = punctuation.graph
        # Default word: any run of non-space chars that are not punctuation inputs.
        default_graph = pynini.closure(pynini.difference(NEMO_NOT_SPACE, punct.project("input")), 1)
        # Preferred word: additionally excludes currency symbols and digits (negative weight wins).
        symbols_to_exclude = (pynini.union("$", "€", "₩", "£", "¥", "#", "%") | NEMO_DIGIT).optimize()
        graph = pynini.closure(pynini.difference(NEMO_NOT_SPACE, symbols_to_exclude), 1)
        graph = pynutil.add_weight(graph, MIN_NEG_WEIGHT) | default_graph

        # ARPABET-style phoneme spans: "[HH AH0 L OW1]".
        phoneme_unit = pynini.closure(NEMO_ALPHA, 1) + pynini.closure(NEMO_DIGIT)
        phoneme = (
            pynini.accep(pynini.escape("["))
            + pynini.closure(phoneme_unit + pynini.accep(" "))
            + phoneme_unit
            + pynini.accep(pynini.escape("]"))
        )

        punct_marks = pynini.union(*punctuation.punct_marks).optimize()
        stress = pynini.union("ˈ", "'", "ˌ")
        ipa_phoneme_unit = pynini.string_file(get_abs_path("data/whitelist/ipa_symbols.tsv"))
        ipa_phonemes = (
            pynini.closure(stress, 0, 1)
            + pynini.closure(ipa_phoneme_unit, 1)
            + pynini.closure(stress | ipa_phoneme_unit)
        )
        delim = (punct_marks | pynini.accep(" ")) ** (1, ...)
        # Unbracketed IPA sequence: phoneme groups separated by punctuation/space delimiters.
        ipa_seq = ipa_phonemes + pynini.closure(delim + ipa_phonemes) + pynini.closure(delim, 0, 1)

        if not deterministic:
            # Relaxed ARPABET span: optional spaces just inside the brackets.
            phoneme = (
                pynini.accep(pynini.escape("["))
                + pynini.closure(pynini.accep(" "), 0, 1)
                + pynini.closure(phoneme_unit + pynini.accep(" "))
                + phoneme_unit
                + pynini.closure(pynini.accep(" "), 0, 1)
                + pynini.accep(pynini.escape("]"))
            ).optimize()

        # BUG FIX: wrap the IPA sequence in brackets exactly once. The original wrapped it
        # unconditionally AND again inside ``if not deterministic``, so non-deterministic
        # mode only matched doubly-bracketed input ("[[...]]").
        ipa_phonemes = (pynini.accep(pynini.escape("[")) + ipa_seq + pynini.accep(pynini.escape("]"))).optimize()

        phoneme |= ipa_phonemes
        # Phoneme spans take priority over the plain-word graph.
        self.graph = plurals._priority_union(convert_space(phoneme.optimize()), graph, NEMO_SIGMA)
        self.fst = (pynutil.insert("name: \"") + self.graph + pynutil.insert("\"")).optimize()
def load_labels(abs_path: str):
    """
    Load a TSV file as a list of rows (list of lists).

    Args:
        abs_path: absolute path to a UTF-8 TSV file.

    Returns:
        List of rows, each row a list of string fields.
    """
    with open(abs_path, encoding="utf-8") as tsv_file:
        return [row for row in csv.reader(tsv_file, delimiter="\t")]
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import ( + NEMO_NOT_QUOTE, + GraphFst, + insert_space, + shift_cardinal_gender_pt, +) + + +class CardinalFst(GraphFst): + """ + Finite state transducer for verbalizing Portuguese cardinal numbers, e.g. + cardinal { integer: "dois" } -> dois + cardinal { integer: "dois" } -> duas (feminine context via shift_cardinal_gender_pt) + cardinal { negative: "true" integer: "cinco" } -> menos cinco + + Args: + deterministic: if True will provide a single transduction option, + for False multiple options (used for audio-based normalization) + """ + + def __init__(self, deterministic: bool = True): + super().__init__(name="cardinal", kind="verbalize", deterministic=deterministic) + optional_sign = pynini.closure(pynini.cross("negative: \"true\" ", "menos") + insert_space, 0, 1) + self.optional_sign = optional_sign + + integer = pynini.closure(NEMO_NOT_QUOTE, 1) + self.integer = pynutil.delete(" \"") + integer + pynutil.delete("\"") + + integer = pynutil.delete("integer:") + self.integer + + # Generate masculine form (default) + graph_masc = optional_sign + integer + + # Generate feminine form using Portuguese gender conversion + graph_fem = shift_cardinal_gender_pt(graph_masc) + + self.graph_masc = pynini.optimize(graph_masc) + self.graph_fem = pynini.optimize(graph_fem) + + # Default to masculine for standalone numbers + 
import pynini
from pynini.lib import pynutil

from nemo_text_processing.text_normalization.pt.graph_utils import (
    NEMO_NOT_QUOTE,
    GraphFst,
    delete_preserve_order,
    delete_space,
    insert_space,
)
from nemo_text_processing.text_normalization.pt.utils import get_abs_path, load_labels


class DateFst(GraphFst):
    """
    Finite state transducer for verbalizing Portuguese (Brazilian) dates, e.g.
    date { day: "quinze" month: "março" year: "dois mil e vinte e quatro" preserve_order: true }
    -> quinze de março de dois mil e vinte e quatro

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="date", kind="verbalize", deterministic=deterministic)

        # The joining preposition is data-driven ("de" by default).
        vrows = load_labels(get_abs_path("data/date/verbal_phrases.tsv"))
        vp = {r[0].strip(): r[1].strip() for r in vrows if len(r) >= 2 and r[0].strip()}
        prep_word = vp.get("preposition", "de")

        quoted = pynini.closure(NEMO_NOT_QUOTE, 1)

        day_expr = pynutil.delete('day: "') + quoted + pynutil.delete('"')
        month_expr = pynutil.delete('month: "') + quoted + pynutil.delete('"')
        year_expr = pynutil.delete('year: "') + quoted + pynutil.delete('"')

        ws = delete_space + insert_space
        # BUG FIX: the original glue was ``ws + insert(prep_word) + insert_space + ws``,
        # which inserted TWO spaces after the preposition ("quinze de  março").
        # One inserted space before and one after the preposition is sufficient
        # (matches the single-space-per-gap pattern used by the decimal verbalizer).
        glue = ws + pynutil.insert(prep_word) + ws

        graph_dmy = day_expr + glue + month_expr + glue + year_expr + delete_preserve_order
        self.fst = self.delete_tokens(graph_dmy).optimize()
import pynini
from pynini.lib import pynutil

from nemo_text_processing.text_normalization.pt.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space, insert_space
from nemo_text_processing.text_normalization.pt.utils import get_abs_path, load_labels


class DecimalFst(GraphFst):
    """
    Finite state transducer for verbalizing Portuguese decimal numbers, e.g.
    decimal { integer_part: "um" fractional_part: "vinte e seis" } -> um vírgula vinte e seis
    decimal { negative: "true" integer_part: "um" ... } -> menos um vírgula ...
    decimal { integer_part: "um" quantity: "milhão" } -> um milhão

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="decimal", kind="verbalize", deterministic=deterministic)

        # Separator and minus words are data-driven, with Portuguese defaults.
        specials = {
            row[0]: row[1]
            for row in load_labels(get_abs_path("data/numbers/decimal_specials.tsv"))
            if len(row) >= 2
        }
        separator_word = specials.get("separator", "vírgula")
        minus_word = specials.get("minus", "menos")

        def field(tag):
            # 'tag: "word"' -> 'word' (strips the serialized-token wrapper).
            return pynutil.delete(f'{tag}: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')

        sign = pynini.closure(pynini.cross('negative: "true" ', minus_word) + insert_space, 0, 1)

        quantity_part = delete_space + insert_space + field("quantity")

        # Either "<int> <quantity>" ("um milhão") ...
        with_quantity = field("integer_part") + quantity_part
        # ... or "<int> vírgula <frac> [<quantity>]".
        with_fraction = (
            field("integer_part")
            + delete_space
            + insert_space
            + pynutil.insert(separator_word)
            + insert_space
            + field("fractional_part")
            + pynini.closure(quantity_part, 0, 1)
        )

        graph = sign + pynini.union(with_quantity, with_fraction)

        self.numbers = graph.optimize()
        self.fst = self.delete_tokens(graph).optimize()
import pynini
from pynini.lib import pynutil

from nemo_text_processing.text_normalization.pt.graph_utils import (
    NEMO_NOT_QUOTE,
    NEMO_SIGMA,
    NEMO_SPACE,
    GraphFst,
    delete_preserve_order,
)
from nemo_text_processing.text_normalization.pt.utils import get_abs_path

# Module-level data maps, loaded once at import time.
digit_no_zero = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
graph_symbols = pynini.string_file(get_abs_path("data/electronic/symbols.tsv"))
server_common = pynini.string_file(get_abs_path("data/electronic/server_name.tsv"))
domain_common = pynini.string_file(get_abs_path("data/electronic/domain.tsv"))
spoken_unit = pynini.string_file(get_abs_path("data/electronic/electronic_spoken_unit.tsv"))


class ElectronicFst(GraphFst):
    """
    Finite state transducer for verbalizing electronic tokens (emails, URLs) for pt-BR,
    e.g. the "@" separator is spoken as "arroba".

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="electronic", kind="verbalize", deterministic=deterministic)

        graph_digit = digit_no_zero | zero

        def add_space_after_char():
            # Insert a space after every non-space character except the last one
            # (letter-by-letter spelling of the protocol field).
            return pynini.closure(NEMO_NOT_QUOTE - pynini.accep(NEMO_SPACE) + pynutil.insert(NEMO_SPACE)) + (
                NEMO_NOT_QUOTE - pynini.accep(NEMO_SPACE)
            )

        # Rewrite symbols and digits anywhere in the string to their spoken forms.
        verbalize_characters = pynini.cdrewrite(graph_symbols | graph_digit, "", "", NEMO_SIGMA)

        # Prefer whole tokens (server names, TLDs, company/common words) over letter-by-letter:
        # the per-character fallback carries a small positive weight so known words win.
        user_segment = pynutil.add_weight(NEMO_NOT_QUOTE, weight=0.0001) | server_common | spoken_unit
        user_name = (
            pynutil.delete('username: "')
            + (user_segment + pynini.closure(pynutil.insert(NEMO_SPACE) + user_segment))
            + pynutil.delete('"')
        )
        user_name @= verbalize_characters

        convert_defaults = (
            pynutil.add_weight(NEMO_NOT_QUOTE, weight=0.0001) | domain_common | server_common | spoken_unit
        )
        domain = convert_defaults + pynini.closure(pynutil.insert(NEMO_SPACE) + convert_defaults)
        domain @= verbalize_characters
        domain = pynutil.delete('domain: "') + domain + pynutil.delete('"')

        # Protocol is spelled character-by-character, with symbols verbalized.
        protocol = (
            pynutil.delete('protocol: "')
            + add_space_after_char() @ pynini.cdrewrite(graph_symbols, "", "", NEMO_SIGMA)
            + pynutil.delete('"')
        )

        # Three shapes: [protocol] domain (URL), username arroba domain (email), and a
        # bare "arroba username" alternative.
        # NOTE(review): the bare ``insert("arroba ") + user_name`` branch verbalizes a
        # username with no domain field — presumably for social-media handles; confirm
        # against the tagger that produces such tokens.
        self.graph = (pynini.closure(protocol + NEMO_SPACE, 0, 1) + domain) | (
            user_name + NEMO_SPACE + pynutil.insert("arroba" + NEMO_SPACE) + domain
            | (pynutil.insert("arroba" + NEMO_SPACE) + user_name)
        )

        self.fst = self.delete_tokens(self.graph + delete_preserve_order).optimize()
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use it except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import NEMO_NOT_QUOTE, GraphFst, insert_space +from nemo_text_processing.text_normalization.pt.utils import get_abs_path, load_labels + + +class FractionFst(GraphFst): + """ + Finite state transducer for verbalizing Portuguese fraction numbers, e.g. + fraction { numerator: "um" denominator: "meio" morphosyntactic_features: "ordinal" } -> um meio + fraction { integer_part: "dois" numerator: "três" denominator: "quarto" } -> dois e três quartos + fraction { numerator: "dois" denominator: "onze" morphosyntactic_features: "avos" } -> dois onze avos + + Denominator routing (set by the tagger, unchanged here): + + * **Ordinal** (``morphosyntactic_features: "ordinal"``): denominators 2–10, 100, 1000 — spoken as + ordinals with plural ``s`` on the denominator when the numerator is not ``um`` (``três quartos``). + * **Avos** (``… "avos"``): all other positive integer denominators — cardinal denominator + the word + ``avos`` (``três onze avos``). Optional ``sobre`` between numerator and denominator is controlled by + ``data/fractions/specials.tsv`` key ``avos_between`` (empty = single space; ``sobre`` = ``… sobre …``). + * **Mixed** numbers use ``connector`` from the same TSV (default `` e ``) after the integer part. 
+ + Args: + deterministic: if True will provide a single transduction option, + for False multiple options (used for audio-based normalization) + """ + + def __init__(self, deterministic: bool = True): + super().__init__(name="fraction", kind="verbalize", deterministic=deterministic) + labels = load_labels(get_abs_path("data/fractions/specials.tsv")) + spec = {r[0]: r[1] for r in labels if len(r) >= 2} + connector_raw = spec.get("connector", " e ").strip() + connector = insert_space + pynutil.insert(connector_raw) + insert_space + minus = spec.get("minus", "menos ").rstrip() + plural_suffix = spec.get("plural_suffix", "s") + avos_word = spec.get("avos_suffix", " avos").strip() + avos_between = spec.get("avos_between", "").strip() + numerator_one_val = spec.get("numerator_one", "um") + denominator_half_val = spec.get("denominator_half", "meio") + + optional_sign = pynini.closure(pynini.cross('negative: "true" ', minus) + insert_space, 0, 1) + + integer = pynutil.delete('integer_part: "') + pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete('" ') + + numerator_one = pynutil.delete('numerator: "') + pynini.accep(numerator_one_val) + pynutil.delete('" ') + numerator_rest = ( + pynutil.delete('numerator: "') + + pynini.difference(pynini.closure(NEMO_NOT_QUOTE), pynini.accep(numerator_one_val)) + + pynutil.delete('" ') + ) + + denom_ordinal = ( + pynutil.delete('denominator: "') + + pynini.closure(NEMO_NOT_QUOTE) + + pynutil.delete('" morphosyntactic_features: "ordinal"') + ) + denom_meio = ( + pynutil.delete('denominator: "') + + pynini.accep(denominator_half_val) + + pynutil.delete('" morphosyntactic_features: "ordinal"') + ) + denom_avos = ( + pynutil.delete('denominator: "') + + pynini.closure(NEMO_NOT_QUOTE) + + pynutil.delete('" morphosyntactic_features: "avos"') + ) + + fraction_ordinal_singular = numerator_one + insert_space + denom_ordinal + fraction_ordinal_plural = numerator_rest + insert_space + denom_ordinal + pynutil.insert(plural_suffix) + fraction_ordinal = 
pynini.union(fraction_ordinal_singular, fraction_ordinal_plural) + + if avos_between: + avos_mid = insert_space + pynutil.insert(avos_between) + insert_space + else: + avos_mid = insert_space + + fraction_avos = ( + pynini.union(numerator_one, numerator_rest) + + avos_mid + + denom_avos + + insert_space + + pynutil.insert(avos_word) + ) + + fraction = pynini.union(fraction_ordinal, fraction_avos) + mixed_um_meio = integer + connector + pynutil.delete('numerator: "' + numerator_one_val + '" " ') + denom_meio + optional_integer = pynini.closure(integer + connector + insert_space, 0, 1) + graph = optional_sign + pynini.union( + pynutil.add_weight(mixed_um_meio, -0.01), + optional_integer + fraction, + ) + + self.inner_graph = graph.optimize() + self.fst = self.delete_tokens(graph).optimize() diff --git a/nemo_text_processing/text_normalization/pt/verbalizers/measure.py b/nemo_text_processing/text_normalization/pt/verbalizers/measure.py new file mode 100644 index 000000000..bb94ef783 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/verbalizers/measure.py @@ -0,0 +1,98 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import ( + NEMO_NOT_QUOTE, + NEMO_WHITE_SPACE, + GraphFst, + delete_preserve_order, + delete_space, +) + + +class MeasureFst(GraphFst): + """ + Finite state transducer for verbalizing measure (pt-BR), e.g. + measure { cardinal { integer: "duzentos" } units: "gramas" } -> duzentos gramas + measure { cardinal { integer: "um" } units: "hora" } -> uma hora + + Args: + decimal: DecimalFst verbalizer + cardinal: CardinalFst verbalizer + fraction: FractionFst verbalizer + deterministic: if True will provide a single transduction option, + for False multiple transduction are generated (used for audio-based normalization) + """ + + def __init__(self, decimal: GraphFst, cardinal: GraphFst, fraction: GraphFst, deterministic: bool = True): + super().__init__(name="measure", kind="verbalize", deterministic=deterministic) + + hours_unit = ( + pynutil.delete('units: "') + + pynini.union(pynini.accep("hora"), pynini.accep("horas")) + + pynutil.delete('"') + ) + non_hours_unit = ( + pynutil.delete('units: "') + + pynini.difference(pynini.closure(NEMO_NOT_QUOTE, 1), pynini.union("hora", "horas")) + + pynutil.delete('"') + ) + + graph_cardinal = ( + pynutil.delete("cardinal {") + + delete_space + + cardinal.graph_fem + + delete_space + + pynutil.delete("}") + + NEMO_WHITE_SPACE + + hours_unit + ) + graph_cardinal |= ( + pynutil.delete("cardinal {") + + delete_space + + cardinal.graph_masc + + delete_space + + pynutil.delete("}") + + NEMO_WHITE_SPACE + + non_hours_unit + ) + + graph_decimal = ( + pynutil.delete("decimal {") + + delete_space + + decimal.numbers + + delete_space + + pynutil.delete("}") + + NEMO_WHITE_SPACE + + (hours_unit | non_hours_unit) + ) + + graph_fraction = ( + pynutil.delete("fraction {") + + delete_space + + fraction.inner_graph + + delete_space + + pynutil.delete("}") + + NEMO_WHITE_SPACE + + (hours_unit | non_hours_unit) + ) + + graph = 
graph_cardinal | graph_decimal | graph_fraction + graph += delete_preserve_order + + delete_tokens = self.delete_tokens(graph) + self.fst = delete_tokens.optimize() diff --git a/nemo_text_processing/text_normalization/pt/verbalizers/money.py b/nemo_text_processing/text_normalization/pt/verbalizers/money.py new file mode 100644 index 000000000..4b247953c --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/verbalizers/money.py @@ -0,0 +1,96 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the "License". +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import ( + NEMO_NOT_QUOTE, + NEMO_SIGMA, + NEMO_SPACE, + GraphFst, + delete_preserve_order, + insert_space, +) +from nemo_text_processing.text_normalization.pt.utils import get_abs_path, load_labels + + +class MoneyFst(GraphFst): + """ + Finite state transducer for verbalizing money (pt-BR), e.g. + money { currency_maj: "reais" integer_part: "doze" } -> doze reais + money { ... fractional_part: "cinco" currency_min: "centavos" ... 
} -> doze reais e cinco centavos + + Args: + decimal: DecimalFst verbalizer (for decimal amounts embedded in money) + deterministic: if True will provide a single transduction option, + for False multiple transduction are generated (used for audio-based normalization) + """ + + def __init__(self, decimal: GraphFst, deterministic: bool = True): + super().__init__(name="money", kind="verbalize", deterministic=deterministic) + + scales_data = load_labels(get_abs_path("data/numbers/scales.tsv")) + currency_plural_data = load_labels(get_abs_path("data/money/currency_major_plural.tsv")) + + scale_words = [] + for row in scales_data[1:]: + if len(row) < 2: + continue + one_label = row[0].strip() + plural = row[1].strip() + if not one_label or not plural: + continue + scale_words.extend((one_label.split()[-1], plural)) + + curr_words = [row[1].strip() for row in currency_plural_data if len(row) >= 2 and row[1].strip()] + + scales = pynini.union(*[pynini.accep(w) + NEMO_SPACE for w in scale_words]).optimize() + currencies = pynini.union(*curr_words).optimize() + + maj = pynutil.delete('currency_maj: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"') + min_unit = pynutil.delete('currency_min: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"') + + fractional_part = ( + pynutil.delete('fractional_part: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"') + ) + integer_part = pynutil.delete('integer_part: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"') + + connector_minor = pynutil.insert("e") + insert_space + if not deterministic: + connector_minor |= pynutil.insert("com") + insert_space + + graph_integer = integer_part + NEMO_SPACE + maj + + graph_integer_with_minor = ( + integer_part + + NEMO_SPACE + + maj + + NEMO_SPACE + + connector_minor + + fractional_part + + NEMO_SPACE + + min_unit + + delete_preserve_order + ) + + graph_decimal = decimal.numbers + NEMO_SPACE + maj + + graph_minor = fractional_part + NEMO_SPACE + min_unit + 
delete_preserve_order + + graph = graph_integer | graph_integer_with_minor | graph_decimal | graph_minor + graph @= pynini.cdrewrite(pynutil.insert("de") + insert_space, scales, currencies, NEMO_SIGMA) + + delete_tokens = self.delete_tokens(graph) + self.fst = delete_tokens.optimize() diff --git a/nemo_text_processing/text_normalization/pt/verbalizers/ordinal.py b/nemo_text_processing/text_normalization/pt/verbalizers/ordinal.py new file mode 100644 index 000000000..9be8876fd --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/verbalizers/ordinal.py @@ -0,0 +1,47 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use it except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import NEMO_NOT_QUOTE, NEMO_SIGMA, NEMO_SPACE, GraphFst +from nemo_text_processing.text_normalization.pt.utils import get_abs_path + + +class OrdinalFst(GraphFst): + """ + Finite state transducer for verbalizing Portuguese ordinals, e.g. 
+ ordinal { integer: "primeiro" morphosyntactic_features: "gender_masc" } -> primeiro + ordinal { integer: "primeira" morphosyntactic_features: "gender_fem" } -> primeira (feminine rewrite applied) + + Args: + deterministic: if True will provide a single transduction option, + for False multiple options (used for audio-based normalization) + """ + + def __init__(self, deterministic: bool = True): + super().__init__(name="ordinal", kind="verbalize", deterministic=deterministic) + integer = pynutil.delete('integer: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"') + + fem_rewrite = pynini.string_file(get_abs_path("data/ordinals/feminine.tsv")) + feminine_rewrite = pynini.cdrewrite( + fem_rewrite, + "", + pynini.union(NEMO_SPACE, pynini.accep("[EOS]")), + NEMO_SIGMA, + ) + + graph_masc = integer + pynutil.delete(' morphosyntactic_features: "gender_masc"') + graph_fem = (integer @ feminine_rewrite) + pynutil.delete(' morphosyntactic_features: "gender_fem"') + self.fst = self.delete_tokens(pynini.union(graph_masc, graph_fem)).optimize() diff --git a/nemo_text_processing/text_normalization/pt/verbalizers/telephone.py b/nemo_text_processing/text_normalization/pt/verbalizers/telephone.py new file mode 100644 index 000000000..eae320638 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/verbalizers/telephone.py @@ -0,0 +1,57 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space, insert_space + + +class TelephoneFst(GraphFst): + """ + Finite state transducer for verbalizing telephone. + """ + + def __init__(self, deterministic: bool = True): + super().__init__(name="telephone", kind="verbalize", deterministic=deterministic) + + optional_country_code = pynini.closure( + pynutil.delete('country_code: "') + + pynini.closure(NEMO_NOT_QUOTE, 1) + + pynutil.delete('"') + + delete_space + + insert_space, + 0, + 1, + ) + + number_part = ( + pynutil.delete('number_part: "') + + pynini.closure(NEMO_NOT_QUOTE, 1) + + pynini.closure(pynutil.add_weight(pynutil.delete(" "), -0.0001), 0, 1) + + pynutil.delete('"') + ) + + optional_extension = pynini.closure( + delete_space + + insert_space + + pynutil.delete('extension: "') + + pynini.closure(NEMO_NOT_QUOTE, 1) + + pynutil.delete('"'), + 0, + 1, + ) + + graph = optional_country_code + number_part + optional_extension + self.fst = self.delete_tokens(graph).optimize() diff --git a/nemo_text_processing/text_normalization/pt/verbalizers/time.py b/nemo_text_processing/text_normalization/pt/verbalizers/time.py new file mode 100644 index 000000000..dfebdab3e --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/verbalizers/time.py @@ -0,0 +1,125 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import ( + NEMO_NOT_QUOTE, + GraphFst, + delete_preserve_order, + delete_space, + insert_space, +) + + +class TimeFst(GraphFst): + """ + Finite state transducer for verbalizing Portuguese time, e.g. + time { hours: "catorze" minutes: "trinta" preserve_order: true } -> catorze horas e trinta + time { hours: "um" minutes: "trinta" preserve_order: true } -> uma hora e trinta + time { hours: "dois" minutes: "quinze" preserve_order: true } -> duas horas e quinze + time { hours: "onze" suffix: "da manhã" preserve_order: true } -> onze horas da manhã + time { hours: "vinte e um" minutes: "dezoito" suffix: "da tarde" preserve_order: true } + -> vinte e uma horas e dezoito da tarde + + Args: + deterministic: if True will provide a single transduction option, + for False multiple transduction are generated (used for audio-based normalization) + """ + + def __init__(self, deterministic: bool = True): + super().__init__(name="time", kind="verbalize", deterministic=deterministic) + + quoted = pynini.closure(NEMO_NOT_QUOTE, 1) + + minutes_val = pynutil.delete('minutes: "') + quoted + pynutil.delete('"') + seconds_val = pynutil.delete('seconds: "') + quoted + pynutil.delete('"') + suffix_val = pynutil.delete('suffix: "') + quoted + pynutil.delete('"') + + gap = delete_space + insert_space + suffix_out = pynini.closure(gap + suffix_val, 0, 1) + + hours_default = pynutil.delete('hours: "') + quoted + pynutil.delete('"') + gap + pynutil.insert("horas") + # Match whitespace after the closing quote (same as hours_default's gap) so the path composes + # with minutes/suffix fields; otherwise only the generic "… horas" branch accepts the token. 
+ hours_um = ( + pynutil.delete('hours: "') + + pynutil.delete("um") + + pynutil.delete('"') + + delete_space + + pynutil.insert("uma hora") + ) + hours_dois = ( + pynutil.delete('hours: "') + + pynutil.delete("dois") + + pynutil.delete('"') + + delete_space + + pynutil.insert("duas horas") + ) + hours_vinte_um = ( + pynutil.delete('hours: "') + + pynutil.delete("vinte e um") + + pynutil.delete('"') + + delete_space + + pynutil.insert("vinte e uma horas") + ) + hours_vinte_dois = ( + pynutil.delete('hours: "') + + pynutil.delete("vinte e dois") + + pynutil.delete('"') + + delete_space + + pynutil.insert("vinte e duas horas") + ) + # Prefer feminine hour phrases over the generic ``… horas`` path (tie-break by weight). + hour_phrase = ( + pynutil.add_weight(hours_um, -0.01) + | pynutil.add_weight(hours_dois, -0.01) + | pynutil.add_weight(hours_vinte_um, -0.01) + | pynutil.add_weight(hours_vinte_dois, -0.01) + | hours_default + ).optimize() + + graph_hms = ( + hour_phrase + + insert_space + + pynutil.insert("e") + + insert_space + + minutes_val + + gap + + pynutil.insert("minutos") + + insert_space + + pynutil.insert("e") + + insert_space + + seconds_val + + gap + + pynutil.insert("segundos") + + suffix_out + + delete_preserve_order + ) + + with_minutes = ( + hour_phrase + + insert_space + + pynutil.insert("e") + + insert_space + + minutes_val + + suffix_out + + delete_preserve_order + ) + + hours_only = hour_phrase + suffix_out + delete_preserve_order + + graph = pynini.union(graph_hms, with_minutes, hours_only).optimize() + self.fst = self.delete_tokens(graph).optimize() diff --git a/nemo_text_processing/text_normalization/pt/verbalizers/verbalize.py b/nemo_text_processing/text_normalization/pt/verbalizers/verbalize.py new file mode 100644 index 000000000..9a6910d6a --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/verbalizers/verbalize.py @@ -0,0 +1,66 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use it except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from nemo_text_processing.text_normalization.pt.graph_utils import GraphFst +from nemo_text_processing.text_normalization.pt.verbalizers.cardinal import CardinalFst +from nemo_text_processing.text_normalization.pt.verbalizers.date import DateFst +from nemo_text_processing.text_normalization.pt.verbalizers.decimal import DecimalFst +from nemo_text_processing.text_normalization.pt.verbalizers.electronic import ElectronicFst +from nemo_text_processing.text_normalization.pt.verbalizers.fraction import FractionFst +from nemo_text_processing.text_normalization.pt.verbalizers.measure import MeasureFst +from nemo_text_processing.text_normalization.pt.verbalizers.money import MoneyFst +from nemo_text_processing.text_normalization.pt.verbalizers.ordinal import OrdinalFst +from nemo_text_processing.text_normalization.pt.verbalizers.telephone import TelephoneFst +from nemo_text_processing.text_normalization.pt.verbalizers.time import TimeFst + + +class VerbalizeFst(GraphFst): + """ + Composes Portuguese verbalizer grammars (cardinal, ordinal, fraction, decimal). + For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File. + More details to deployment at NeMo/tools/text_processing_deployment. 
+ + Args: + deterministic: if True will provide a single transduction option, + for False multiple options (used for audio-based normalization) + """ + + def __init__(self, deterministic: bool = True): + super().__init__(name="verbalize", kind="verbalize", deterministic=deterministic) + + cardinal = CardinalFst(deterministic=deterministic) + ordinal = OrdinalFst(deterministic=deterministic) + fraction = FractionFst(deterministic=deterministic) + decimal = DecimalFst(deterministic=deterministic) + measure = MeasureFst(decimal=decimal, cardinal=cardinal, fraction=fraction, deterministic=deterministic) + money = MoneyFst(decimal=decimal, deterministic=deterministic) + date = DateFst(deterministic=deterministic) + time = TimeFst(deterministic=deterministic) + telephone = TelephoneFst(deterministic=deterministic) + electronic = ElectronicFst(deterministic=deterministic) + graph = ( + fraction.fst + | decimal.fst + | date.fst + | time.fst + | measure.fst + | money.fst + | ordinal.fst + | cardinal.fst + | telephone.fst + | electronic.fst + ) + + self.fst = graph diff --git a/nemo_text_processing/text_normalization/pt/verbalizers/verbalize_final.py b/nemo_text_processing/text_normalization/pt/verbalizers/verbalize_final.py new file mode 100644 index 000000000..84ed9ac39 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/verbalizers/verbalize_final.py @@ -0,0 +1,71 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use it except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import ( + GraphFst, + delete_extra_space, + delete_space, + generator_main, +) +from nemo_text_processing.text_normalization.pt.verbalizers.verbalize import VerbalizeFst +from nemo_text_processing.text_normalization.pt.verbalizers.word import WordFst +from nemo_text_processing.utils.logging import logger + + +class VerbalizeFinalFst(GraphFst): + """ + Finite state transducer that verbalizes an entire Portuguese sentence, e.g. + tokens { cardinal { integer: "dois" } } tokens { name: "e" } tokens { cardinal { integer: "três" } } -> dois e três + + Args: + deterministic: if True will provide a single transduction option, + for False multiple options (used for audio-based normalization) + cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache. 
+ overwrite_cache: set to True to overwrite .far files + """ + + def __init__(self, deterministic: bool = True, cache_dir: str = None, overwrite_cache: bool = False): + super().__init__(name="verbalize_final", kind="verbalize", deterministic=deterministic) + + far_file = None + if cache_dir is not None and cache_dir != "None": + os.makedirs(cache_dir, exist_ok=True) + far_file = os.path.join(cache_dir, f"pt_tn_{deterministic}_deterministic_verbalizer.far") + if not overwrite_cache and far_file and os.path.exists(far_file): + self.fst = pynini.Far(far_file, mode="r")["verbalize"] + logger.info(f'VerbalizeFinalFst graph was restored from {far_file}.') + else: + + verbalize = VerbalizeFst(deterministic=deterministic).fst + word = WordFst(deterministic=deterministic).fst + types = verbalize | word + graph = ( + pynutil.delete("tokens") + + delete_space + + pynutil.delete("{") + + delete_space + + types + + delete_space + + pynutil.delete("}") + ) + graph = delete_space + pynini.closure(graph + delete_extra_space) + graph + delete_space + + self.fst = graph.optimize() + if far_file: + generator_main(far_file, {"verbalize": self.fst}) diff --git a/nemo_text_processing/text_normalization/pt/verbalizers/word.py b/nemo_text_processing/text_normalization/pt/verbalizers/word.py new file mode 100644 index 000000000..2b53ad1ff --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/verbalizers/word.py @@ -0,0 +1,32 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import NEMO_CHAR, NEMO_SIGMA, GraphFst, delete_space + + +class WordFst(GraphFst): + """ + Verbalizes tokens { name: "..." } for the pt-BR pipeline. + """ + + def __init__(self, deterministic: bool = True): + super().__init__(name="word", kind="verbalize", deterministic=deterministic) + chars = pynini.closure(NEMO_CHAR - " ", 1) + char = pynutil.delete("name:") + delete_space + pynutil.delete("\"") + chars + pynutil.delete("\"") + graph = char @ pynini.cdrewrite(pynini.cross("\u00a0", " "), "", "", NEMO_SIGMA) + + self.fst = graph.optimize() diff --git a/nemo_text_processing/text_normalization/run_evaluate.py b/nemo_text_processing/text_normalization/run_evaluate.py index 26a3fc7b6..3a1964bbd 100644 --- a/nemo_text_processing/text_normalization/run_evaluate.py +++ b/nemo_text_processing/text_normalization/run_evaluate.py @@ -35,7 +35,7 @@ def parse_args(): parser.add_argument( "--lang", help="language", - choices=['ar', 'de', 'en', 'es', 'fr', 'hu', 'it', 'ru', 'sv', 'zh', 'hy', 'hi', 'ko', 'vi'], + choices=['ar', 'de', 'en', 'es', 'fr', 'hu', 'it', 'ru', 'sv', 'zh', 'hy', 'hi', 'ko', 'vi', 'pt'], default="en", type=str, ) diff --git a/tests/nemo_text_processing/pt/data_text_normalization/test_cases_cardinal.txt b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_cardinal.txt new file mode 100644 index 000000000..be8057d71 --- /dev/null +++ b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_cardinal.txt @@ -0,0 +1,121 @@ +0~zero +1~um +2~dois +3~três +4~quatro +5~cinco +6~seis +7~sete +8~oito +9~nove +10~dez +11~onze +12~doze +13~treze +14~catorze +15~quinze +16~dezesseis +17~dezessete +18~dezoito +19~dezenove +20~vinte +21~vinte e um +22~vinte e dois +23~vinte e três +24~vinte e quatro +25~vinte e cinco +26~vinte e seis 
+27~vinte e sete +28~vinte e oito +29~vinte e nove +30~trinta +40~quarenta +50~cinquenta +60~sessenta +70~setenta +80~oitenta +90~noventa +100~cem +101~cento e um +102~cento e dois +110~cento e dez +120~cento e vinte +130~cento e trinta +200~duzentos +300~trezentos +400~quatrocentos +500~quinhentos +600~seiscentos +700~setecentos +800~oitocentos +900~novecentos +1000~mil +1 000~mil +1.000~mil +1010~mil e dez +1020~mil e vinte +1100~mil e cem +1110~mil cento e dez +1111~mil cento e onze +2000~dois mil +2002~dois mil e dois +2010~dois mil e dez +2020~dois mil e vinte +2100~dois mil e cem +2110~dois mil cento e dez +2111~dois mil cento e onze +10000~dez mil +10 000~dez mil +10.000~dez mil +100000~cem mil +100 000~cem mil +100.000~cem mil +1 000 000~um milhão +1.000.000~um milhão +2.000.000~dois milhões +1.000.000.000~um bilhão +1000000000~um bilhão +2.000.000.000~dois bilhões +2000000000~dois bilhões +3 000 000 000 000~três trilhões +3.000.000.000.000~três trilhões +1001~mil e um +1010~mil e dez +1100~mil e cem +1101~mil cento e um +1111~mil cento e onze +1999~mil novecentos e noventa e nove +100000~cem mil +100001~cem mil e um +101000~cento e um mil +101001~cento e um mil e um +110000~cento e dez mil +111000~cento e onze mil +111111~cento e onze mil cento e onze +1001000~um milhão e mil +1001001~um milhão mil e um +1010000~um milhão e dez mil +1010101~um milhão dez mil cento e um +1100000~um milhão e cem mil +1110000~um milhão cento e dez mil +1001010101~um bilhão um milhão dez mil cento e um +1010101010~um bilhão dez milhões cento e um mil e dez +1234567890~um bilhão duzentos e trinta e quatro milhões quinhentos e sessenta e sete mil oitocentos e noventa +987654321~novecentos e oitenta e sete milhões seiscentos e cinquenta e quatro mil trezentos e vinte e um +999999999~novecentos e noventa e nove milhões novecentos e noventa e nove mil novecentos e noventa e nove +2000000001~dois bilhões e um +3000001000~três bilhões e mil +4000100000~quatro bilhões e cem mil 
+5000000100~cinco bilhões e cem +6001000000~seis bilhões e um milhão +1000000000000~um trilhão +1000000000001~um trilhão e um +1230000000000~um trilhão duzentos e trinta bilhões +3004005006007~três trilhões quatro bilhões cinco milhões seis mil e sete +1000001~um milhão e um +1001100~um milhão mil e cem +1001110~um milhão mil cento e dez +47701~quarenta e sete mil setecentos e um +394506~trezentos e noventa e quatro mil quinhentos e seis +3022110709~três bilhões vinte e dois milhões cento e dez mil setecentos e nove +302210709~trezentos e dois milhões duzentos e dez mil setecentos e nove \ No newline at end of file diff --git a/tests/nemo_text_processing/pt/data_text_normalization/test_cases_date.txt b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_date.txt new file mode 100644 index 000000000..0338d420c --- /dev/null +++ b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_date.txt @@ -0,0 +1,26 @@ +15/03/2024~quinze de março de dois mil e vinte e quatro +01/01/2000~primeiro de janeiro de dois mil +31/12/1999~trinta e um de dezembro de mil novecentos e noventa e nove +15.03.2024~quinze de março de dois mil e vinte e quatro +15-03-2024~quinze de março de dois mil e vinte e quatro +1/5/2025~primeiro de maio de dois mil e vinte e cinco +07/08/2010~sete de agosto de dois mil e dez +9/6/2024~nove de junho de dois mil e vinte e quatro +2/3/2000~dois de março de dois mil +29/02/2024~vinte e nove de fevereiro de dois mil e vinte e quatro +25/12/2023~vinte e cinco de dezembro de dois mil e vinte e três +31/01/2024~trinta e um de janeiro de dois mil e vinte e quatro +06/09/2024~seis de setembro de dois mil e vinte e quatro +10/10/2010~dez de outubro de dois mil e dez +28.02.2023~vinte e oito de fevereiro de dois mil e vinte e três +01-06-1995~primeiro de junho de mil novecentos e noventa e cinco +15 de março de 2024~quinze de março de dois mil e vinte e quatro +1 de janeiro de 2000~primeiro de janeiro de dois mil +10 de Dezembro de 1999~dez 
de dezembro de mil novecentos e noventa e nove +2024-03-15~quinze de março de dois mil e vinte e quatro +2024-3-5~cinco de março de dois mil e vinte e quatro +2024-12-25~vinte e cinco de dezembro de dois mil e vinte e quatro +03/15/2024~quinze de março de dois mil e vinte e quatro +2024/03/15~quinze de março de dois mil e vinte e quatro +2024.03.15~quinze de março de dois mil e vinte e quatro +03/04/2024~três de abril de dois mil e vinte e quatro diff --git a/tests/nemo_text_processing/pt/data_text_normalization/test_cases_decimal.txt b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_decimal.txt new file mode 100644 index 000000000..0bea9be23 --- /dev/null +++ b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_decimal.txt @@ -0,0 +1,58 @@ +0,1~zero vírgula um +0,2~zero vírgula dois +0,5~zero vírgula cinco +0,9~zero vírgula nove +0,01~zero vírgula zero um +0,02~zero vírgula zero dois +0,05~zero vírgula zero cinco +0,10~zero vírgula um zero +0,11~zero vírgula um um +0,15~zero vírgula um cinco +0,20~zero vírgula dois zero +0,25~zero vírgula dois cinco +0,50~zero vírgula cinco zero +0,99~zero vírgula nove nove +1,1~um vírgula um +1,2~um vírgula dois +1,5~um vírgula cinco +1,10~um vírgula um zero +1,15~um vírgula um cinco +1,20~um vírgula dois zero +1,26~um vírgula dois seis +1,33~um vírgula três três +1,50~um vírgula cinco zero +3,141~três vírgula um quatro um +3,256~três vírgula dois cinco seis +3,999~três vírgula nove nove nove +3,1415~três vírgula um quatro um cinco +3,1001~três vírgula um zero zero um +3,014~três vírgula zero um quatro +3,0141~três vírgula zero um quatro um +3,1005~três vírgula um zero zero cinco +3,1050~três vírgula um zero cinco zero +-1,2~menos um vírgula dois +-1,26~menos um vírgula dois seis +-3,5~menos três vírgula cinco +-0,5~menos zero vírgula cinco +1,2 milhões~um vírgula dois milhões +1,5 milhões~um vírgula cinco milhões +1,25 milhões~um vírgula dois cinco milhões +2,5 bilhões~dois vírgula cinco bilhões 
+3,75 bilhões~três vírgula sete cinco bilhões +0,001~zero vírgula zero zero um +0,0001~zero vírgula zero zero zero um +1,001~um vírgula zero zero um +1,010~um vírgula zero um zero +1,100~um vírgula um zero zero +10,01~dez vírgula zero um +10,001~dez vírgula zero zero um +100,5~cem vírgula cinco +100,05~cem vírgula zero cinco +3,14~três vírgula um quatro +3,141~três vírgula um quatro um +3,1415~três vírgula um quatro um cinco +3,14159~três vírgula um quatro um cinco nove +1,1234567~um vírgula um dois três quatro cinco seis sete +3,1415926535~três vírgula um quatro um cinco nove dois seis cinco três cinco +12,27~doze vírgula dois sete +87,69~oitenta e sete vírgula seis nove diff --git a/tests/nemo_text_processing/pt/data_text_normalization/test_cases_electronic.txt b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_electronic.txt new file mode 100644 index 000000000..3aee0be70 --- /dev/null +++ b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_electronic.txt @@ -0,0 +1,13 @@ +test@gmail.com~t e s t arroba gmail ponto com +a.bc@gmail.com~a ponto b c arroba gmail ponto com +admin@company.com.br~a d m i n arroba c o m p a n y ponto com ponto br +www.google.com~w w w ponto google ponto com +https://www.nvidia.com~h t t p s dois pontos barra barra w w w ponto nvidia ponto com +http://site.com.br~h t t p dois pontos barra barra s i t e ponto com ponto br +nvidia.com~nvidia ponto com +@usuario~arroba usuario +mail@google.com~m a i l arroba google ponto com +support@microsoft.com~s u p p o r t arroba microsoft ponto com +https://www.amazon.com.br~h t t p s dois pontos barra barra w w w ponto amazon ponto com ponto br +u.s.e.r@facebook.com~u ponto s ponto e ponto r arroba facebook ponto com +api.netflix.com~a p i ponto netflix ponto com diff --git a/tests/nemo_text_processing/pt/data_text_normalization/test_cases_fraction.txt b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_fraction.txt new file mode 100644 index 
000000000..e59642bac --- /dev/null +++ b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_fraction.txt @@ -0,0 +1,22 @@ +1/2~um meio +1/3~um terço +1/4~um quarto +2/3~dois terços +3/4~três quartos +1/5~um quinto +2/5~dois quintos +1/6~um sexto +5/6~cinco sextos +1/8~um oitavo +3/8~três oitavos +7/8~sete oitavos +1/10~um décimo +3/10~três décimos +3/11~três onze avos +5/13~cinco treze avos +1/100~um centésimo +1/1000~um milésimo +1 1/2~um e um meio +2 1/4~dois e um quarto +3 2/3~três e dois terços +47701/913~quarenta e sete mil setecentos e um novecentos e treze avos \ No newline at end of file diff --git a/tests/nemo_text_processing/pt/data_text_normalization/test_cases_measure.txt b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_measure.txt new file mode 100644 index 000000000..34a082108 --- /dev/null +++ b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_measure.txt @@ -0,0 +1,18 @@ +200 g~duzentos gramas +1 kg~um quilo~um quilograma +5 kg~cinco quilos~cinco quilogramas +200 m~duzentos metros +1 km~um quilômetro +5 km~cinco quilômetros +100 m~cem metros +1 l~um litro +2 l~dois litros +500 ml~quinhentos mililitros +1 m²~um metro quadrado +10 m²~dez metros quadrados +25°C~vinte e cinco graus celsius +-5°C~menos cinco graus celsius +1 h~uma hora +2 h~duas horas +30 min~trinta minutos +45 s~quarenta e cinco segundos diff --git a/tests/nemo_text_processing/pt/data_text_normalization/test_cases_money.txt b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_money.txt new file mode 100644 index 000000000..ea0680741 --- /dev/null +++ b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_money.txt @@ -0,0 +1,25 @@ +R$ 1~um real +R$ 12~doze reais +R$ 100~cem reais +R$ 200~duzentos reais +R$ 12,05~doze reais e cinco centavos +R$ 1,01~um real e um centavo +R$ 199,99~cento e noventa e nove reais e noventa e nove centavos +R$ 0,20~vinte centavos +R$ 0,25~vinte e cinco centavos +R$ 0,50~cinquenta 
centavos +$ 1~um dólar +$ 12~doze dólares +$ 12,05~doze dólares e cinco centavos +$ 29,50~vinte e nove dólares e cinquenta centavos +$ 75,63~setenta e cinco dólares e sessenta e três centavos~setenta e cinco dólares com sessenta e três centavos +€ 1~um euro +€ 12~doze euros +€ 12,05~doze euros e cinco centavos +R$ 1000~mil reais +R$ 1000000~um milhão de reais +$ 1000~mil dólares +$ 1000000~um milhão de dólares +R$ 181809~cento e oitenta e um mil oitocentos e nove reais +R$ 181819~cento e oitenta e um mil oitocentos e dezenove reais +R$ 1811605~um milhão oitocentos e onze mil seiscentos e cinco reais diff --git a/tests/nemo_text_processing/pt/data_text_normalization/test_cases_ordinal.txt b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_ordinal.txt new file mode 100644 index 000000000..f9a58a9ce --- /dev/null +++ b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_ordinal.txt @@ -0,0 +1,39 @@ +1º~primeiro +2º~segundo +3º~terceiro +4º~quarto +5º~quinto +6º~sexto +7º~sétimo +8º~oitavo +9º~nono +10º~décimo +11º~décimo primeiro +12º~décimo segundo +13º~décimo terceiro +20º~vigésimo +21º~vigésimo primeiro +22º~vigésimo segundo +23º~vigésimo terceiro +100º~centésimo +111º~centésimo décimo primeiro +134º~centésimo trigésimo quarto +1ª~primeira +2ª~segunda +3ª~terceira +4ª~quarta +5ª~quinta +6ª~sexta +7ª~sétima +8ª~oitava +9ª~nona +10ª~décima +11ª~décima primeira +12ª~décima segunda +13ª~décima terceira +20ª~vigésima +21ª~vigésima primeira +22ª~vigésima segunda +23ª~vigésima terceira +100ª~centésima +11ª casa~décima primeira casa \ No newline at end of file diff --git a/tests/nemo_text_processing/pt/data_text_normalization/test_cases_telephone.txt b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_telephone.txt new file mode 100644 index 000000000..a5b085469 --- /dev/null +++ b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_telephone.txt @@ -0,0 +1,7 @@ +(11) 99999-8888~um um nove nove nove nove nove oito 
oito oito oito +11 3333-4444~um um três três três três quatro quatro quatro quatro +555-1234~cinco cinco cinco um dois três quatro +99999-1234~nove nove nove nove nove um dois três quatro ++55 (11) 3333-4444~mais cinco cinco um um três três três três quatro quatro quatro quatro +192.168.1.1~um nove dois ponto um seis oito ponto um ponto um +(11) 3333-4444 ext. 12~um um três três três três quatro quatro quatro quatro extensão um dois \ No newline at end of file diff --git a/tests/nemo_text_processing/pt/data_text_normalization/test_cases_time.txt b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_time.txt new file mode 100644 index 000000000..b9ae642e7 --- /dev/null +++ b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_time.txt @@ -0,0 +1,30 @@ +14:30~catorze horas e trinta +14h30~catorze horas e trinta +14h~catorze horas +09:05~nove horas e cinco +00:15~zero horas e quinze +23:59~vinte e três horas e cinquenta e nove +12:00~doze horas +14.30~catorze horas e trinta +08:00~oito horas +00:00~zero horas +9:30~nove horas e trinta +14H30~catorze horas e trinta +6h05~seis horas e cinco +23:00~vinte e três horas +10:10~dez horas e dez +7h~sete horas +00h~zero horas +12h30~doze horas e trinta +14:30:05~catorze horas e trinta minutos e cinco segundos +09:00:31~nove horas e zero minutos e trinta e um segundos +14:30:00~catorze horas e trinta minutos e zero segundos +09:00:00~nove horas e zero minutos e zero segundos +11:00 da manhã~onze horas da manhã +3:30 da tarde~três horas e trinta da tarde +15h da tarde~três horas da tarde +16:00 da tarde~quatro horas da tarde +14:30:05 da tarde~duas horas e trinta minutos e cinco segundos da tarde +21:18:14 da manhã~vinte e uma horas e dezoito minutos e catorze segundos da manhã +23:18 da tarde~vinte e três horas e dezoito da tarde +22h09 da tarde~vinte e duas horas e nove da tarde diff --git a/tests/nemo_text_processing/pt/test_cardinal.py b/tests/nemo_text_processing/pt/test_cardinal.py index 
dafa3e358..901bd7008 100644 --- a/tests/nemo_text_processing/pt/test_cardinal.py +++ b/tests/nemo_text_processing/pt/test_cardinal.py @@ -16,6 +16,7 @@ from parameterized import parameterized from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer +from nemo_text_processing.text_normalization.normalize import Normalizer from ..utils import CACHE_DIR, parse_test_case_file @@ -30,3 +31,12 @@ class TestCardinal: def test_denorm(self, test_input, expected): pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False) assert pred == expected + + normalizer = Normalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased') + + @parameterized.expand(parse_test_case_file('pt/data_text_normalization/test_cases_cardinal.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def test_norm(self, test_input, expected): + pred = self.normalizer.normalize(test_input, verbose=False) + assert pred == expected diff --git a/tests/nemo_text_processing/pt/test_date.py b/tests/nemo_text_processing/pt/test_date.py index 88ea91a28..56a987e8f 100644 --- a/tests/nemo_text_processing/pt/test_date.py +++ b/tests/nemo_text_processing/pt/test_date.py @@ -16,6 +16,7 @@ from parameterized import parameterized from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer +from nemo_text_processing.text_normalization.normalize import Normalizer from ..utils import CACHE_DIR, parse_test_case_file @@ -29,3 +30,12 @@ class TestDate: def test_denorm(self, test_input, expected): pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False) assert pred == expected + + normalizer = Normalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased') + + @parameterized.expand(parse_test_case_file('pt/data_text_normalization/test_cases_date.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def test_norm(self, test_input, expected): + pred = 
self.normalizer.normalize(test_input, verbose=False) + assert pred == expected diff --git a/tests/nemo_text_processing/pt/test_decimal.py b/tests/nemo_text_processing/pt/test_decimal.py index afbec329b..b66485a9b 100644 --- a/tests/nemo_text_processing/pt/test_decimal.py +++ b/tests/nemo_text_processing/pt/test_decimal.py @@ -16,6 +16,7 @@ from parameterized import parameterized from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer +from nemo_text_processing.text_normalization.normalize import Normalizer from ..utils import CACHE_DIR, parse_test_case_file @@ -29,3 +30,12 @@ class TestDecimal: def test_denorm(self, test_input, expected): pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False) assert pred == expected + + normalizer = Normalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased') + + @parameterized.expand(parse_test_case_file('pt/data_text_normalization/test_cases_decimal.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def test_norm(self, test_input, expected): + pred = self.normalizer.normalize(test_input, verbose=False) + assert pred == expected diff --git a/tests/nemo_text_processing/pt/test_electronic.py b/tests/nemo_text_processing/pt/test_electronic.py index bff47d1fe..9f462c5db 100644 --- a/tests/nemo_text_processing/pt/test_electronic.py +++ b/tests/nemo_text_processing/pt/test_electronic.py @@ -16,6 +16,7 @@ from parameterized import parameterized from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer +from nemo_text_processing.text_normalization.normalize import Normalizer from ..utils import CACHE_DIR, parse_test_case_file @@ -29,3 +30,12 @@ class TestElectronic: def test_denorm(self, test_input, expected): pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False) assert pred == expected + + normalizer = Normalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False, 
input_case='cased') + + @parameterized.expand(parse_test_case_file('pt/data_text_normalization/test_cases_electronic.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def test_norm(self, test_input, expected): + pred = self.normalizer.normalize(test_input, verbose=False) + assert pred == expected diff --git a/tests/nemo_text_processing/pt/test_fraction.py b/tests/nemo_text_processing/pt/test_fraction.py new file mode 100644 index 000000000..16e6c5f30 --- /dev/null +++ b/tests/nemo_text_processing/pt/test_fraction.py @@ -0,0 +1,32 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +from parameterized import parameterized + +from nemo_text_processing.text_normalization.normalize import Normalizer +from ..utils import CACHE_DIR, parse_test_case_file + + +class TestFraction: + normalizer = Normalizer( + lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased', post_process=True + ) + + @parameterized.expand(parse_test_case_file('pt/data_text_normalization/test_cases_fraction.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def test_norm(self, test_input, expected): + pred = self.normalizer.normalize(test_input, verbose=False, punct_post_process=False) + assert pred == expected, f"input: {test_input}" diff --git a/tests/nemo_text_processing/pt/test_measure.py b/tests/nemo_text_processing/pt/test_measure.py index 9dcfc8548..603c967f4 100644 --- a/tests/nemo_text_processing/pt/test_measure.py +++ b/tests/nemo_text_processing/pt/test_measure.py @@ -17,6 +17,7 @@ from parameterized import parameterized from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer +from nemo_text_processing.text_normalization.normalize import Normalizer from ..utils import CACHE_DIR, parse_test_case_file @@ -30,3 +31,15 @@ class TestMeasure: def test_denorm(self, test_input, expected): pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False) assert pred == expected + + normalizer = Normalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased') + + @parameterized.expand(parse_test_case_file('pt/data_text_normalization/test_cases_measure.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def test_norm(self, test_input, expected): + pred = self.normalizer.normalize(test_input, verbose=False) + if isinstance(expected, list): + assert pred in expected + else: + assert pred == expected diff --git a/tests/nemo_text_processing/pt/test_money.py b/tests/nemo_text_processing/pt/test_money.py index 632bdb458..d440a93e8 100644 --- 
a/tests/nemo_text_processing/pt/test_money.py +++ b/tests/nemo_text_processing/pt/test_money.py @@ -17,6 +17,7 @@ from parameterized import parameterized from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer +from nemo_text_processing.text_normalization.normalize import Normalizer from ..utils import CACHE_DIR, parse_test_case_file @@ -30,3 +31,15 @@ class TestMoney: def test_denorm(self, test_input, expected): pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False) assert pred == expected + + normalizer = Normalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased') + + @parameterized.expand(parse_test_case_file('pt/data_text_normalization/test_cases_money.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def test_norm(self, test_input, expected): + pred = self.normalizer.normalize(test_input, verbose=False) + if isinstance(expected, list): + assert pred in expected + else: + assert pred == expected diff --git a/tests/nemo_text_processing/pt/test_ordinal.py b/tests/nemo_text_processing/pt/test_ordinal.py index a830e2d21..c2e7dfb71 100644 --- a/tests/nemo_text_processing/pt/test_ordinal.py +++ b/tests/nemo_text_processing/pt/test_ordinal.py @@ -17,7 +17,7 @@ from parameterized import parameterized from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer - +from nemo_text_processing.text_normalization.normalize import Normalizer from ..utils import CACHE_DIR, parse_test_case_file @@ -30,3 +30,12 @@ class TestOrdinal: def test_denorm(self, test_input, expected): pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False) assert pred == expected + + normalizer = Normalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased') + + @parameterized.expand(parse_test_case_file('pt/data_text_normalization/test_cases_ordinal.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def 
test_norm(self, test_input, expected): + pred = self.normalizer.normalize(test_input, verbose=False) + assert pred == expected diff --git a/tests/nemo_text_processing/pt/test_sparrowhawk_normalization.sh b/tests/nemo_text_processing/pt/test_sparrowhawk_normalization.sh new file mode 100755 index 000000000..c9f39aaa7 --- /dev/null +++ b/tests/nemo_text_processing/pt/test_sparrowhawk_normalization.sh @@ -0,0 +1,91 @@ +#! /bin/sh + +GRAMMARS_DIR=${1:-"/workspace/sparrowhawk/documentation/grammars"} +PROJECT_DIR=${2:-"/workspace/tests"} + +runtest () { + input=$1 + echo "INPUT is $input" + cd ${GRAMMARS_DIR} + + # read test file + while read testcase; do + IFS='~' read -a testcase_tokenized <<< $testcase + written=${testcase_tokenized[0]} + # only tests against first possible option when there are multiple shortest paths + spoken=${testcase_tokenized[1]} + + # replace non breaking space with breaking space + denorm_pred=$(echo $written | normalizer_main --config=sparrowhawk_configuration.ascii_proto 2>&1 | tail -n 1 | sed 's/\xC2\xA0/ /g') + + # trim white space + spoken="$(echo -e "${spoken}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')" + denorm_pred="$(echo -e "${denorm_pred}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')" + + # input expected actual + assertEquals "$written" "$spoken" "$denorm_pred" + done < "$input" +} + +testTNCardinal() { + input=$PROJECT_DIR/pt/data_text_normalization/test_cases_cardinal.txt + runtest $input +} + +testTNDecimal() { + input=$PROJECT_DIR/pt/data_text_normalization/test_cases_decimal.txt + runtest $input +} + +testTNOrdinal() { + input=$PROJECT_DIR/pt/data_text_normalization/test_cases_ordinal.txt + runtest $input +} + +testTNFraction() { + input=$PROJECT_DIR/pt/data_text_normalization/test_cases_fraction.txt + runtest $input +} + +testTNDate() { + input=$PROJECT_DIR/pt/data_text_normalization/test_cases_date.txt + runtest $input +} + +testTNTime() { + 
input=$PROJECT_DIR/pt/data_text_normalization/test_cases_time.txt + runtest $input +} + +testTNMeasure() { + input=$PROJECT_DIR/pt/data_text_normalization/test_cases_measure.txt + runtest $input +} + +testTNMoney() { + input=$PROJECT_DIR/pt/data_text_normalization/test_cases_money.txt + runtest $input +} + +# testTNWhitelist() { +# input=$PROJECT_DIR/pt/data_text_normalization/test_cases_whitelist.txt +# runtest $input +# } + +testTNTelephone() { + input=$PROJECT_DIR/pt/data_text_normalization/test_cases_telephone.txt + runtest $input +} + +testTNElectronic() { + input=$PROJECT_DIR/pt/data_text_normalization/test_cases_electronic.txt + runtest $input +} + +# testTNWord() { +# input=$PROJECT_DIR/pt/data_text_normalization/test_cases_word.txt +# runtest $input +# } + +# Load shUnit2 +. $PROJECT_DIR/../shunit2/shunit2 diff --git a/tests/nemo_text_processing/pt/test_telephone.py b/tests/nemo_text_processing/pt/test_telephone.py index e27c47e1c..f69f0b5a3 100644 --- a/tests/nemo_text_processing/pt/test_telephone.py +++ b/tests/nemo_text_processing/pt/test_telephone.py @@ -17,6 +17,7 @@ from parameterized import parameterized from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer +from nemo_text_processing.text_normalization.normalize import Normalizer from ..utils import CACHE_DIR, parse_test_case_file @@ -30,3 +31,12 @@ class TestTelephone: def test_denorm(self, test_input, expected): pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False) assert pred == expected + + normalizer = Normalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased') + + @parameterized.expand(parse_test_case_file('pt/data_text_normalization/test_cases_telephone.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def test_norm(self, test_input, expected): + pred = self.normalizer.normalize(test_input, verbose=False) + assert pred == expected diff --git a/tests/nemo_text_processing/pt/test_time.py 
b/tests/nemo_text_processing/pt/test_time.py index e43c61ac6..2a1a8f454 100644 --- a/tests/nemo_text_processing/pt/test_time.py +++ b/tests/nemo_text_processing/pt/test_time.py @@ -16,7 +16,7 @@ from parameterized import parameterized from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer - +from nemo_text_processing.text_normalization.normalize import Normalizer from ..utils import CACHE_DIR, parse_test_case_file @@ -29,3 +29,12 @@ class TestTime: def test_denorm(self, test_input, expected): pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False) assert pred == expected + + normalizer = Normalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased') + + @parameterized.expand(parse_test_case_file('pt/data_text_normalization/test_cases_time.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def test_norm(self, test_input, expected): + pred = self.normalizer.normalize(test_input, verbose=False) + assert pred == expected diff --git a/tools/text_processing_deployment/pynini_export.py b/tools/text_processing_deployment/pynini_export.py index 3e80b56ff..d6dfebdde 100644 --- a/tools/text_processing_deployment/pynini_export.py +++ b/tools/text_processing_deployment/pynini_export.py @@ -139,7 +139,7 @@ def parse_args(): if __name__ == '__main__': args = parse_args() - if args.language in ['pt', 'ru', 'es_en', 'mr'] and args.grammars == 'tn_grammars': + if args.language in ['ru', 'es_en', 'mr'] and args.grammars == 'tn_grammars': raise ValueError('Only ITN grammars could be deployed in Sparrowhawk for the selected languages.') TNPostProcessingFst = None ITNPostProcessingFst = None @@ -194,6 +194,10 @@ def parse_args(): from nemo_text_processing.inverse_text_normalization.pt.verbalizers.verbalize import ( VerbalizeFst as ITNVerbalizeFst, ) + from nemo_text_processing.text_normalization.pt.taggers.tokenize_and_classify import ( + ClassifyFst as TNClassifyFst, + ) + from 
nemo_text_processing.text_normalization.pt.verbalizers.verbalize import VerbalizeFst as TNVerbalizeFst elif args.language == 'fr': from nemo_text_processing.inverse_text_normalization.fr.taggers.tokenize_and_classify import ( ClassifyFst as ITNClassifyFst, diff --git a/tools/text_processing_deployment/sh_test.sh b/tools/text_processing_deployment/sh_test.sh index 3e31de37c..3bd1a2faa 100644 --- a/tools/text_processing_deployment/sh_test.sh +++ b/tools/text_processing_deployment/sh_test.sh @@ -19,7 +19,7 @@ GRAMMARS="itn_grammars" # tn_grammars INPUT_CASE="lower_cased" # cased -LANGUAGE="en" # language, {'en', 'es', 'de','zh'} supports both TN and ITN, {'pt', 'ru', 'fr', 'vi'} supports ITN only +LANGUAGE="en" # language, {'en', 'es', 'de','zh', 'pt'} supports both TN and ITN, {'ru', 'fr', 'vi'} supports ITN only OVERWRITE_CACHE="False" # Set to False to re-use .far files WHITELIST="" # Path to a whitelist file, if None the default will be used FAR_PATH=$(pwd) # Path where the grammars should be written