github.com/wdvxdr1123/go-silk@v0.0.0-20210316130616-d47b553def60/sdk/skype_silk_sdk_64.go

     1  // Code generated by 'ccgo -export-fields F -pkgname silk -replace-fd-zero -compiledb -trace-translation-units "" -o silk_windows_amd64.go SKP_Silk_typedef.h SKP_Silk_A2NLSF.c SKP_Silk_ana_filt_bank_1.c SKP_Silk_apply_sine_window.c SKP_Silk_array_maxabs.c SKP_Silk_autocorr.c SKP_Silk_biquad.c SKP_Silk_biquad_alt.c SKP_Silk_burg_modified.c SKP_Silk_bwexpander.c SKP_Silk_bwexpander_32.c SKP_Silk_CNG.c SKP_Silk_code_signs.c SKP_Silk_common_pitch_est_defines.h SKP_Silk_control.h SKP_Silk_control_audio_bandwidth.c SKP_Silk_control_codec_FIX.c SKP_Silk_corrMatrix_FIX.c SKP_Silk_create_init_destroy.c SKP_Silk_decoder_set_fs.c SKP_Silk_decode_core.c SKP_Silk_decode_frame.c SKP_Silk_decode_parameters.c SKP_Silk_decode_pitch.c SKP_Silk_decode_pulses.c SKP_Silk_dec_API.c SKP_Silk_define.h SKP_Silk_detect_SWB_input.c SKP_Silk_encode_frame_FIX.c SKP_Silk_encode_parameters.c SKP_Silk_encode_pulses.c SKP_Silk_enc_API.c SKP_Silk_errors.h SKP_Silk_find_LPC_FIX.c SKP_Silk_find_LTP_FIX.c SKP_Silk_find_pitch_lags_FIX.c SKP_Silk_find_pred_coefs_FIX.c SKP_Silk_gain_quant.c SKP_Silk_HP_variable_cutoff_FIX.c SKP_Silk_init_encoder_FIX.c SKP_Silk_Inlines.h SKP_Silk_inner_prod_aligned.c SKP_Silk_interpolate.c SKP_Silk_k2a.c SKP_Silk_k2a_Q16.c SKP_Silk_LBRR_reset.c SKP_Silk_lin2log.c SKP_Silk_log2lin.c SKP_Silk_LPC_inv_pred_gain.c SKP_Silk_LPC_synthesis_filter.c SKP_Silk_LPC_synthesis_order16.c SKP_Silk_LP_variable_cutoff.c SKP_Silk_LSF_cos_table.c SKP_Silk_LTP_analysis_filter_FIX.c SKP_Silk_LTP_scale_ctrl_FIX.c SKP_Silk_MA.c SKP_Silk_macros.h SKP_Silk_main.h SKP_Silk_main_FIX.h SKP_Silk_NLSF2A.c SKP_Silk_NLSF2A_stable.c SKP_Silk_NLSF_MSVQ_decode.c SKP_Silk_NLSF_MSVQ_encode_FIX.c SKP_Silk_NLSF_stabilize.c SKP_Silk_NLSF_VQ_rate_distortion_FIX.c SKP_Silk_NLSF_VQ_sum_error_FIX.c SKP_Silk_NLSF_VQ_weights_laroia.c SKP_Silk_noise_shape_analysis_FIX.c SKP_Silk_NSQ.c SKP_Silk_NSQ_del_dec.c SKP_Silk_pitch_analysis_core.c SKP_Silk_pitch_est_defines.h SKP_Silk_pitch_est_tables.c SKP_Silk_PLC.c SKP_Silk_PLC.h SKP_Silk_prefilter_FIX.c SKP_Silk_process_gains_FIX.c SKP_Silk_process_NLSFs_FIX.c SKP_Silk_quant_LTP_gains_FIX.c SKP_Silk_range_coder.c SKP_Silk_regularize_correlations_FIX.c SKP_Silk_resampler.c SKP_Silk_resampler_down2.c SKP_Silk_resampler_down2_3.c SKP_Silk_resampler_down3.c SKP_Silk_resampler_private.h SKP_Silk_resampler_private_AR2.c SKP_Silk_resampler_private_ARMA4.c SKP_Silk_resampler_private_copy.c SKP_Silk_resampler_private_down4.c SKP_Silk_resampler_private_down_FIR.c SKP_Silk_resampler_private_IIR_FIR.c SKP_Silk_resampler_private_up2_HQ.c SKP_Silk_resampler_private_up4.c SKP_Silk_resampler_rom.c SKP_Silk_resampler_rom.h SKP_Silk_resampler_structs.h SKP_Silk_resampler_up2.c SKP_Silk_residual_energy16_FIX.c SKP_Silk_residual_energy_FIX.c SKP_Silk_scale_copy_vector16.c SKP_Silk_scale_vector.c SKP_Silk_schur.c SKP_Silk_schur64.c SKP_Silk_SDK_API.h SKP_Silk_setup_complexity.h SKP_Silk_shell_coder.c SKP_Silk_sigm_Q15.c SKP_Silk_SigProc_FIX.h SKP_Silk_solve_LS_FIX.c SKP_Silk_sort.c SKP_Silk_structs.h SKP_Silk_structs_FIX.h SKP_Silk_sum_sqr_shift.c SKP_Silk_tables.h SKP_Silk_tables_gain.c SKP_Silk_tables_LTP.c SKP_Silk_tables_NLSF_CB0_10.c SKP_Silk_tables_NLSF_CB0_10.h SKP_Silk_tables_NLSF_CB0_16.c SKP_Silk_tables_NLSF_CB0_16.h SKP_Silk_tables_NLSF_CB1_10.c SKP_Silk_tables_NLSF_CB1_10.h SKP_Silk_tables_NLSF_CB1_16.c SKP_Silk_tables_NLSF_CB1_16.h SKP_Silk_tables_other.c SKP_Silk_tables_pitch_lag.c SKP_Silk_tables_pulses_per_block.c SKP_Silk_tables_sign.c SKP_Silk_tables_type_offset.c SKP_Silk_tuning_parameters.h SKP_Silk_VAD.c SKP_Silk_VQ_nearest_neighbor_FIX.c SKP_Silk_warped_autocorrelation_FIX.c "" -DNDEBUG', DO NOT EDIT.
     2  //+build amd64 arm64
     3  
     4  package sdk
     5  
     6  import (
     7  	"math"
     8  	"reflect"
     9  	"sync/atomic"
    10  	"unsafe"
    11  
    12  	"modernc.org/libc"
    13  	"modernc.org/libc/sys/types"
    14  )
    15  
    16  var _ = math.Pi
    17  var _ reflect.Kind
    18  var _ atomic.Value
    19  var _ unsafe.Pointer
    20  var _ types.Size_t
    21  
    22  type ptrdiff_t = int64 /* <builtin>:3:26 */
    23  
    24  type size_t = uint64 /* <builtin>:9:23 */
    25  
    26  type wchar_t = uint16 /* <builtin>:15:24 */
    27  
    28  type va_list = uintptr /* <builtin>:50:27 */
    29  
    30  type ssize_t = int64 /* corecrt.h:50:35 */
    31  
    32  type rsize_t = size_t /* corecrt.h:57:16 */
    33  
    34  type intptr_t = int64 /* corecrt.h:67:35 */
    35  
    36  type uintptr_t = uint64 /* corecrt.h:80:44 */
    37  
    38  type wint_t = uint16   /* corecrt.h:111:24 */
    39  type wctype_t = uint16 /* corecrt.h:112:24 */
    40  
    41  type errno_t = int32 /* corecrt.h:118:13 */
    42  
    43  type time_t = int64 /* corecrt.h:143:20 */
    44  
    45  type threadlocaleinfostruct = struct {
    46  	Frefcount      int32
    47  	Flc_codepage   uint32
    48  	Flc_collate_cp uint32
    49  	Flc_handle     [6]uint32
    50  	Flc_id         [6]LC_ID
    51  	Flc_category   [6]struct {
    52  		Flocale    uintptr
    53  		Fwlocale   uintptr
    54  		Frefcount  uintptr
    55  		Fwrefcount uintptr
    56  	}
    57  	Flc_clike            int32
    58  	Fmb_cur_max          int32
    59  	Flconv_intl_refcount uintptr
    60  	Flconv_num_refcount  uintptr
    61  	Flconv_mon_refcount  uintptr
    62  	Flconv               uintptr
    63  	Fctype1_refcount     uintptr
    64  	Fctype1              uintptr
    65  	Fpctype              uintptr
    66  	Fpclmap              uintptr
    67  	Fpcumap              uintptr
    68  	Flc_time_curr        uintptr
    69  } /* corecrt.h:435:1 */
    70  
    71  type pthreadlocinfo = uintptr /* corecrt.h:437:39 */
    72  type pthreadmbcinfo = uintptr /* corecrt.h:438:36 */
    73  
    74  type localeinfo_struct = struct {
    75  	Flocinfo pthreadlocinfo
    76  	Fmbcinfo pthreadmbcinfo
    77  } /* corecrt.h:441:9 */
    78  
    79  type _locale_tstruct = localeinfo_struct /* corecrt.h:444:3 */
    80  type _locale_t = uintptr                 /* corecrt.h:444:19 */
    81  
    82  type tagLC_ID = struct {
    83  	FwLanguage uint16
    84  	FwCountry  uint16
    85  	FwCodePage uint16
    86  } /* corecrt.h:435:1 */
    87  
    88  type LC_ID = tagLC_ID  /* corecrt.h:452:3 */
    89  type LPLC_ID = uintptr /* corecrt.h:452:9 */
    90  
    91  type threadlocinfo = threadlocaleinfostruct /* corecrt.h:487:3 */
    92  
    93  // ISO C Standard:  7.17  Common definitions  <stddef.h>
    94  
    95  // Any one of these symbols __need_* means that GNU libc
    96  //    wants us just to define one data type.  So don't define
    97  //    the symbols that indicate this file's entire job has been done.
    98  
    99  // In 4.3bsd-net2, machine/ansi.h defines these symbols, which are
   100  //    defined if the corresponding type is *not* defined.
   101  //    FreeBSD-2.1 defines _MACHINE_ANSI_H_ instead of _ANSI_H_
   102  
   103  // Sequent's header files use _PTRDIFF_T_ in some conflicting way.
   104  //    Just ignore it.
   105  
   106  // On VxWorks, <type/vxTypesBase.h> may have defined macros like
   107  //    _TYPE_size_t which will typedef size_t.  fixincludes patched the
   108  //    vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is
   109  //    not defined, and so that defining this macro defines _GCC_SIZE_T.
   110  //    If we find that the macros are still defined at this point, we must
   111  //    invoke them so that the type is defined as expected.
   112  
   113  // In case nobody has defined these types, but we aren't running under
   114  //    GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and
   115  //    __WCHAR_TYPE__ have reasonable values.  This can happen if the
    116  //    parts of GCC are compiled by an older compiler, that actually
   117  //    include gstddef.h, such as collect2.
   118  
   119  // Signed type of difference of two pointers.
   120  
   121  // Define this type if we are doing the whole job,
   122  //    or if we want this type in particular.
   123  
   124  // Unsigned type of `sizeof' something.
   125  
   126  // Define this type if we are doing the whole job,
   127  //    or if we want this type in particular.
   128  
   129  // Wide character type.
   130  //    Locale-writers should change this as necessary to
   131  //    be big enough to hold unique values not between 0 and 127,
   132  //    and not (wchar_t) -1, for each defined multibyte character.
   133  
   134  // Define this type if we are doing the whole job,
   135  //    or if we want this type in particular.
   136  
   137  //  In 4.3bsd-net2, leave these undefined to indicate that size_t, etc.
   138  //     are already defined.
   139  //  BSD/OS 3.1 and FreeBSD [23].x require the MACHINE_ANSI_H check here.
   140  
   141  // A null pointer constant.
   142  
   143  // Copyright (C) 1989-2019 Free Software Foundation, Inc.
   144  //
   145  // This file is part of GCC.
   146  //
   147  // GCC is free software; you can redistribute it and/or modify
   148  // it under the terms of the GNU General Public License as published by
   149  // the Free Software Foundation; either version 3, or (at your option)
   150  // any later version.
   151  //
   152  // GCC is distributed in the hope that it will be useful,
   153  // but WITHOUT ANY WARRANTY; without even the implied warranty of
   154  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   155  // GNU General Public License for more details.
   156  //
   157  // Under Section 7 of GPL version 3, you are granted additional
   158  // permissions described in the GCC Runtime Library Exception, version
   159  // 3.1, as published by the Free Software Foundation.
   160  //
   161  // You should have received a copy of the GNU General Public License and
   162  // a copy of the GCC Runtime Library Exception along with this program;
   163  // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   164  // <http://www.gnu.org/licenses/>.
   165  
   166  // ISO C Standard:  7.17  Common definitions  <stddef.h>
   167  
   168  // Any one of these symbols __need_* means that GNU libc
   169  //    wants us just to define one data type.  So don't define
   170  //    the symbols that indicate this file's entire job has been done.
   171  // snaroff@next.com says the NeXT needs this.
   172  
   173  // This avoids lossage on SunOS but only if stdtypes.h comes first.
   174  //    There's no way to win with the other order!  Sun lossage.
   175  
   176  // Sequent's header files use _PTRDIFF_T_ in some conflicting way.
   177  //    Just ignore it.
   178  
   179  // On VxWorks, <type/vxTypesBase.h> may have defined macros like
   180  //    _TYPE_size_t which will typedef size_t.  fixincludes patched the
   181  //    vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is
   182  //    not defined, and so that defining this macro defines _GCC_SIZE_T.
   183  //    If we find that the macros are still defined at this point, we must
   184  //    invoke them so that the type is defined as expected.
   185  
   186  // In case nobody has defined these types, but we aren't running under
   187  //    GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and
   188  //    __WCHAR_TYPE__ have reasonable values.  This can happen if the
    189  //    parts of GCC are compiled by an older compiler, that actually
   190  //    include gstddef.h, such as collect2.
   191  
   192  // Signed type of difference of two pointers.
   193  
   194  // Define this type if we are doing the whole job,
   195  //    or if we want this type in particular.
   196  
   197  // If this symbol has done its job, get rid of it.
   198  
   199  // Unsigned type of `sizeof' something.
   200  
   201  // Define this type if we are doing the whole job,
   202  //    or if we want this type in particular.
   203  
   204  // Wide character type.
   205  //    Locale-writers should change this as necessary to
   206  //    be big enough to hold unique values not between 0 and 127,
   207  //    and not (wchar_t) -1, for each defined multibyte character.
   208  
   209  // Define this type if we are doing the whole job,
   210  //    or if we want this type in particular.
   211  
   212  // A null pointer constant.
   213  
   214  // Offset of member MEMBER in a struct of type TYPE.
   215  
   216  // Type whose alignment is supported in every context and is at least
   217  //    as great as that of any standard type not using alignment
   218  //    specifiers.
   219  type max_align_t = struct {
   220  	F__max_align_ll int64
   221  	F__max_align_ld float64
   222  } /* stddef.h:427:3 */
   223  
   224  // 7.18.1.1  Exact-width integer types
   225  type int8_t = int8     /* stdint.h:35:21 */
   226  type uint8_t = uint8   /* stdint.h:36:25 */
   227  type int16_t = int16   /* stdint.h:37:16 */
   228  type uint16_t = uint16 /* stdint.h:38:25 */
   229  type int32_t = int32   /* stdint.h:39:14 */
   230  type uint32_t = uint32 /* stdint.h:40:20 */
   231  type int64_t = int64   /* stdint.h:41:38 */
   232  type uint64_t = uint64 /* stdint.h:42:48 */
   233  
   234  // 7.18.1.2  Minimum-width integer types
   235  type int_least8_t = int8     /* stdint.h:45:21 */
   236  type uint_least8_t = uint8   /* stdint.h:46:25 */
   237  type int_least16_t = int16   /* stdint.h:47:16 */
   238  type uint_least16_t = uint16 /* stdint.h:48:25 */
   239  type int_least32_t = int32   /* stdint.h:49:14 */
   240  type uint_least32_t = uint32 /* stdint.h:50:20 */
   241  type int_least64_t = int64   /* stdint.h:51:38 */
   242  type uint_least64_t = uint64 /* stdint.h:52:48 */
   243  
   244  // 7.18.1.3  Fastest minimum-width integer types
   245  //  Not actually guaranteed to be fastest for all purposes
   246  //  Here we use the exact-width types for 8 and 16-bit ints.
   247  type int_fast8_t = int8     /* stdint.h:58:21 */
   248  type uint_fast8_t = uint8   /* stdint.h:59:23 */
   249  type int_fast16_t = int16   /* stdint.h:60:16 */
   250  type uint_fast16_t = uint16 /* stdint.h:61:25 */
   251  type int_fast32_t = int32   /* stdint.h:62:14 */
   252  type uint_fast32_t = uint32 /* stdint.h:63:24 */
   253  type int_fast64_t = int64   /* stdint.h:64:38 */
   254  type uint_fast64_t = uint64 /* stdint.h:65:48 */
   255  
   256  // 7.18.1.5  Greatest-width integer types
   257  type intmax_t = int64   /* stdint.h:68:38 */
   258  type uintmax_t = uint64 /* stdint.h:69:48 */
   259  
   260  // 7.18.2  Limits of specified-width integer types
   261  
   262  // 7.18.2.1  Limits of exact-width integer types
   263  
   264  // 7.18.2.2  Limits of minimum-width integer types
   265  
   266  // 7.18.2.3  Limits of fastest minimum-width integer types
   267  
   268  // 7.18.2.4  Limits of integer types capable of holding
   269  //     object pointers
   270  
   271  // 7.18.2.5  Limits of greatest-width integer types
   272  
   273  // 7.18.3  Limits of other integer types
   274  
   275  // wint_t is unsigned short for compatibility with MS runtime
   276  
   277  // 7.18.4  Macros for integer constants
   278  
   279  // 7.18.4.1  Macros for minimum-width integer constants
   280  //
    281  //     According to Douglas Gwyn <gwyn@arl.mil>:
   282  // 	"This spec was changed in ISO/IEC 9899:1999 TC1; in ISO/IEC
   283  // 	9899:1999 as initially published, the expansion was required
   284  // 	to be an integer constant of precisely matching type, which
   285  // 	is impossible to accomplish for the shorter types on most
   286  // 	platforms, because C99 provides no standard way to designate
   287  // 	an integer constant with width less than that of type int.
   288  // 	TC1 changed this to require just an integer constant
   289  // 	*expression* with *promoted* type."
   290  //
   291  // 	The trick used here is from Clive D W Feather.
   292  
   293  //  The 'trick' doesn't work in C89 for long long because, without
   294  //     suffix, (val) will be evaluated as int, not intmax_t
   295  
   296  // 7.18.4.2  Macros for greatest-width integer constants
   297  
   298  /* assertions */
   299  
   300  // Copyright (C) 1992-2019 Free Software Foundation, Inc.
   301  //
   302  // This file is part of GCC.
   303  //
   304  // GCC is free software; you can redistribute it and/or modify it under
   305  // the terms of the GNU General Public License as published by the Free
   306  // Software Foundation; either version 3, or (at your option) any later
   307  // version.
   308  //
   309  // GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   310  // WARRANTY; without even the implied warranty of MERCHANTABILITY or
   311  // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   312  // for more details.
   313  //
   314  // Under Section 7 of GPL version 3, you are granted additional
   315  // permissions described in the GCC Runtime Library Exception, version
   316  // 3.1, as published by the Free Software Foundation.
   317  //
   318  // You should have received a copy of the GNU General Public License and
   319  // a copy of the GCC Runtime Library Exception along with this program;
   320  // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   321  // <http://www.gnu.org/licenses/>.
   322  
   323  // This administrivia gets added to the beginning of limits.h
   324  //    if the system has its own version of limits.h.
   325  
   326  // We use _GCC_LIMITS_H_ because we want this not to match
   327  //    any macros that the system's limits.h uses for its own purposes.
   328  
   329  // Use "..." so that we find syslimits.h only in this same directory.
   330  // syslimits.h stands for the system's own limits.h file.
   331  //    If we can use it ok unmodified, then we install this text.
   332  //    If fixincludes fixes it, then the fixed version is installed
   333  //    instead of this text.
   334  
   335  // *
   336  // This file has no copyright assigned and is placed in the Public Domain.
   337  // This file is part of the mingw-w64 runtime package.
   338  // No warranty is given; refer to the file DISCLAIMER.PD within this package.
   339  // *
   340  // This file has no copyright assigned and is placed in the Public Domain.
   341  // This file is part of the mingw-w64 runtime package.
   342  // No warranty is given; refer to the file DISCLAIMER.PD within this package.
   343  
   344  // File system limits
   345  //
   346  // NOTE: Apparently the actual size of PATH_MAX is 260, but a space is
   347  //       required for the NUL. TODO: Test?
   348  // NOTE: PATH_MAX is the POSIX equivalent for Microsoft's MAX_PATH; the two
   349  //       are semantically identical, with a limit of 259 characters for the
   350  //       path name, plus one for a terminating NUL, for a total of 260.
   351  
   352  // Copyright (C) 1991-2019 Free Software Foundation, Inc.
   353  //
   354  // This file is part of GCC.
   355  //
   356  // GCC is free software; you can redistribute it and/or modify it under
   357  // the terms of the GNU General Public License as published by the Free
   358  // Software Foundation; either version 3, or (at your option) any later
   359  // version.
   360  //
   361  // GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   362  // WARRANTY; without even the implied warranty of MERCHANTABILITY or
   363  // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   364  // for more details.
   365  //
   366  // Under Section 7 of GPL version 3, you are granted additional
   367  // permissions described in the GCC Runtime Library Exception, version
   368  // 3.1, as published by the Free Software Foundation.
   369  //
   370  // You should have received a copy of the GNU General Public License and
   371  // a copy of the GCC Runtime Library Exception along with this program;
   372  // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   373  // <http://www.gnu.org/licenses/>.
   374  
   375  // Number of bits in a `char'.
   376  
   377  // Maximum length of a multibyte character.
   378  
   379  // Minimum and maximum values a `signed char' can hold.
   380  
   381  // Maximum value an `unsigned char' can hold.  (Minimum is 0).
   382  
   383  // Minimum and maximum values a `char' can hold.
   384  
   385  // Minimum and maximum values a `signed short int' can hold.
   386  
   387  // Maximum value an `unsigned short int' can hold.  (Minimum is 0).
   388  
   389  // Minimum and maximum values a `signed int' can hold.
   390  
   391  // Maximum value an `unsigned int' can hold.  (Minimum is 0).
   392  
   393  // Minimum and maximum values a `signed long int' can hold.
   394  //    (Same as `int').
   395  
   396  // Maximum value an `unsigned long int' can hold.  (Minimum is 0).
   397  
   398  // Minimum and maximum values a `signed long long int' can hold.
   399  
   400  // Maximum value an `unsigned long long int' can hold.  (Minimum is 0).
   401  
   402  // Minimum and maximum values a `signed long long int' can hold.
   403  
   404  // Maximum value an `unsigned long long int' can hold.  (Minimum is 0).
   405  
   406  // This administrivia gets added to the end of limits.h
   407  //    if the system has its own version of limits.h.
   408  
   409  type _onexit_t = uintptr /* stdlib.h:50:15 */
   410  
   411  type _div_t = struct {
   412  	Fquot int32
   413  	Frem  int32
   414  } /* stdlib.h:60:11 */
   415  
   416  type div_t = _div_t /* stdlib.h:63:5 */
   417  
   418  type _ldiv_t = struct {
   419  	Fquot int32
   420  	Frem  int32
   421  } /* stdlib.h:65:11 */
   422  
   423  type ldiv_t = _ldiv_t /* stdlib.h:68:5 */
   424  
   425  type _LDOUBLE = struct{ Fld [10]uint8 } /* stdlib.h:77:5 */
   426  
   427  type _CRT_DOUBLE = struct{ Fx float64 } /* stdlib.h:84:5 */
   428  
   429  type _CRT_FLOAT = struct{ Ff float32 } /* stdlib.h:88:5 */
   430  
   431  type _LONGDOUBLE = struct{ Fx float64 } /* stdlib.h:95:5 */
   432  
   433  type _LDBL12 = struct{ Fld12 [12]uint8 } /* stdlib.h:102:5 */
   434  
   435  type _purecall_handler = uintptr /* stdlib.h:143:16 */
   436  
   437  type _invalid_parameter_handler = uintptr /* stdlib.h:148:16 */
   438  
   439  type lldiv_t = struct {
   440  	Fquot int64
   441  	Frem  int64
   442  } /* stdlib.h:727:61 */
   443  
   444  // *
   445  // This file has no copyright assigned and is placed in the Public Domain.
   446  // This file is part of the mingw-w64 runtime package.
   447  // No warranty is given; refer to the file DISCLAIMER.PD within this package.
   448  
   449  // *
   450  // This file has no copyright assigned and is placed in the Public Domain.
   451  // This file is part of the mingw-w64 runtime package.
   452  // No warranty is given; refer to the file DISCLAIMER.PD within this package.
   453  
   454  // Return codes for _heapwalk()
   455  
   456  // Values for _heapinfo.useflag
   457  
   458  // The structure used to walk through the heap with _heapwalk.
   459  type _heapinfo = struct {
   460  	F_pentry  uintptr
   461  	F_size    size_t
   462  	F_useflag int32
   463  	_         [4]byte
   464  } /* malloc.h:46:11 */
   465  
   466  // *
   467  // This file has no copyright assigned and is placed in the Public Domain.
   468  // This file is part of the mingw-w64 runtime package.
   469  // No warranty is given; refer to the file DISCLAIMER.PD within this package.
   470  
   471  // *
   472  // This file has no copyright assigned and is placed in the Public Domain.
   473  // This file is part of the mingw-w64 runtime package.
   474  // No warranty is given; refer to the file DISCLAIMER.PD within this package.
   475  
   476  // Return codes for _heapwalk()
   477  
   478  // Values for _heapinfo.useflag
   479  
   480  // The structure used to walk through the heap with _heapwalk.
   481  type _HEAPINFO = _heapinfo /* malloc.h:50:5 */
   482  
   483  /***********************************************************************
   484  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
   485  Redistribution and use in source and binary forms, with or without
   486  modification, (subject to the limitations in the disclaimer below)
   487  are permitted provided that the following conditions are met:
   488  - Redistributions of source code must retain the above copyright notice,
   489  this list of conditions and the following disclaimer.
   490  - Redistributions in binary form must reproduce the above copyright
   491  notice, this list of conditions and the following disclaimer in the
   492  documentation and/or other materials provided with the distribution.
   493  - Neither the name of Skype Limited, nor the names of specific
   494  contributors, may be used to endorse or promote products derived from
   495  this software without specific prior written permission.
   496  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
   497  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
   498  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
   499  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
   500  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   501  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   502  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   503  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   504  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   505  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   506  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   507  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   508  ***********************************************************************/
   509  
   510  /*																		*
   511   * File Name:	SKP_Silk_resampler_structs.h							*
   512   *																		*
   513   * Description: Structs for IIR/FIR resamplers							*
   514   *                                                                      *
   515   * Copyright 2010 (c), Skype Limited                                    *
   516   * All rights reserved.													*
   517   *																		*
   518   *                                                                      */
   519  
   520  /***********************************************************************
   521  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
   522  Redistribution and use in source and binary forms, with or without
   523  modification, (subject to the limitations in the disclaimer below)
   524  are permitted provided that the following conditions are met:
   525  - Redistributions of source code must retain the above copyright notice,
   526  this list of conditions and the following disclaimer.
   527  - Redistributions in binary form must reproduce the above copyright
   528  notice, this list of conditions and the following disclaimer in the
   529  documentation and/or other materials provided with the distribution.
   530  - Neither the name of Skype Limited, nor the names of specific
   531  contributors, may be used to endorse or promote products derived from
   532  this software without specific prior written permission.
   533  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
   534  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
   535  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
   536  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
   537  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   538  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   539  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   540  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   541  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   542  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   543  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   544  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   545  ***********************************************************************/
   546  
   547  /* Flag to enable support for input/output sampling rates above 48 kHz. Turn off for embedded devices */
   548  
   549  type _SKP_Silk_resampler_state_struct = struct {
   550  	FsIIR               [6]int32
   551  	FsFIR               [16]int32
   552  	FsDown2             [2]int32
   553  	Fresampler_function uintptr
   554  	Fup2_function       uintptr
   555  	FbatchSize          int32
   556  	FinvRatio_Q16       int32
   557  	FFIR_Fracs          int32
   558  	Finput2x            int32
   559  	FCoefs              uintptr
   560  	FsDownPre           [2]int32
   561  	FsUpPost            [2]int32
   562  	Fdown_pre_function  uintptr
   563  	Fup_post_function   uintptr
   564  	FbatchSizePrePost   int32
   565  	Fratio_Q16          int32
   566  	FnPreDownsamplers   int32
   567  	FnPostUpsamplers    int32
   568  	Fmagic_number       int32
   569  	_                   [4]byte
   570  } /* SKP_Silk_resampler_structs.h:53:9 */
   571  
   572  /***********************************************************************
   573  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
   574  Redistribution and use in source and binary forms, with or without
   575  modification, (subject to the limitations in the disclaimer below)
   576  are permitted provided that the following conditions are met:
   577  - Redistributions of source code must retain the above copyright notice,
   578  this list of conditions and the following disclaimer.
   579  - Redistributions in binary form must reproduce the above copyright
   580  notice, this list of conditions and the following disclaimer in the
   581  documentation and/or other materials provided with the distribution.
   582  - Neither the name of Skype Limited, nor the names of specific
   583  contributors, may be used to endorse or promote products derived from
   584  this software without specific prior written permission.
   585  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
   586  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
   587  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
   588  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
   589  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   590  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   591  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   592  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   593  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   594  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   595  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   596  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   597  ***********************************************************************/
   598  
   599  /*																		*
   600   * File Name:	SKP_Silk_resampler_structs.h							*
   601   *																		*
   602   * Description: Structs for IIR/FIR resamplers							*
   603   *                                                                      *
   604   * Copyright 2010 (c), Skype Limited                                    *
   605   * All rights reserved.													*
   606   *																		*
   607   *                                                                      */
   608  
   609  /***********************************************************************
   610  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
   611  Redistribution and use in source and binary forms, with or without
   612  modification, (subject to the limitations in the disclaimer below)
   613  are permitted provided that the following conditions are met:
   614  - Redistributions of source code must retain the above copyright notice,
   615  this list of conditions and the following disclaimer.
   616  - Redistributions in binary form must reproduce the above copyright
   617  notice, this list of conditions and the following disclaimer in the
   618  documentation and/or other materials provided with the distribution.
   619  - Neither the name of Skype Limited, nor the names of specific
   620  contributors, may be used to endorse or promote products derived from
   621  this software without specific prior written permission.
   622  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
   623  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
   624  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
   625  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
   626  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   627  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   628  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   629  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   630  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   631  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   632  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   633  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   634  ***********************************************************************/
   635  
   636  /* Flag to enable support for input/output sampling rates above 48 kHz. Turn off for embedded devices */
   637  
   638  type SKP_Silk_resampler_state_struct = _SKP_Silk_resampler_state_struct /* SKP_Silk_resampler_structs.h:75:3 */
   639  
   640  /***********************************************************************
   641  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
   642  Redistribution and use in source and binary forms, with or without
   643  modification, (subject to the limitations in the disclaimer below)
   644  are permitted provided that the following conditions are met:
   645  - Redistributions of source code must retain the above copyright notice,
   646  this list of conditions and the following disclaimer.
   647  - Redistributions in binary form must reproduce the above copyright
   648  notice, this list of conditions and the following disclaimer in the
   649  documentation and/or other materials provided with the distribution.
   650  - Neither the name of Skype Limited, nor the names of specific
   651  contributors, may be used to endorse or promote products derived from
   652  this software without specific prior written permission.
   653  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
   654  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
   655  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
   656  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
   657  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   658  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   659  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   660  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   661  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   662  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   663  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   664  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   665  ***********************************************************************/
   666  
   667  /***********************************************************************
   668  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
   669  Redistribution and use in source and binary forms, with or without
   670  modification, (subject to the limitations in the disclaimer below)
   671  are permitted provided that the following conditions are met:
   672  - Redistributions of source code must retain the above copyright notice,
   673  this list of conditions and the following disclaimer.
   674  - Redistributions in binary form must reproduce the above copyright
   675  notice, this list of conditions and the following disclaimer in the
   676  documentation and/or other materials provided with the distribution.
   677  - Neither the name of Skype Limited, nor the names of specific
   678  contributors, may be used to endorse or promote products derived from
   679  this software without specific prior written permission.
   680  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
   681  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
   682  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
   683  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
   684  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   685  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   686  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   687  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   688  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   689  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   690  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   691  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   692  ***********************************************************************/
   693  
   694  // This is an inline header file for general platform.
   695  
   696  // (a32 * (SKP_int32)((SKP_int16)(b32))) >> 16 output have to be 32bit int
   697  
   698  // a32 + (b32 * (SKP_int32)((SKP_int16)(c32))) >> 16 output have to be 32bit int
   699  
   700  // (a32 * (b32 >> 16)) >> 16
   701  
   702  // a32 + (b32 * (c32 >> 16)) >> 16
   703  
    704  // (SKP_int32)((SKP_int16)(a32)) * (SKP_int32)((SKP_int16)(b32)) output have to be 32bit int
   705  
   706  // a32 + (SKP_int32)((SKP_int16)(b32)) * (SKP_int32)((SKP_int16)(c32)) output have to be 32bit int
   707  
   708  // (SKP_int32)((SKP_int16)(a32)) * (b32 >> 16)
   709  
   710  // a32 + (SKP_int32)((SKP_int16)(b32)) * (c32 >> 16)
   711  
   712  // a64 + (b32 * c32)
   713  
   714  // (a32 * b32) >> 16
   715  
   716  // a32 + ((b32 * c32) >> 16)
   717  
   718  // (SKP_int32)(((SKP_int64)a32 * b32) >> 32)
   719  
   720  /* add/subtract with output saturated */
   721  
   722  func SKP_Silk_CLZ16(tls *libc.TLS, in16 int16) int32 { /* SKP_Silk_macros.h:79:22: */
   723  	var out32 int32 = 0
   724  	if int32(in16) == 0 {
   725  		return 16
   726  	}
   727  	/* test nibbles */
   728  	if (int32(in16) & 0xFF00) != 0 {
   729  		if (int32(in16) & 0xF000) != 0 {
   730  			in16 >>= 12
   731  		} else {
   732  			out32 = out32 + (4)
   733  			in16 >>= 8
   734  		}
   735  	} else {
   736  		if (int32(in16) & 0xFFF0) != 0 {
   737  			out32 = out32 + (8)
   738  			in16 >>= 4
   739  		} else {
   740  			out32 = out32 + (12)
   741  		}
   742  	}
   743  	/* test bits and return */
   744  	if (int32(in16) & 0xC) != 0 {
   745  		if (int32(in16) & 0x8) != 0 {
   746  			return (out32 + 0)
   747  		} else {
   748  			return (out32 + 1)
   749  		}
   750  	} else {
   751  		if (int32(in16) & 0xE) != 0 {
   752  			return (out32 + 2)
   753  		} else {
   754  			return (out32 + 3)
   755  		}
   756  	}
   757  	return int32(0)
   758  }
   759  
   760  func SKP_Silk_CLZ32(tls *libc.TLS, in32 int32) int32 { /* SKP_Silk_macros.h:115:22: */
   761  	/* test highest 16 bits and convert to SKP_int16 */
   762  	if (uint32(in32) & 0xFFFF0000) != 0 {
   763  		return SKP_Silk_CLZ16(tls, (int16(in32 >> 16)))
   764  	} else {
   765  		return (SKP_Silk_CLZ16(tls, int16(in32)) + 16)
   766  	}
   767  	return int32(0)
   768  }
   769  
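// Illustrative usage sketch, not part of the ccgo-generated output: the helper
// name and the test constant below are hypothetical and only show the expected
// behaviour of the two CLZ routines above, assuming the caller already holds a
// *libc.TLS (e.g. obtained from libc.NewTLS()). 0x0000FFFF has 16 leading zero
// bits in a 32-bit word: the upper half is zero, so SKP_Silk_CLZ32 defers to
// SKP_Silk_CLZ16 on the lower half (which returns 0) and adds 16.
func exampleCLZUsage(tls *libc.TLS) bool {
	return SKP_Silk_CLZ32(tls, 0x0000FFFF) == 16 // expected: true
}
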
   770  /********************************************************************/
   771  /*                                MACROS                            */
   772  /********************************************************************/
   773  
   774  /* Rotate a32 right by 'rot' bits. Negative rot values result in rotating
   775     left. Output is 32bit int.
   776     Note: contemporary compilers recognize the C expressions below and
   777     compile them into 'ror' instructions if available. No need for inline ASM! */
   778  /* PPC must use this generic implementation. */
   779  func SKP_ROR32(tls *libc.TLS, a32 int32, rot int32) int32 { /* SKP_Silk_SigProc_FIX.h:456:22: */
   780  	var x uint32 = uint32(a32)
   781  	var r uint32 = uint32(rot)
   782  	var m uint32 = uint32(-rot)
   783  	if rot <= 0 {
   784  		return (int32((x << m) | (x >> (uint32(32) - m))))
   785  	} else {
   786  		return (int32((x << (uint32(32) - r)) | (x >> r)))
   787  	}
   788  	return int32(0)
   789  }
   790  
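// Illustrative sketch (hypothetical, not generated code): rotating 0x12345678
// right by 8 bits with SKP_ROR32 moves the low byte to the top, giving
// 0x78123456; a non-positive rot would rotate left instead.
func exampleROR32Usage(tls *libc.TLS) bool {
	return SKP_ROR32(tls, 0x12345678, 8) == 0x78123456 // expected: true
}
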
    791  /* Allocate SKP_int16 aligned to 4-byte memory address */
   792  
   793  /* Useful Macros that can be adjusted to other platforms */
   794  /* fixed point macros */
   795  
   796  // (a32 * b32) output have to be 32bit int
   797  
   798  // (a32 * b32) output have to be 32bit uint
   799  
   800  // a32 + (b32 * c32) output have to be 32bit int
   801  
   802  /* ((a32 >> 16)  * (b32 >> 16)) output have to be 32bit int */
   803  
   804  /* a32 + ((a32 >> 16)  * (b32 >> 16)) output have to be 32bit int */
   805  
   806  // (a32 * b32)
   807  
   808  /* Adds two signed 32-bit values in a way that can overflow, while not relying on undefined behaviour
   809     (just standard two's complement implementation-specific behaviour) */
    810  /* Subtracts two signed 32-bit values in a way that can overflow, while not relying on undefined behaviour
   811     (just standard two's complement implementation-specific behaviour) */
   812  
   813  /* Multiply-accumulate macros that allow overflow in the addition (ie, no asserts in debug mode) */
   814  
   815  /* Add with saturation for positive input values */
   816  
   817  /* saturates before shifting */
   818  
   819  /* Requires that shift > 0 */
   820  
   821  /* Number of rightshift required to fit the multiplication */
   822  
   823  /* Macro to convert floating-point constants to fixed-point */
   824  func SKP_FIX_CONST(tls *libc.TLS, C float64, Q int32) int32 { /* SKP_Silk_SigProc_FIX.h:568:5: */
   825  	return (int32(((C) * (float64(int64(int64(1)) << (Q)))) + 0.5))
   826  }
   827  
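// Illustrative sketch (hypothetical, not generated code): SKP_FIX_CONST scales
// a floating-point constant by 2^Q and adds 0.5 before truncating, so 0.75 in
// Q10 becomes int32(0.75*1024 + 0.5) = 768.
func exampleFixConstUsage(tls *libc.TLS) bool {
	return SKP_FIX_CONST(tls, 0.75, 10) == 768 // expected: true
}
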
   828  /* SKP_min() versions with typecast in the function call */
   829  func SKP_min_int(tls *libc.TLS, a int32, b int32) int32 { /* SKP_Silk_SigProc_FIX.h:573:20: */
   830  	return func() int32 {
   831  		if (a) < (b) {
   832  			return a
   833  		}
   834  		return b
   835  	}()
   836  }
   837  
   838  func SKP_min_32(tls *libc.TLS, a int32, b int32) int32 { /* SKP_Silk_SigProc_FIX.h:578:22: */
   839  	return func() int32 {
   840  		if (a) < (b) {
   841  			return a
   842  		}
   843  		return b
   844  	}()
   845  }
   846  
   847  /* SKP_min() versions with typecast in the function call */
   848  func SKP_max_int(tls *libc.TLS, a int32, b int32) int32 { /* SKP_Silk_SigProc_FIX.h:584:20: */
   849  	return func() int32 {
   850  		if (a) > (b) {
   851  			return a
   852  		}
   853  		return b
   854  	}()
   855  }
   856  
   857  func SKP_max_16(tls *libc.TLS, a int16, b int16) int16 { /* SKP_Silk_SigProc_FIX.h:588:22: */
   858  	return func() int16 {
   859  		if (int32(a)) > (int32(b)) {
   860  			return a
   861  		}
   862  		return b
   863  	}()
   864  }
   865  
   866  func SKP_max_32(tls *libc.TLS, a int32, b int32) int32 { /* SKP_Silk_SigProc_FIX.h:592:22: */
   867  	return func() int32 {
   868  		if (a) > (b) {
   869  			return a
   870  		}
   871  		return b
   872  	}()
   873  }
   874  
   875  // Static assertion.  Requires support in the compiler.
   876  
   877  /***********************************************************************
   878  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
   879  Redistribution and use in source and binary forms, with or without
   880  modification, (subject to the limitations in the disclaimer below)
   881  are permitted provided that the following conditions are met:
   882  - Redistributions of source code must retain the above copyright notice,
   883  this list of conditions and the following disclaimer.
   884  - Redistributions in binary form must reproduce the above copyright
   885  notice, this list of conditions and the following disclaimer in the
   886  documentation and/or other materials provided with the distribution.
   887  - Neither the name of Skype Limited, nor the names of specific
   888  contributors, may be used to endorse or promote products derived from
   889  this software without specific prior written permission.
   890  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
   891  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
   892  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
   893  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
   894  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   895  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   896  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   897  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   898  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   899  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   900  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   901  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   902  ***********************************************************************/
   903  
   904  /* count leading zeros of SKP_int64 */
   905  func SKP_Silk_CLZ64(tls *libc.TLS, in int64_t) int32 { /* SKP_Silk_Inlines.h:44:22: */
   906  	var in_upper int32
   907  
   908  	in_upper = (int32((in) >> (32)))
   909  	if in_upper == 0 {
   910  		/* Search in the lower 32 bits */
   911  		return (32 + SKP_Silk_CLZ32(tls, int32(in)))
   912  	} else {
   913  		/* Search in the upper 32 bits */
   914  		return SKP_Silk_CLZ32(tls, in_upper)
   915  	}
   916  	return int32(0)
   917  }
   918  
    919  /* get number of leading zeros and fractional part (the bits right after the leading one) */
   920  func SKP_Silk_CLZ_FRAC(tls *libc.TLS, in int32, lz uintptr, frac_Q7 uintptr) { /* SKP_Silk_Inlines.h:59:17: */
   921  	var lzeros int32 = SKP_Silk_CLZ32(tls, in)
   922  
   923  	*(*int32)(unsafe.Pointer(lz)) = lzeros
   924  	*(*int32)(unsafe.Pointer(frac_Q7)) = (SKP_ROR32(tls, in, (24-lzeros)) & 0x7f)
   925  }
   926  
   927  /* Approximation of square root                                          */
   928  /* Accuracy: < +/- 10%  for output values > 15                           */
   929  /*           < +/- 2.5% for output values > 120                          */
   930  func SKP_Silk_SQRT_APPROX(tls *libc.TLS, x int32) int32 { /* SKP_Silk_Inlines.h:72:22: */
   931  	bp := tls.Alloc(8)
   932  	defer tls.Free(8)
   933  
   934  	var y int32
   935  	// var lz int32 at bp, 4
   936  
   937  	// var frac_Q7 int32 at bp+4, 4
   938  
   939  	if x <= 0 {
   940  		return 0
   941  	}
   942  
   943  	SKP_Silk_CLZ_FRAC(tls, x, bp /* &lz */, bp+4 /* &frac_Q7 */)
   944  
   945  	if (*(*int32)(unsafe.Pointer(bp /* lz */)) & 1) != 0 {
   946  		y = 32768
   947  	} else {
   948  		y = 46214 /* 46214 = sqrt(2) * 32768 */
   949  	}
   950  
   951  	/* get scaling right */
   952  	y >>= ((*(*int32)(unsafe.Pointer(bp /* lz */))) >> (1))
   953  
   954  	/* increment using fractional part of input */
   955  	y = ((y) + ((((y) >> 16) * (int32((int16((int32(int16(213))) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 4 /* frac_Q7 */)))))))))) + ((((y) & 0x0000FFFF) * (int32((int16((int32(int16(213))) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 4 /* frac_Q7 */)))))))))) >> 16)))
   956  
   957  	return y
   958  }
   959  
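// Illustrative sketch (hypothetical, not generated code): for x = 1 << 16 the
// exact square root is 256; lz = 15 (odd) selects y = 32768, the fractional
// part is 0, and y >>= lz >> 1 leaves 256, so the approximation is expected to
// land well inside the stated +/- 2.5% band.
func exampleSqrtApproxUsage(tls *libc.TLS) bool {
	y := SKP_Silk_SQRT_APPROX(tls, 1<<16)
	return y > 243 && y < 269 // expected: true (y should be ~256)
}
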
   960  /* Divide two int32 values and return result as int32 in a given Q-domain */
   961  func SKP_DIV32_varQ(tls *libc.TLS, a32 int32, b32 int32, Qres int32) int32 { /* SKP_Silk_Inlines.h:125:22: */
   962  	var a_headrm int32
   963  	var b_headrm int32
   964  	var lshift int32
   965  	var b32_inv int32
   966  	var a32_nrm int32
   967  	var b32_nrm int32
   968  	var result int32
   969  
   970  	/* Compute number of bits head room and normalize inputs */
   971  	a_headrm = (SKP_Silk_CLZ32(tls, func() int32 {
   972  		if (a32) > 0 {
   973  			return a32
   974  		}
   975  		return -a32
   976  	}()) - 1)
   977  	a32_nrm = ((a32) << (a_headrm)) /* Q: a_headrm                    */
   978  	b_headrm = (SKP_Silk_CLZ32(tls, func() int32 {
   979  		if (b32) > 0 {
   980  			return b32
   981  		}
   982  		return -b32
   983  	}()) - 1)
   984  	b32_nrm = ((b32) << (b_headrm)) /* Q: b_headrm                    */
   985  
   986  	/* Inverse of b32, with 14 bits of precision */
   987  	b32_inv = ((int32(0x7FFFFFFF) >> 2) / ((b32_nrm) >> (16))) /* Q: 29 + 16 - b_headrm        */
   988  
   989  	/* First approximation */
   990  	result = ((((a32_nrm) >> 16) * (int32(int16(b32_inv)))) + ((((a32_nrm) & 0x0000FFFF) * (int32(int16(b32_inv)))) >> 16)) /* Q: 29 + a_headrm - b_headrm    */
   991  
   992  	/* Compute residual by subtracting product of denominator and first approximation */
   993  	a32_nrm = a32_nrm - ((int32(((int64_t(b32_nrm)) * (int64_t(result))) >> (32))) << (3)) /* Q: a_headrm                    */
   994  
   995  	/* Refinement */
   996  	result = ((result) + ((((a32_nrm) >> 16) * (int32(int16(b32_inv)))) + ((((a32_nrm) & 0x0000FFFF) * (int32(int16(b32_inv)))) >> 16))) /* Q: 29 + a_headrm - b_headrm    */
   997  
   998  	/* Convert to Qres domain */
   999  	lshift = (((29 + a_headrm) - b_headrm) - Qres)
  1000  	if lshift <= 0 {
  1001  		return ((func() int32 {
  1002  			if (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift)) > (int32((0x7FFFFFFF)) >> (-lshift)) {
  1003  				return func() int32 {
  1004  					if (result) > (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift)) {
  1005  						return (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift))
  1006  					}
  1007  					return func() int32 {
  1008  						if (result) < (int32((0x7FFFFFFF)) >> (-lshift)) {
  1009  							return (int32((0x7FFFFFFF)) >> (-lshift))
  1010  						}
  1011  						return result
  1012  					}()
  1013  				}()
  1014  			}
  1015  			return func() int32 {
  1016  				if (result) > (int32((0x7FFFFFFF)) >> (-lshift)) {
  1017  					return (int32((0x7FFFFFFF)) >> (-lshift))
  1018  				}
  1019  				return func() int32 {
  1020  					if (result) < (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift)) {
  1021  						return (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift))
  1022  					}
  1023  					return result
  1024  				}()
  1025  			}()
  1026  		}()) << (-lshift))
  1027  	} else {
  1028  		if lshift < 32 {
  1029  			return ((result) >> (lshift))
  1030  		} else {
  1031  			/* Avoid undefined result */
  1032  			return 0
  1033  		}
  1034  	}
  1035  	return int32(0)
  1036  }
  1037  
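// Illustrative sketch (hypothetical, not generated code): dividing 3 by 4 with
// Qres = 16 should give roughly 0.75 in Q16, i.e. about 49152; the first
// approximation plus one refinement step above is accurate to within a few
// LSBs, so a small tolerance is used here.
func exampleDiv32VarQUsage(tls *libc.TLS) bool {
	q := SKP_DIV32_varQ(tls, 3, 4, 16)
	return q > 49100 && q < 49200 // expected: true (q should be ~49152)
}
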
  1038  /* Invert int32 value and return result as int32 in a given Q-domain */
  1039  func SKP_INVERSE32_varQ(tls *libc.TLS, b32 int32, Qres int32) int32 { /* SKP_Silk_Inlines.h:170:22: */
  1040  	var b_headrm int32
  1041  	var lshift int32
  1042  	var b32_inv int32
  1043  	var b32_nrm int32
  1044  	var err_Q32 int32
  1045  	var result int32
  1046  
  1047  	/* SKP_int32_MIN is not handled by SKP_abs */
  1048  
  1049  	/* Compute number of bits head room and normalize input */
  1050  	b_headrm = (SKP_Silk_CLZ32(tls, func() int32 {
  1051  		if (b32) > 0 {
  1052  			return b32
  1053  		}
  1054  		return -b32
  1055  	}()) - 1)
  1056  	b32_nrm = ((b32) << (b_headrm)) /* Q: b_headrm                */
  1057  
  1058  	/* Inverse of b32, with 14 bits of precision */
  1059  	b32_inv = ((int32(0x7FFFFFFF) >> 2) / ((b32_nrm) >> (16))) /* Q: 29 + 16 - b_headrm    */
  1060  
  1061  	/* First approximation */
  1062  	result = ((b32_inv) << (16)) /* Q: 61 - b_headrm            */
  1063  
  1064  	/* Compute residual by subtracting product of denominator and first approximation from one */
  1065  	err_Q32 = ((-((((b32_nrm) >> 16) * (int32(int16(b32_inv)))) + ((((b32_nrm) & 0x0000FFFF) * (int32(int16(b32_inv)))) >> 16))) << (3)) /* Q32                        */
  1066  
  1067  	/* Refinement */
  1068  	result = (((result) + ((((err_Q32) >> 16) * (int32(int16(b32_inv)))) + ((((err_Q32) & 0x0000FFFF) * (int32(int16(b32_inv)))) >> 16))) + ((err_Q32) * (func() int32 {
  1069  		if (16) == 1 {
  1070  			return (((b32_inv) >> 1) + ((b32_inv) & 1))
  1071  		}
  1072  		return ((((b32_inv) >> ((16) - 1)) + 1) >> 1)
  1073  	}()))) /* Q: 61 - b_headrm            */
  1074  
  1075  	/* Convert to Qres domain */
  1076  	lshift = ((61 - b_headrm) - Qres)
  1077  	if lshift <= 0 {
  1078  		return ((func() int32 {
  1079  			if (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift)) > (int32((0x7FFFFFFF)) >> (-lshift)) {
  1080  				return func() int32 {
  1081  					if (result) > (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift)) {
  1082  						return (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift))
  1083  					}
  1084  					return func() int32 {
  1085  						if (result) < (int32((0x7FFFFFFF)) >> (-lshift)) {
  1086  							return (int32((0x7FFFFFFF)) >> (-lshift))
  1087  						}
  1088  						return result
  1089  					}()
  1090  				}()
  1091  			}
  1092  			return func() int32 {
  1093  				if (result) > (int32((0x7FFFFFFF)) >> (-lshift)) {
  1094  					return (int32((0x7FFFFFFF)) >> (-lshift))
  1095  				}
  1096  				return func() int32 {
  1097  					if (result) < (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift)) {
  1098  						return (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift))
  1099  					}
  1100  					return result
  1101  				}()
  1102  			}()
  1103  		}()) << (-lshift))
  1104  	} else {
  1105  		if lshift < 32 {
  1106  			return ((result) >> (lshift))
  1107  		} else {
  1108  			/* Avoid undefined result */
  1109  			return 0
  1110  		}
  1111  	}
  1112  	return int32(0)
  1113  }
  1114  
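// Illustrative sketch (hypothetical, not generated code): inverting 5 with
// Qres = 16 should give roughly 1/5 in Q16, i.e. about 13107; as with
// SKP_DIV32_varQ, a small tolerance covers the last few LSBs of the
// approximation.
func exampleInverse32VarQUsage(tls *libc.TLS) bool {
	q := SKP_INVERSE32_varQ(tls, 5, 16)
	return q > 13100 && q < 13115 // expected: true (q should be ~13107)
}
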
  1115  /* Sine approximation; an input of 65536 corresponds to 2 * pi */
  1116  /* Uses polynomial expansion of the input to the power 0, 2, 4 and 6 */
  1117  /* The relative error is below 1e-5 */
  1118  func SKP_Silk_SIN_APPROX_Q24(tls *libc.TLS, x int32) int32 { /* SKP_Silk_Inlines.h:220:22: */
  1119  	var y_Q30 int32
  1120  
  1121  	/* Keep only bottom 16 bits (the function repeats itself with period 65536) */
  1122  	x = x & (65535)
  1123  
  1124  	/* Split range in four quadrants */
  1125  	if x <= 32768 {
  1126  		if x < 16384 {
  1127  			/* Return cos(pi/2 - x) */
  1128  			x = (16384 - x)
  1129  		} else {
  1130  			/* Return cos(x - pi/2) */
  1131  			x = x - (16384)
  1132  		}
  1133  		if x < 1100 {
  1134  			/* Special case: high accuracy */
  1135  			return ((int32(1) << 24) + (((((x) * (x)) >> 16) * (int32(int16(-5053)))) + (((((x) * (x)) & 0x0000FFFF) * (int32(int16(-5053)))) >> 16)))
  1136  		}
  1137  		x = (((((x) << (8)) >> 16) * (int32(int16(x)))) + (((((x) << (8)) & 0x0000FFFF) * (int32(int16(x)))) >> 16)) /* contains x^2 in Q20 */
  1138  		y_Q30 = ((1059577) + ((((x) >> 16) * (int32(int16(-5013)))) + ((((x) & 0x0000FFFF) * (int32(int16(-5013)))) >> 16)))
  1139  		y_Q30 = (((-82778932) + ((((x) >> 16) * (int32(int16(y_Q30)))) + ((((x) & 0x0000FFFF) * (int32(int16(y_Q30)))) >> 16))) + ((x) * (func() int32 {
  1140  			if (16) == 1 {
  1141  				return (((y_Q30) >> 1) + ((y_Q30) & 1))
  1142  			}
  1143  			return ((((y_Q30) >> ((16) - 1)) + 1) >> 1)
  1144  		}())))
  1145  		y_Q30 = ((((1073735400) + 66) + ((((x) >> 16) * (int32(int16(y_Q30)))) + ((((x) & 0x0000FFFF) * (int32(int16(y_Q30)))) >> 16))) + ((x) * (func() int32 {
  1146  			if (16) == 1 {
  1147  				return (((y_Q30) >> 1) + ((y_Q30) & 1))
  1148  			}
  1149  			return ((((y_Q30) >> ((16) - 1)) + 1) >> 1)
  1150  		}())))
  1151  	} else {
  1152  		if x < 49152 {
  1153  			/* Return -cos(3*pi/2 - x) */
  1154  			x = (49152 - x)
  1155  		} else {
  1156  			/* Return -cos(x - 3*pi/2) */
  1157  			x = x - (49152)
  1158  		}
  1159  		if x < 1100 {
  1160  			/* Special case: high accuracy */
  1161  			return ((int32(-1) << 24) + (((((x) * (x)) >> 16) * (int32(int16(5053)))) + (((((x) * (x)) & 0x0000FFFF) * (int32(int16(5053)))) >> 16)))
  1162  		}
  1163  		x = (((((x) << (8)) >> 16) * (int32(int16(x)))) + (((((x) << (8)) & 0x0000FFFF) * (int32(int16(x)))) >> 16)) /* contains x^2 in Q20 */
  1164  		y_Q30 = ((-1059577) + ((((x) >> 16) * (int32(int16(- -5013)))) + ((((x) & 0x0000FFFF) * (int32(int16(- -5013)))) >> 16)))
  1165  		y_Q30 = (((- -82778932) + ((((x) >> 16) * (int32(int16(y_Q30)))) + ((((x) & 0x0000FFFF) * (int32(int16(y_Q30)))) >> 16))) + ((x) * (func() int32 {
  1166  			if (16) == 1 {
  1167  				return (((y_Q30) >> 1) + ((y_Q30) & 1))
  1168  			}
  1169  			return ((((y_Q30) >> ((16) - 1)) + 1) >> 1)
  1170  		}())))
  1171  		y_Q30 = (((-1073735400) + ((((x) >> 16) * (int32(int16(y_Q30)))) + ((((x) & 0x0000FFFF) * (int32(int16(y_Q30)))) >> 16))) + ((x) * (func() int32 {
  1172  			if (16) == 1 {
  1173  				return (((y_Q30) >> 1) + ((y_Q30) & 1))
  1174  			}
  1175  			return ((((y_Q30) >> ((16) - 1)) + 1) >> 1)
  1176  		}())))
  1177  	}
  1178  	return func() int32 {
  1179  		if (6) == 1 {
  1180  			return (((y_Q30) >> 1) + ((y_Q30) & 1))
  1181  		}
  1182  		return ((((y_Q30) >> ((6) - 1)) + 1) >> 1)
  1183  	}()
  1184  }
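
// SKP_Silk_SIN_APPROX_Q24 first folds the 16-bit phase into a quarter
// period and then evaluates an even polynomial in x^2; the folding is the
// non-obvious part.  The sketch below isolates it: quadrantFoldSketch is an
// illustrative helper (not part of the SDK) that returns the folded
// argument together with the sign applied to the polynomial result.
func quadrantFoldSketch(x int32) (folded int32, sign int32) {
	x &= 65535 // the approximation repeats with period 65536 (= 2*pi)
	if x <= 32768 {
		sign = 1
		if x < 16384 {
			folded = 16384 - x // cos(pi/2 - x)
		} else {
			folded = x - 16384 // cos(x - pi/2)
		}
	} else {
		sign = -1
		if x < 49152 {
			folded = 49152 - x // -cos(3*pi/2 - x)
		} else {
			folded = x - 49152 // -cos(x - 3*pi/2)
		}
	}
	return folded, sign
}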
  1185  
  1186  /* Number of binary divisions */
  1187  
  1188  /* Flag for using 2x as many cosine sampling points, reduces the risk of missing a root */
  1189  
  1190  /* Helper function for A2NLSF(..)                    */
  1191  /* Transforms polynomials from cos(n*f) to cos(f)^n  */
  1192  func SKP_Silk_A2NLSF_trans_poly(tls *libc.TLS, p uintptr, dd int32) { /* SKP_Silk_A2NLSF.c:46:17: */
  1193  	var k int32
  1194  	var n int32
  1195  
  1196  	for k = 2; k <= dd; k++ {
  1197  		for n = dd; n > k; n-- {
  1198  			*(*int32)(unsafe.Pointer(p + uintptr((n-2))*4)) -= (*(*int32)(unsafe.Pointer(p + uintptr(n)*4)))
  1199  		}
  1200  		*(*int32)(unsafe.Pointer(p + uintptr((k-2))*4)) -= ((*(*int32)(unsafe.Pointer(p + uintptr(k)*4))) << (1))
  1201  	}
  1202  }
  1203  
  1204  /* Helper function for A2NLSF(..)                    */
  1205  /* Polynomial evaluation                             */
  1206  func SKP_Silk_A2NLSF_eval_poly(tls *libc.TLS, p uintptr, x int32, dd int32) int32 { /* SKP_Silk_A2NLSF.c:62:22: */
  1207  	var n int32
  1208  	var x_Q16 int32
  1209  	var y32 int32
  1210  
  1211  	y32 = *(*int32)(unsafe.Pointer(p + uintptr(dd)*4)) /* QPoly */
  1212  	x_Q16 = ((x) << (4))
  1213  	for n = (dd - 1); n >= 0; n-- {
  1214  		y32 = (((*(*int32)(unsafe.Pointer(p + uintptr(n)*4))) + ((((y32) >> 16) * (int32(int16(x_Q16)))) + ((((y32) & 0x0000FFFF) * (int32(int16(x_Q16)))) >> 16))) + ((y32) * (func() int32 {
  1215  			if (16) == 1 {
  1216  				return (((x_Q16) >> 1) + ((x_Q16) & 1))
  1217  			}
  1218  			return ((((x_Q16) >> ((16) - 1)) + 1) >> 1)
  1219  		}()))) /* QPoly */
  1220  	}
  1221  	return y32
  1222  }
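
// The loop above is a fixed-point Horner evaluation of
// p[0] + p[1]*x + ... + p[dd]*x^dd, with x supplied in Q12 and promoted to
// Q16 inside the routine.  A floating-point sketch of the same recurrence
// (evalPolySketch is illustrative, not part of the SDK):
func evalPolySketch(p []float64, x float64) float64 {
	y := p[len(p)-1]
	for n := len(p) - 2; n >= 0; n-- {
		y = p[n] + y*x
	}
	return y
}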
  1223  
  1224  func SKP_Silk_A2NLSF_init(tls *libc.TLS, a_Q16 uintptr, P uintptr, Q uintptr, dd int32) { /* SKP_Silk_A2NLSF.c:79:17: */
  1225  	var k int32
  1226  
  1227  	/* Convert filter coefs to even and odd polynomials */
  1228  	*(*int32)(unsafe.Pointer(P + uintptr(dd)*4)) = (int32((1)) << (16))
  1229  	*(*int32)(unsafe.Pointer(Q + uintptr(dd)*4)) = (int32((1)) << (16))
  1230  	for k = 0; k < dd; k++ {
  1231  		*(*int32)(unsafe.Pointer(P + uintptr(k)*4)) = (-*(*int32)(unsafe.Pointer(a_Q16 + uintptr(((dd-k)-1))*4)) - *(*int32)(unsafe.Pointer(a_Q16 + uintptr((dd+k))*4))) // QPoly
  1232  		*(*int32)(unsafe.Pointer(Q + uintptr(k)*4)) = (-*(*int32)(unsafe.Pointer(a_Q16 + uintptr(((dd-k)-1))*4)) + *(*int32)(unsafe.Pointer(a_Q16 + uintptr((dd+k))*4))) // QPoly
  1233  	}
  1234  
  1235  	/* Divide out zeros as we have that for even filter orders, */
  1236  	/* z =  1 is always a root in Q, and                        */
  1237  	/* z = -1 is always a root in P                             */
  1238  	for k = dd; k > 0; k-- {
  1239  		*(*int32)(unsafe.Pointer(P + uintptr((k-1))*4)) -= (*(*int32)(unsafe.Pointer(P + uintptr(k)*4)))
  1240  		*(*int32)(unsafe.Pointer(Q + uintptr((k-1))*4)) += (*(*int32)(unsafe.Pointer(Q + uintptr(k)*4)))
  1241  	}
  1242  
  1243  	/* Transform polynomials from cos(n*f) to cos(f)^n */
  1244  	SKP_Silk_A2NLSF_trans_poly(tls, P, dd)
  1245  	SKP_Silk_A2NLSF_trans_poly(tls, Q, dd)
  1246  }
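
// SKP_Silk_A2NLSF_init builds the symmetric (P) and antisymmetric (Q)
// polynomials from the prediction coefficients, divides out the known roots
// at z = +1 and z = -1, and finally maps cos(n*f) terms onto powers of
// cos(f).  A floating-point sketch of just the first of those steps,
// assuming a holds the Q16 coefficients already converted to float
// (buildPQSketch is illustrative, not part of the SDK):
func buildPQSketch(a []float64) (P, Q []float64) {
	dd := len(a) / 2
	P = make([]float64, dd+1)
	Q = make([]float64, dd+1)
	P[dd], Q[dd] = 1, 1
	for k := 0; k < dd; k++ {
		P[k] = -a[dd-k-1] - a[dd+k] // symmetric part
		Q[k] = -a[dd-k-1] + a[dd+k] // antisymmetric part
	}
	return P, Q
}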
  1247  
  1248  /* Compute Normalized Line Spectral Frequencies (NLSFs) from whitening filter coefficients        */
  1249  /* If not all roots are found, the a_Q16 coefficients are bandwidth expanded until convergence.    */
  1250  func SKP_Silk_A2NLSF(tls *libc.TLS, NLSF uintptr, a_Q16 uintptr, d int32) { /* SKP_Silk_A2NLSF.c:119:6: */
  1251  	bp := tls.Alloc(88)
  1252  	defer tls.Free(88)
  1253  
  1254  	var i int32
  1255  	var k int32
  1256  	var m int32
  1257  	var dd int32
  1258  	var root_ix int32
  1259  	var ffrac int32
  1260  	var xlo int32
  1261  	var xhi int32
  1262  	var xmid int32
  1263  	var ylo int32
  1264  	var yhi int32
  1265  	var ymid int32
  1266  	var nom int32
  1267  	var den int32
  1268  	// var P [9]int32 at bp+16, 36
  1269  
  1270  	// var Q [9]int32 at bp+52, 36
  1271  
  1272  	// var PQ [2]uintptr at bp, 16
  1273  
  1274  	var p uintptr
  1275  
  1276  	/* Store pointers to array */
  1277  	*(*uintptr)(unsafe.Pointer(bp /* &PQ[0] */)) = bp + 16       /* &P[0] */
  1278  	*(*uintptr)(unsafe.Pointer(bp /* &PQ[0] */ + 1*8)) = bp + 52 /* &Q[0] */
  1279  
  1280  	dd = ((d) >> (1))
  1281  
  1282  	SKP_Silk_A2NLSF_init(tls, a_Q16, bp+16 /* &P[0] */, bp+52 /* &Q[0] */, dd)
  1283  
  1284  	/* Find roots, alternating between P and Q */
  1285  	p = bp + 16 /* &P[0] */ /* Pointer to polynomial */
  1286  
  1287  	xlo = SKP_Silk_LSFCosTab_FIX_Q12[0] // Q12
  1288  	ylo = SKP_Silk_A2NLSF_eval_poly(tls, p, xlo, dd)
  1289  
  1290  	if ylo < 0 {
  1291  		/* Set the first NLSF to zero and move on to the next */
  1292  		*(*int32)(unsafe.Pointer(NLSF)) = 0
  1293  		p = bp + 52 /* &Q[0] */ /* Pointer to polynomial */
  1294  		ylo = SKP_Silk_A2NLSF_eval_poly(tls, p, xlo, dd)
  1295  		root_ix = 1 /* Index of current root */
  1296  	} else {
  1297  		root_ix = 0 /* Index of current root */
  1298  	}
  1299  	k = 1 /* Loop counter */
  1300  	i = 0 /* Counter for bandwidth expansions applied */
  1301  	for 1 != 0 {
  1302  		/* Evaluate polynomial */
  1303  		xhi = SKP_Silk_LSFCosTab_FIX_Q12[k] /* Q12 */
  1304  		yhi = SKP_Silk_A2NLSF_eval_poly(tls, p, xhi, dd)
  1305  
  1306  		/* Detect zero crossing */
  1307  		if ((ylo <= 0) && (yhi >= 0)) || ((ylo >= 0) && (yhi <= 0)) {
  1308  			/* Binary division */
  1309  			ffrac = -256
  1310  			for m = 0; m < 3; m++ {
  1311  				/* Evaluate polynomial */
  1312  				xmid = func() int32 {
  1313  					if (1) == 1 {
  1314  						return (((xlo + xhi) >> 1) + ((xlo + xhi) & 1))
  1315  					}
  1316  					return ((((xlo + xhi) >> ((1) - 1)) + 1) >> 1)
  1317  				}()
  1318  				ymid = SKP_Silk_A2NLSF_eval_poly(tls, p, xmid, dd)
  1319  
  1320  				/* Detect zero crossing */
  1321  				if ((ylo <= 0) && (ymid >= 0)) || ((ylo >= 0) && (ymid <= 0)) {
  1322  					/* Reduce frequency */
  1323  					xhi = xmid
  1324  					yhi = ymid
  1325  				} else {
  1326  					/* Increase frequency */
  1327  					xlo = xmid
  1328  					ylo = ymid
  1329  					ffrac = ((ffrac) + (int32((128)) >> (m)))
  1330  				}
  1331  			}
  1332  
  1333  			/* Interpolate */
  1334  			if (func() int32 {
  1335  				if (ylo) > 0 {
  1336  					return ylo
  1337  				}
  1338  				return -ylo
  1339  			}()) < 65536 {
  1340  				/* Avoid dividing by zero */
  1341  				den = (ylo - yhi)
  1342  				nom = (((ylo) << (8 - 3)) + ((den) >> (1)))
  1343  				if den != 0 {
  1344  					ffrac = ffrac + ((nom) / (den))
  1345  				}
  1346  			} else {
  1347  				/* No risk of dividing by zero because abs(ylo - yhi) >= abs(ylo) >= 65536 */
  1348  				ffrac = ffrac + ((ylo) / ((ylo - yhi) >> (8 - 3)))
  1349  			}
  1350  			*(*int32)(unsafe.Pointer(NLSF + uintptr(root_ix)*4)) = SKP_min_32(tls, (((k) << (8)) + ffrac), 0x7FFF)
  1351  
  1352  			root_ix++ /* Next root */
  1353  			if root_ix >= d {
  1354  				/* Found all roots */
  1355  				break
  1356  			}
  1357  			/* Alternate pointer to polynomial */
  1358  			p = *(*uintptr)(unsafe.Pointer(bp /* &PQ[0] */ + uintptr((root_ix&1))*8))
  1359  
  1360  			/* Evaluate polynomial */
  1361  			xlo = SKP_Silk_LSFCosTab_FIX_Q12[(k - 1)] // Q12
  1362  			ylo = ((1 - (root_ix & 2)) << (12))
  1363  		} else {
  1364  			/* Increment loop counter */
  1365  			k++
  1366  			xlo = xhi
  1367  			ylo = yhi
  1368  
  1369  			if k > 128 {
  1370  				i++
  1371  				if i > 30 {
  1372  					/* Set NLSFs to white spectrum and exit */
  1373  					*(*int32)(unsafe.Pointer(NLSF)) = ((int32(1) << 15) / (d + 1))
  1374  					for k = 1; k < d; k++ {
  1375  						*(*int32)(unsafe.Pointer(NLSF + uintptr(k)*4)) = ((int32((int16(k + 1)))) * (int32(int16(*(*int32)(unsafe.Pointer(NLSF))))))
  1376  					}
  1377  					return
  1378  				}
  1379  
  1380  				/* Error: Apply progressively more bandwidth expansion and run again */
  1381  				SKP_Silk_bwexpander_32(tls, a_Q16, d, (65536 - ((int32((int16(10 + i)))) * (int32(int16(i)))))) // 10_Q16 = 0.00015
  1382  
  1383  				SKP_Silk_A2NLSF_init(tls, a_Q16, bp+16 /* &P[0] */, bp+52 /* &Q[0] */, dd)
  1384  				p = bp + 16                         /* &P[0] */ /* Pointer to polynomial */
  1385  				xlo = SKP_Silk_LSFCosTab_FIX_Q12[0] // Q12
  1386  				ylo = SKP_Silk_A2NLSF_eval_poly(tls, p, xlo, dd)
  1387  				if ylo < 0 {
  1388  					/* Set the first NLSF to zero and move on to the next */
  1389  					*(*int32)(unsafe.Pointer(NLSF)) = 0
  1390  					p = bp + 52 /* &Q[0] */ /* Pointer to polynomial */
  1391  					ylo = SKP_Silk_A2NLSF_eval_poly(tls, p, xlo, dd)
  1392  					root_ix = 1 /* Index of current root */
  1393  				} else {
  1394  					root_ix = 0 /* Index of current root */
  1395  				}
  1396  				k = 1 /* Reset loop counter */
  1397  			}
  1398  		}
  1399  	}
  1400  }
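
// SKP_Silk_A2NLSF walks the cosine grid, alternates between the P and Q
// polynomials, brackets each sign change, refines it with three bisection
// steps and a final linear interpolation, and falls back to bandwidth
// expansion if not all roots are found.  The sketch below shows only the
// bracketing/refinement pattern over an arbitrary grid; findRootsSketch and
// its parameters are illustrative, not part of the SDK:
func findRootsSketch(f func(float64) float64, grid []float64, nBisect int) []float64 {
	var roots []float64
	yPrev := f(grid[0])
	for k := 1; k < len(grid); k++ {
		yCur := f(grid[k])
		if (yPrev <= 0 && yCur >= 0) || (yPrev >= 0 && yCur <= 0) {
			xlo, xhi, ylo, yhi := grid[k-1], grid[k], yPrev, yCur
			for i := 0; i < nBisect; i++ {
				xmid := 0.5 * (xlo + xhi)
				ymid := f(xmid)
				if (ylo <= 0 && ymid >= 0) || (ylo >= 0 && ymid <= 0) {
					xhi, yhi = xmid, ymid // root lies in the lower half
				} else {
					xlo, ylo = xmid, ymid // root lies in the upper half
				}
			}
			// Final linear interpolation inside the bracket.
			if den := ylo - yhi; den != 0 {
				roots = append(roots, xlo+(xhi-xlo)*ylo/den)
			} else {
				roots = append(roots, xlo)
			}
		}
		yPrev = yCur
	}
	return roots
}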
  1401  
  1402  /* Coefficients for 2-band filter bank based on first-order allpass filters */
  1403  // old
  1404  var A_fb1_20 = [1]int16{(int16(int32(5394) << 1))}                /* SKP_Silk_ana_filt_bank_1.c:40:18 */
  1405  var A_fb1_21 = [1]int16{(libc.Int16FromInt32(int32(20623) << 1))} /* SKP_Silk_ana_filt_bank_1.c:41:18 */
  1406  
  1407  /* wrap-around to negative number is intentional */
  1408  
  1409  /* Split signal into two decimated bands using first-order allpass filters */
  1410  func SKP_Silk_ana_filt_bank_1(tls *libc.TLS, in uintptr, S uintptr, outL uintptr, outH uintptr, scratch uintptr, N int32) { /* SKP_Silk_ana_filt_bank_1.c:44:6: */
  1411  	var k int32
  1412  	var N2 int32 = ((N) >> (1))
  1413  	var in32 int32
  1414  	var X int32
  1415  	var Y int32
  1416  	var out_1 int32
  1417  	var out_2 int32
  1418  
  1419  	/* Internal variables and state are in Q10 format */
  1420  	for k = 0; k < N2; k++ {
  1421  		/* Convert to Q10 */
  1422  		in32 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr((2*k))*2)))) << (10))
  1423  
  1424  		/* All-pass section for even input sample */
  1425  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S))))
  1426  		X = ((Y) + ((((Y) >> 16) * (int32(A_fb1_21[0]))) + ((((Y) & 0x0000FFFF) * (int32(A_fb1_21[0]))) >> 16)))
  1427  		out_1 = ((*(*int32)(unsafe.Pointer(S))) + (X))
  1428  		*(*int32)(unsafe.Pointer(S)) = ((in32) + (X))
  1429  
  1430  		/* Convert to Q10 */
  1431  		in32 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr(((2*k)+1))*2)))) << (10))
  1432  
  1433  		/* All-pass section for odd input sample */
  1434  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S + 1*4))))
  1435  		X = ((((Y) >> 16) * (int32(A_fb1_20[0]))) + ((((Y) & 0x0000FFFF) * (int32(A_fb1_20[0]))) >> 16))
  1436  		out_2 = ((*(*int32)(unsafe.Pointer(S + 1*4))) + (X))
  1437  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((in32) + (X))
  1438  
  1439  		/* Add/subtract, convert back to int16 and store to output */
  1440  		*(*int16)(unsafe.Pointer(outL + uintptr(k)*2)) = func() int16 {
  1441  			if (func() int32 {
  1442  				if (11) == 1 {
  1443  					return ((((out_2) + (out_1)) >> 1) + (((out_2) + (out_1)) & 1))
  1444  				}
  1445  				return (((((out_2) + (out_1)) >> ((11) - 1)) + 1) >> 1)
  1446  			}()) > 0x7FFF {
  1447  				return int16(0x7FFF)
  1448  			}
  1449  			return func() int16 {
  1450  				if (func() int32 {
  1451  					if (11) == 1 {
  1452  						return ((((out_2) + (out_1)) >> 1) + (((out_2) + (out_1)) & 1))
  1453  					}
  1454  					return (((((out_2) + (out_1)) >> ((11) - 1)) + 1) >> 1)
  1455  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
  1456  					return libc.Int16FromInt32(0x8000)
  1457  				}
  1458  				return func() int16 {
  1459  					if (11) == 1 {
  1460  						return (int16((((out_2) + (out_1)) >> 1) + (((out_2) + (out_1)) & 1)))
  1461  					}
  1462  					return (int16(((((out_2) + (out_1)) >> ((11) - 1)) + 1) >> 1))
  1463  				}()
  1464  			}()
  1465  		}()
  1466  		*(*int16)(unsafe.Pointer(outH + uintptr(k)*2)) = func() int16 {
  1467  			if (func() int32 {
  1468  				if (11) == 1 {
  1469  					return ((((out_2) - (out_1)) >> 1) + (((out_2) - (out_1)) & 1))
  1470  				}
  1471  				return (((((out_2) - (out_1)) >> ((11) - 1)) + 1) >> 1)
  1472  			}()) > 0x7FFF {
  1473  				return int16(0x7FFF)
  1474  			}
  1475  			return func() int16 {
  1476  				if (func() int32 {
  1477  					if (11) == 1 {
  1478  						return ((((out_2) - (out_1)) >> 1) + (((out_2) - (out_1)) & 1))
  1479  					}
  1480  					return (((((out_2) - (out_1)) >> ((11) - 1)) + 1) >> 1)
  1481  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
  1482  					return libc.Int16FromInt32(0x8000)
  1483  				}
  1484  				return func() int16 {
  1485  					if (11) == 1 {
  1486  						return (int16((((out_2) - (out_1)) >> 1) + (((out_2) - (out_1)) & 1)))
  1487  					}
  1488  					return (int16(((((out_2) - (out_1)) >> ((11) - 1)) + 1) >> 1))
  1489  				}()
  1490  			}()
  1491  		}()
  1492  	}
  1493  }
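
// Each output band above comes from a first-order allpass section followed
// by a sum/difference of the even- and odd-sample paths; the even path adds
// Y back in once, which together with the intentionally wrapped-negative
// table value appears to realise an effective coefficient close to 0.63.  A
// floating-point sketch of one allpass update with coefficient c and a
// one-element state, matching the structure of the odd-sample path
// (allpassStepSketch is illustrative, not part of the SDK):
func allpassStepSketch(in float64, state *float64, c float64) float64 {
	x := c * (in - *state) // scaled difference between input and state
	out := *state + x      // allpass output
	*state = in + x        // state update for the next sample
	return out
}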
  1494  
  1495  /* Apply sine window to signal vector.                                      */
  1496  /* Window types:                                                            */
  1497  /*    1 -> sine window from 0 to pi/2                                       */
  1498  /*    2 -> sine window from pi/2 to pi                                      */
  1499  /* Every other sample is linearly interpolated, for speed.                  */
  1500  /* Window length must be between 16 and 120 (incl) and a multiple of 4.     */
  1501  
  1502  /* Matlab code for table:
  1503     for k=16:9*4:16+2*9*4, fprintf(' %7.d,', -round(65536*pi ./ (k:4:k+8*4))); fprintf('\n'); end
  1504  */
  1505  var freq_table_Q16 = [27]int16{
  1506  	int16(12111), int16(9804), int16(8235), int16(7100), int16(6239), int16(5565), int16(5022), int16(4575), int16(4202),
  1507  	int16(3885), int16(3612), int16(3375), int16(3167), int16(2984), int16(2820), int16(2674), int16(2542), int16(2422),
  1508  	int16(2313), int16(2214), int16(2123), int16(2038), int16(1961), int16(1889), int16(1822), int16(1760), int16(1702),
  1509  } /* SKP_Silk_apply_sine_window.c:40:18 */
  1510  
  1511  func SKP_Silk_apply_sine_window(tls *libc.TLS, px_win uintptr, px uintptr, win_type int32, length int32) { /* SKP_Silk_apply_sine_window.c:47:6: */
  1512  	var k int32
  1513  	var f_Q16 int32
  1514  	var c_Q16 int32
  1515  	var S0_Q16 int32
  1516  	var S1_Q16 int32
  1517  
  1518  	/* Length must be in a range from 16 to 120 and a multiple of 4 */
  1519  
  1520  	/* Input pointer must be 4-byte aligned */
  1521  
  1522  	/* Frequency */
  1523  	k = ((length >> 2) - 4)
  1524  
  1525  	f_Q16 = int32(freq_table_Q16[k])
  1526  
  1527  	/* Factor used for cosine approximation */
  1528  	c_Q16 = ((((f_Q16) >> 16) * (int32(int16(-f_Q16)))) + ((((f_Q16) & 0x0000FFFF) * (int32(int16(-f_Q16)))) >> 16))
  1529  
  1530  	/* initialize state */
  1531  	if win_type == 1 {
  1532  		/* start from 0 */
  1533  		S0_Q16 = 0
  1534  		/* approximation of sin(f) */
  1535  		S1_Q16 = (f_Q16 + ((length) >> (3)))
  1536  	} else {
  1537  		/* start from 1 */
  1538  		S0_Q16 = (int32(1) << 16)
  1539  		/* approximation of cos(f) */
  1540  		S1_Q16 = (((int32(1) << 16) + ((c_Q16) >> (1))) + ((length) >> (4)))
  1541  	}
  1542  
  1543  	/* Uses the recursive equation:   sin(n*f) = 2 * cos(f) * sin((n-1)*f) - sin((n-2)*f)    */
  1544  	/* 4 samples at a time */
  1545  	for k = 0; k < length; k = k + (4) {
  1546  		*(*int16)(unsafe.Pointer(px_win + uintptr(k)*2)) = (int16(((((S0_Q16 + S1_Q16) >> (1)) >> 16) * (int32(*(*int16)(unsafe.Pointer(px + uintptr(k)*2))))) + (((((S0_Q16 + S1_Q16) >> (1)) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(px + uintptr(k)*2))))) >> 16)))
  1547  		*(*int16)(unsafe.Pointer(px_win + uintptr((k+1))*2)) = (int16((((S1_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(px + uintptr((k+1))*2))))) + ((((S1_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(px + uintptr((k+1))*2))))) >> 16)))
  1548  		S0_Q16 = (((((((S1_Q16) >> 16) * (int32(int16(c_Q16)))) + ((((S1_Q16) & 0x0000FFFF) * (int32(int16(c_Q16)))) >> 16)) + ((S1_Q16) << (1))) - S0_Q16) + 1)
  1549  		S0_Q16 = func() int32 {
  1550  			if (S0_Q16) < (int32(1) << 16) {
  1551  				return S0_Q16
  1552  			}
  1553  			return (int32(1) << 16)
  1554  		}()
  1555  
  1556  		*(*int16)(unsafe.Pointer(px_win + uintptr((k+2))*2)) = (int16(((((S0_Q16 + S1_Q16) >> (1)) >> 16) * (int32(*(*int16)(unsafe.Pointer(px + uintptr((k+2))*2))))) + (((((S0_Q16 + S1_Q16) >> (1)) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(px + uintptr((k+2))*2))))) >> 16)))
  1557  		*(*int16)(unsafe.Pointer(px_win + uintptr((k+3))*2)) = (int16((((S0_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(px + uintptr((k+3))*2))))) + ((((S0_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(px + uintptr((k+3))*2))))) >> 16)))
  1558  		S1_Q16 = ((((((S0_Q16) >> 16) * (int32(int16(c_Q16)))) + ((((S0_Q16) & 0x0000FFFF) * (int32(int16(c_Q16)))) >> 16)) + ((S0_Q16) << (1))) - S1_Q16)
  1559  		S1_Q16 = func() int32 {
  1560  			if (S1_Q16) < (int32(1) << 16) {
  1561  				return S1_Q16
  1562  			}
  1563  			return (int32(1) << 16)
  1564  		}()
  1565  	}
  1566  }
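
// The windowing loop relies on the recursion
//   sin(n*f) = 2*cos(f)*sin((n-1)*f) - sin((n-2)*f)
// with 2*cos(f) approximated as 2 + c, c = -f*f, so each pair of outputs
// costs one multiply plus additions, and every other window value is taken
// from the midpoint of the two states.  A floating-point sketch of the same
// structure for a length that is a multiple of 4; sineWindowSketch is
// illustrative, not part of the SDK, and startFromOne corresponds to
// win_type 2:
func sineWindowSketch(px []float64, f float64, startFromOne bool) []float64 {
	c := -f * f // approximates 2*(cos(f) - 1) for small f
	s0, s1 := 0.0, f
	if startFromOne {
		s0, s1 = 1.0, 1.0+0.5*c
	}
	out := make([]float64, len(px))
	for k := 0; k+3 < len(px); k += 4 {
		out[k] = px[k] * 0.5 * (s0 + s1) // interpolated sample
		out[k+1] = px[k+1] * s1
		s0 = s1*(2+c) - s0 // advance the recursion by two samples
		out[k+2] = px[k+2] * 0.5 * (s0 + s1)
		out[k+3] = px[k+3] * s0
		s1 = s0*(2+c) - s1
	}
	return out
}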
  1567  
  1568  /* Function that returns the maximum absolute value of the input vector */
  1569  func SKP_Silk_int16_array_maxabs(tls *libc.TLS, vec uintptr, len int32) int16 { /* SKP_Silk_array_maxabs.c:40:11: */
  1570  	var max int32 = 0
  1571  	var i int32
  1572  	var lvl int32 = 0
  1573  	var ind int32
  1574  	if len == 0 {
  1575  		return int16(0)
  1576  	}
  1577  
  1578  	ind = (len - 1)
  1579  	max = ((int32(*(*int16)(unsafe.Pointer(vec + uintptr(ind)*2)))) * (int32(*(*int16)(unsafe.Pointer(vec + uintptr(ind)*2)))))
  1580  	for i = (len - 2); i >= 0; i-- {
  1581  		lvl = ((int32(*(*int16)(unsafe.Pointer(vec + uintptr(i)*2)))) * (int32(*(*int16)(unsafe.Pointer(vec + uintptr(i)*2)))))
  1582  		if lvl > max {
  1583  			max = lvl
  1584  			ind = i
  1585  		}
  1586  	}
  1587  
  1588  	/* Do not return 32768, as it will not fit in an int16 and may lead to problems later on */
  1589  	if max >= 1073676289 { // (2^15-1)^2 = 1073676289
  1590  		return int16(0x7FFF)
  1591  	} else {
  1592  		if int32(*(*int16)(unsafe.Pointer(vec + uintptr(ind)*2))) < 0 {
  1593  			return int16(-int32(*(*int16)(unsafe.Pointer(vec + uintptr(ind)*2))))
  1594  		} else {
  1595  			return *(*int16)(unsafe.Pointer(vec + uintptr(ind)*2))
  1596  		}
  1597  	}
  1598  	return int16(0)
  1599  }
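
// The subtle point above is the saturation: if the largest-magnitude sample
// is -32768, negating it would not fit in an int16, so 32767 is returned
// instead.  A plain-Go sketch of the same contract (maxAbs16Sketch is
// illustrative, not part of the SDK):
func maxAbs16Sketch(vec []int16) int16 {
	var maxAbs int32
	for _, v := range vec {
		a := int32(v)
		if a < 0 {
			a = -a
		}
		if a > maxAbs {
			maxAbs = a
		}
	}
	if maxAbs > 32767 { // only possible when some sample equals -32768
		return 32767
	}
	return int16(maxAbs)
}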
  1600  
  1601  /* Compute autocorrelation */
  1602  func SKP_Silk_autocorr(tls *libc.TLS, results uintptr, scale uintptr, inputData uintptr, inputDataSize int32, correlationCount int32) { /* SKP_Silk_autocorr.c:40:6: */
  1603  	var i int32
  1604  	var lz int32
  1605  	var nRightShifts int32
  1606  	var corrCount int32
  1607  	var corr64 int64_t
  1608  
  1609  	corrCount = SKP_min_int(tls, inputDataSize, correlationCount)
  1610  
  1611  	/* compute energy (zero-lag correlation) */
  1612  	corr64 = SKP_Silk_inner_prod16_aligned_64(tls, inputData, inputData, inputDataSize)
  1613  
  1614  	/* deal with all-zero input data */
  1615  	corr64 = corr64 + (int64(1))
  1616  
  1617  	/* number of leading zeros */
  1618  	lz = SKP_Silk_CLZ64(tls, corr64)
  1619  
  1620  	/* scaling: number of right shifts applied to correlations */
  1621  	nRightShifts = (35 - lz)
  1622  	*(*int32)(unsafe.Pointer(scale)) = nRightShifts
  1623  
  1624  	if nRightShifts <= 0 {
  1625  		*(*int32)(unsafe.Pointer(results)) = ((int32(corr64)) << (-nRightShifts))
  1626  
  1627  		/* compute remaining correlations based on int32 inner product */
  1628  		for i = 1; i < corrCount; i++ {
  1629  			*(*int32)(unsafe.Pointer(results + uintptr(i)*4)) = ((SKP_Silk_inner_prod_aligned(tls, inputData, (inputData + uintptr(i)*2), (inputDataSize - i))) << (-nRightShifts))
  1630  		}
  1631  	} else {
  1632  		*(*int32)(unsafe.Pointer(results)) = (int32((corr64) >> (nRightShifts)))
  1633  
  1634  		/* compute remaining correlations based on int64 inner product */
  1635  		for i = 1; i < corrCount; i++ {
  1636  			*(*int32)(unsafe.Pointer(results + uintptr(i)*4)) = (int32((SKP_Silk_inner_prod16_aligned_64(tls, inputData, (inputData + uintptr(i)*2), (inputDataSize - i))) >> (nRightShifts)))
  1637  		}
  1638  	}
  1639  }
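
// The routine above chooses a per-call right-shift so that the zero-lag
// term (the signal energy) fits safely in 32 bits, then reuses that shift
// for every lag.  A plain int64 sketch of the underlying computation
// without the scaling machinery (autocorrSketch is illustrative, not part
// of the SDK):
func autocorrSketch(input []int16, count int) []int64 {
	if count > len(input) {
		count = len(input)
	}
	results := make([]int64, count)
	for lag := 0; lag < count; lag++ {
		var acc int64
		for i := 0; i+lag < len(input); i++ {
			acc += int64(input[i]) * int64(input[i+lag])
		}
		results[lag] = acc
	}
	return results
}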
  1640  
  1641  /* Second order ARMA filter */
  1642  /* Can handle slowly varying filter coefficients */
  1643  func SKP_Silk_biquad(tls *libc.TLS, in uintptr, B uintptr, A uintptr, S uintptr, out uintptr, len int32) { /* SKP_Silk_biquad.c:41:6: */
  1644  	var k int32
  1645  	var in16 int32
  1646  	var A0_neg int32
  1647  	var A1_neg int32
  1648  	var S0 int32
  1649  	var S1 int32
  1650  	var out32 int32
  1651  	var tmp32 int32
  1652  
  1653  	S0 = *(*int32)(unsafe.Pointer(S))
  1654  	S1 = *(*int32)(unsafe.Pointer(S + 1*4))
  1655  	A0_neg = -int32(*(*int16)(unsafe.Pointer(A)))
  1656  	A1_neg = -int32(*(*int16)(unsafe.Pointer(A + 1*2)))
  1657  	for k = 0; k < len; k++ {
  1658  		/* S[ 0 ], S[ 1 ]: Q13 */
  1659  		in16 = int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))
  1660  		out32 = ((S0) + ((int32(int16(in16))) * (int32(*(*int16)(unsafe.Pointer(B))))))
  1661  
  1662  		S0 = ((S1) + ((int32(int16(in16))) * (int32(*(*int16)(unsafe.Pointer(B + 1*2))))))
  1663  		S0 = S0 + (((((out32) >> 16) * (int32(int16(A0_neg)))) + ((((out32) & 0x0000FFFF) * (int32(int16(A0_neg)))) >> 16)) << (3))
  1664  
  1665  		S1 = (((((out32) >> 16) * (int32(int16(A1_neg)))) + ((((out32) & 0x0000FFFF) * (int32(int16(A1_neg)))) >> 16)) << (3))
  1666  		S1 = ((S1) + ((int32(int16(in16))) * (int32(*(*int16)(unsafe.Pointer(B + 2*2))))))
  1667  		tmp32 = ((func() int32 {
  1668  			if (13) == 1 {
  1669  				return (((out32) >> 1) + ((out32) & 1))
  1670  			}
  1671  			return ((((out32) >> ((13) - 1)) + 1) >> 1)
  1672  		}()) + 1)
  1673  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
  1674  			if (tmp32) > 0x7FFF {
  1675  				return int16(0x7FFF)
  1676  			}
  1677  			return func() int16 {
  1678  				if (tmp32) < (int32(libc.Int16FromInt32(0x8000))) {
  1679  					return libc.Int16FromInt32(0x8000)
  1680  				}
  1681  				return int16(tmp32)
  1682  			}()
  1683  		}()
  1684  	}
  1685  	*(*int32)(unsafe.Pointer(S)) = S0
  1686  	*(*int32)(unsafe.Pointer(S + 1*4)) = S1
  1687  }
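
// A floating-point sketch of the same second-order ARMA update, keeping the
// two-element state of the fixed-point version above (biquadStepSketch is
// illustrative, not part of the SDK).  b holds the three feed-forward taps
// and a the two feedback taps:
func biquadStepSketch(in float64, b [3]float64, a [2]float64, s *[2]float64) float64 {
	out := s[0] + b[0]*in            // current output from input and state
	s[0] = s[1] + b[1]*in - a[0]*out // fold the output back into the state
	s[1] = b[2]*in - a[1]*out
	return out
}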
  1688  
  1689  /* Second order ARMA filter, alternative implementation */
  1690  func SKP_Silk_biquad_alt(tls *libc.TLS, in uintptr, B_Q28 uintptr, A_Q28 uintptr, S uintptr, out uintptr, len int32) { /* SKP_Silk_biquad_alt.c:38:6: */
  1691  	/* DIRECT FORM II TRANSPOSED (uses 2 element state vector) */
  1692  	var k int32
  1693  	var inval int32
  1694  	var A0_U_Q28 int32
  1695  	var A0_L_Q28 int32
  1696  	var A1_U_Q28 int32
  1697  	var A1_L_Q28 int32
  1698  	var out32_Q14 int32
  1699  
  1700  	/* Negate A_Q28 values and split in two parts */
  1701  	A0_L_Q28 = ((-*(*int32)(unsafe.Pointer(A_Q28))) & 0x00003FFF)       /* lower part */
  1702  	A0_U_Q28 = ((-*(*int32)(unsafe.Pointer(A_Q28))) >> (14))            /* upper part */
  1703  	A1_L_Q28 = ((-*(*int32)(unsafe.Pointer(A_Q28 + 1*4))) & 0x00003FFF) /* lower part */
  1704  	A1_U_Q28 = ((-*(*int32)(unsafe.Pointer(A_Q28 + 1*4))) >> (14))      /* upper part */
  1705  
  1706  	for k = 0; k < len; k++ {
  1707  		/* S[ 0 ], S[ 1 ]: Q12 */
  1708  		inval = int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))
  1709  		out32_Q14 = (((*(*int32)(unsafe.Pointer(S))) + ((((*(*int32)(unsafe.Pointer(B_Q28))) >> 16) * (int32(int16(inval)))) + ((((*(*int32)(unsafe.Pointer(B_Q28))) & 0x0000FFFF) * (int32(int16(inval)))) >> 16))) << (2))
  1710  
  1711  		*(*int32)(unsafe.Pointer(S)) = (*(*int32)(unsafe.Pointer(S + 1*4)) + (func() int32 {
  1712  			if (14) == 1 {
  1713  				return ((((((out32_Q14) >> 16) * (int32(int16(A0_L_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A0_L_Q28)))) >> 16)) >> 1) + (((((out32_Q14) >> 16) * (int32(int16(A0_L_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A0_L_Q28)))) >> 16)) & 1))
  1714  			}
  1715  			return (((((((out32_Q14) >> 16) * (int32(int16(A0_L_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A0_L_Q28)))) >> 16)) >> ((14) - 1)) + 1) >> 1)
  1716  		}()))
  1717  		*(*int32)(unsafe.Pointer(S)) = ((*(*int32)(unsafe.Pointer(S))) + ((((out32_Q14) >> 16) * (int32(int16(A0_U_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A0_U_Q28)))) >> 16)))
  1718  		*(*int32)(unsafe.Pointer(S)) = ((*(*int32)(unsafe.Pointer(S))) + ((((*(*int32)(unsafe.Pointer(B_Q28 + 1*4))) >> 16) * (int32(int16(inval)))) + ((((*(*int32)(unsafe.Pointer(B_Q28 + 1*4))) & 0x0000FFFF) * (int32(int16(inval)))) >> 16)))
  1719  
  1720  		*(*int32)(unsafe.Pointer(S + 1*4)) = func() int32 {
  1721  			if (14) == 1 {
  1722  				return ((((((out32_Q14) >> 16) * (int32(int16(A1_L_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A1_L_Q28)))) >> 16)) >> 1) + (((((out32_Q14) >> 16) * (int32(int16(A1_L_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A1_L_Q28)))) >> 16)) & 1))
  1723  			}
  1724  			return (((((((out32_Q14) >> 16) * (int32(int16(A1_L_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A1_L_Q28)))) >> 16)) >> ((14) - 1)) + 1) >> 1)
  1725  		}()
  1726  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((*(*int32)(unsafe.Pointer(S + 1*4))) + ((((out32_Q14) >> 16) * (int32(int16(A1_U_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A1_U_Q28)))) >> 16)))
  1727  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((*(*int32)(unsafe.Pointer(S + 1*4))) + ((((*(*int32)(unsafe.Pointer(B_Q28 + 2*4))) >> 16) * (int32(int16(inval)))) + ((((*(*int32)(unsafe.Pointer(B_Q28 + 2*4))) & 0x0000FFFF) * (int32(int16(inval)))) >> 16)))
  1728  
  1729  		/* Scale back to Q0 and saturate */
  1730  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
  1731  			if (((out32_Q14 + (int32(1) << 14)) - 1) >> (14)) > 0x7FFF {
  1732  				return int16(0x7FFF)
  1733  			}
  1734  			return func() int16 {
  1735  				if (((out32_Q14 + (int32(1) << 14)) - 1) >> (14)) < (int32(libc.Int16FromInt32(0x8000))) {
  1736  					return libc.Int16FromInt32(0x8000)
  1737  				}
  1738  				return (int16(((out32_Q14 + (int32(1) << 14)) - 1) >> (14)))
  1739  			}()
  1740  		}()
  1741  	}
  1742  }
  1743  
  1744  /* Compute reflection coefficients from input signal */
  1745  func SKP_Silk_burg_modified(tls *libc.TLS, res_nrg uintptr, res_nrg_Q uintptr, A_Q16 uintptr, x uintptr, subfr_length int32, nb_subfr int32, WhiteNoiseFrac_Q32 int32, D int32) { /* SKP_Silk_burg_modified.c:49:6: */
  1746  	bp := tls.Alloc(336)
  1747  	defer tls.Free(336)
  1748  
  1749  	var k int32
  1750  	var n int32
  1751  	var s int32
  1752  	var lz int32
  1753  	// var rshifts int32 at bp+4, 4
  1754  
  1755  	var rshifts_extra int32
  1756  	// var C0 int32 at bp, 4
  1757  
  1758  	var num int32
  1759  	var nrg int32
  1760  	var rc_Q31 int32
  1761  	var Atmp_QA int32
  1762  	var Atmp1 int32
  1763  	var tmp1 int32
  1764  	var tmp2 int32
  1765  	var x1 int32
  1766  	var x2 int32
  1767  	var x_ptr uintptr
  1768  	// var C_first_row [16]int32 at bp+8, 64
  1769  
  1770  	// var C_last_row [16]int32 at bp+72, 64
  1771  
  1772  	// var Af_QA [16]int32 at bp+272, 64
  1773  
  1774  	// var CAf [17]int32 at bp+204, 68
  1775  
  1776  	// var CAb [17]int32 at bp+136, 68
  1777  
  1778  	/* Compute autocorrelations, added over subframes */
  1779  	SKP_Silk_sum_sqr_shift(tls, bp /* &C0 */, bp+4 /* &rshifts */, x, (nb_subfr * subfr_length))
  1780  	if *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) > (32 - 25) {
  1781  		*(*int32)(unsafe.Pointer(bp /* C0 */)) = ((*(*int32)(unsafe.Pointer(bp /* C0 */))) << (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - (32 - 25)))
  1782  
  1783  		*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) = (32 - 25)
  1784  	} else {
  1785  		lz = (SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(bp /* C0 */))) - 1)
  1786  		rshifts_extra = (2 - lz)
  1787  		if rshifts_extra > 0 {
  1788  			rshifts_extra = func() int32 {
  1789  				if (rshifts_extra) < ((32 - 25) - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */))) {
  1790  					return rshifts_extra
  1791  				}
  1792  				return ((32 - 25) - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)))
  1793  			}()
  1794  			*(*int32)(unsafe.Pointer(bp /* C0 */)) = ((*(*int32)(unsafe.Pointer(bp /* C0 */))) >> (rshifts_extra))
  1795  		} else {
  1796  			rshifts_extra = func() int32 {
  1797  				if (rshifts_extra) > (-16 - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */))) {
  1798  					return rshifts_extra
  1799  				}
  1800  				return (-16 - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)))
  1801  			}()
  1802  			*(*int32)(unsafe.Pointer(bp /* C0 */)) = ((*(*int32)(unsafe.Pointer(bp /* C0 */))) << (-rshifts_extra))
  1803  		}
  1804  		*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) += rshifts_extra
  1805  	}
  1806  	libc.Xmemset(tls, bp+8 /* &C_first_row[0] */, 0, (uint64(16) * uint64(unsafe.Sizeof(int32(0)))))
  1807  	if *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) > 0 {
  1808  		for s = 0; s < nb_subfr; s++ {
  1809  			x_ptr = (x + uintptr((s*subfr_length))*2)
  1810  			for n = 1; n < (D + 1); n++ {
  1811  				*(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row */ + uintptr((n-1))*4)) += (int32((SKP_Silk_inner_prod16_aligned_64(tls, x_ptr, (x_ptr + uintptr(n)*2), (subfr_length - n))) >> (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)))))
  1812  			}
  1813  		}
  1814  	} else {
  1815  		for s = 0; s < nb_subfr; s++ {
  1816  			x_ptr = (x + uintptr((s*subfr_length))*2)
  1817  			for n = 1; n < (D + 1); n++ {
  1818  				*(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row */ + uintptr((n-1))*4)) += ((SKP_Silk_inner_prod_aligned(tls, x_ptr, (x_ptr + uintptr(n)*2), (subfr_length - n))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */))))
  1819  			}
  1820  		}
  1821  	}
  1822  	libc.Xmemcpy(tls, bp+72 /* &C_last_row[0] */, bp+8 /* &C_first_row[0] */, (uint64(16) * uint64(unsafe.Sizeof(int32(0)))))
  1823  
  1824  	/* Initialize */
  1825  	*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */)) = libc.AssignPtrInt32(bp+204 /* &CAf */, ((*(*int32)(unsafe.Pointer(bp /* C0 */)) + (int32(((int64_t(WhiteNoiseFrac_Q32)) * (int64_t(*(*int32)(unsafe.Pointer(bp /* C0 */))))) >> (32)))) + 1)) // Q(-rshifts)
  1826  
  1827  	for n = 0; n < D; n++ {
  1828  		/* Update first row of correlation matrix (without first element) */
  1829  		/* Update last row of correlation matrix (without last element, stored in reversed order) */
  1830  		/* Update C * Af */
  1831  		/* Update C * flipud(Af) (stored in reversed order) */
  1832  		if *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) > -2 {
  1833  			for s = 0; s < nb_subfr; s++ {
  1834  				x_ptr = (x + uintptr((s*subfr_length))*2)
  1835  				x1 = -((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(n)*2)))) << (16 - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */))))                    // Q(16-rshifts)
  1836  				x2 = -((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)-1))*2)))) << (16 - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)))) // Q(16-rshifts)
  1837  				tmp1 = ((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(n)*2)))) << (25 - 16))                                                                // Q(QA-16)
  1838  				tmp2 = ((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)-1))*2)))) << (25 - 16))                                             // Q(QA-16)
  1839  				for k = 0; k < n; k++ {
  1840  					*(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row[0] */ + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row[0] */ + uintptr(k)*4))) + ((((x1) >> 16) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((n-k)-1))*2))))) + ((((x1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((n-k)-1))*2))))) >> 16)))                       // Q( -rshifts )
  1841  					*(*int32)(unsafe.Pointer(bp + 72 /* &C_last_row[0] */ + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(bp + 72 /* &C_last_row[0] */ + uintptr(k)*4))) + ((((x2) >> 16) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)+k))*2))))) + ((((x2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)+k))*2))))) >> 16))) // Q( -rshifts )
  1842  					Atmp_QA = *(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))
  1843  					tmp1 = ((tmp1) + ((((Atmp_QA) >> 16) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((n-k)-1))*2))))) + ((((Atmp_QA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((n-k)-1))*2))))) >> 16)))                       // Q(QA-16)
  1844  					tmp2 = ((tmp2) + ((((Atmp_QA) >> 16) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)+k))*2))))) + ((((Atmp_QA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)+k))*2))))) >> 16))) // Q(QA-16)
  1845  				}
  1846  				tmp1 = ((-tmp1) << ((32 - 25) - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)))) // Q(16-rshifts)
  1847  				tmp2 = ((-tmp2) << ((32 - 25) - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)))) // Q(16-rshifts)
  1848  				for k = 0; k <= n; k++ {
  1849  					*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr(k)*4))) + ((((tmp1) >> 16) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((n-k))*2))))) + ((((tmp1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((n-k))*2))))) >> 16)))                                       // Q( -rshift )
  1850  					*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr(k)*4))) + ((((tmp2) >> 16) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((((subfr_length-n)+k)-1))*2))))) + ((((tmp2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((((subfr_length-n)+k)-1))*2))))) >> 16))) // Q( -rshift )
  1851  				}
  1852  			}
  1853  		} else {
  1854  			for s = 0; s < nb_subfr; s++ {
  1855  				x_ptr = (x + uintptr((s*subfr_length))*2)
  1856  				x1 = -((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(n)*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */))))                    // Q( -rshifts )
  1857  				x2 = -((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)-1))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)))) // Q( -rshifts )
  1858  				tmp1 = ((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(n)*2)))) << (17))                                                                 // Q17
  1859  				tmp2 = ((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)-1))*2)))) << (17))                                              // Q17
  1860  				for k = 0; k < n; k++ {
  1861  					*(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row[0] */ + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row[0] */ + uintptr(k)*4))) + ((x1) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((n-k)-1))*2))))))            // Q( -rshifts )
  1862  					*(*int32)(unsafe.Pointer(bp + 72 /* &C_last_row[0] */ + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(bp + 72 /* &C_last_row[0] */ + uintptr(k)*4))) + ((x2) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)+k))*2)))))) // Q( -rshifts )
  1863  					Atmp1 = func() int32 {
  1864  						if (25 - 17) == 1 {
  1865  							return (((*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))) & 1))
  1866  						}
  1867  						return ((((*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))) >> ((25 - 17) - 1)) + 1) >> 1)
  1868  					}() // Q17
  1869  					tmp1 = ((tmp1) + ((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((n-k)-1))*2)))) * (Atmp1)))            // Q17
  1870  					tmp2 = ((tmp2) + ((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)+k))*2)))) * (Atmp1))) // Q17
  1871  				}
  1872  				tmp1 = -tmp1 // Q17
  1873  				tmp2 = -tmp2 // Q17
  1874  				for k = 0; k <= n; k++ {
  1875  					*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr(k)*4)) = (((*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr(k)*4))) + ((((tmp1) >> 16) * (int32((int16((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((n-k))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)))))) + ((((tmp1) & 0x0000FFFF) * (int32((int16((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((n-k))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)))))) >> 16))) + ((tmp1) * (func() int32 {
  1876  						if (16) == 1 {
  1877  							return ((((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((n-k))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)) >> 1) + (((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((n-k))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)) & 1))
  1878  						}
  1879  						return (((((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((n-k))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)) >> ((16) - 1)) + 1) >> 1)
  1880  					}()))) // Q( -rshift )
  1881  					*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr(k)*4)) = (((*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr(k)*4))) + ((((tmp2) >> 16) * (int32((int16((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((((subfr_length-n)+k)-1))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)))))) + ((((tmp2) & 0x0000FFFF) * (int32((int16((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((((subfr_length-n)+k)-1))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)))))) >> 16))) + ((tmp2) * (func() int32 {
  1882  						if (16) == 1 {
  1883  							return ((((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((((subfr_length-n)+k)-1))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)) >> 1) + (((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((((subfr_length-n)+k)-1))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)) & 1))
  1884  						}
  1885  						return (((((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((((subfr_length-n)+k)-1))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)) >> ((16) - 1)) + 1) >> 1)
  1886  					}()))) // Q( -rshift )
  1887  				}
  1888  			}
  1889  		}
  1890  
  1891  		/* Calculate numerator and denominator for the next order reflection (parcor) coefficient */
  1892  		tmp1 = *(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row[0] */ + uintptr(n)*4))                                     // Q( -rshifts )
  1893  		tmp2 = *(*int32)(unsafe.Pointer(bp + 72 /* &C_last_row[0] */ + uintptr(n)*4))                                     // Q( -rshifts )
  1894  		num = 0                                                                                                           // Q( -rshifts )
  1895  		nrg = ((*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */))) + (*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */)))) // Q( 1-rshifts )
  1896  		for k = 0; k < n; k++ {
  1897  			Atmp_QA = *(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))
  1898  			lz = (SKP_Silk_CLZ32(tls, func() int32 {
  1899  				if (Atmp_QA) > 0 {
  1900  					return Atmp_QA
  1901  				}
  1902  				return -Atmp_QA
  1903  			}()) - 1)
  1904  			lz = func() int32 {
  1905  				if (32 - 25) < (lz) {
  1906  					return (32 - 25)
  1907  				}
  1908  				return lz
  1909  			}()
  1910  			Atmp1 = ((Atmp_QA) << (lz)) // Q( QA + lz )
  1911  
  1912  			tmp1 = ((tmp1) + ((int32(((int64_t(*(*int32)(unsafe.Pointer(bp + 72 /* &C_last_row[0] */ + uintptr(((n-k)-1))*4)))) * (int64_t(Atmp1))) >> (32))) << ((32 - 25) - lz)))                                                                // Q( -rshifts )
  1913  			tmp2 = ((tmp2) + ((int32(((int64_t(*(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row[0] */ + uintptr(((n-k)-1))*4)))) * (int64_t(Atmp1))) >> (32))) << ((32 - 25) - lz)))                                                                // Q( -rshifts )
  1914  			num = ((num) + ((int32(((int64_t(*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr((n-k))*4)))) * (int64_t(Atmp1))) >> (32))) << ((32 - 25) - lz)))                                                                            // Q( -rshifts )
  1915  			nrg = ((nrg) + ((int32(((int64_t((*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr((k+1))*4))) + (*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr((k+1))*4))))) * (int64_t(Atmp1))) >> (32))) << ((32 - 25) - lz))) // Q( 1-rshifts )
  1916  		}
  1917  		*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr((n+1))*4)) = tmp1 // Q( -rshifts )
  1918  		*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr((n+1))*4)) = tmp2 // Q( -rshifts )
  1919  		num = ((num) + (tmp2))                                                      // Q( -rshifts )
  1920  		num = ((-num) << (1))                                                       // Q( 1-rshifts )
  1921  
  1922  		/* Calculate the next order reflection (parcor) coefficient */
  1923  		if (func() int32 {
  1924  			if (num) > 0 {
  1925  				return num
  1926  			}
  1927  			return -num
  1928  		}()) < nrg {
  1929  			rc_Q31 = SKP_DIV32_varQ(tls, num, nrg, 31)
  1930  		} else {
  1931  			/* Negative energy or ratio too high; set remaining coefficients to zero and exit loop */
  1932  			libc.Xmemset(tls, (bp + 272 /* &Af_QA */ + uintptr(n)*4), 0, ((uint64(D - n)) * uint64(unsafe.Sizeof(int32(0)))))
  1933  
  1934  			break
  1935  		}
  1936  
  1937  		/* Update the AR coefficients */
  1938  		for k = 0; k < ((n + 1) >> 1); k++ {
  1939  			tmp1 = *(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))                                                                               // QA
  1940  			tmp2 = *(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(((n-k)-1))*4))                                                                       // QA
  1941  			*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4)) = ((tmp1) + ((int32(((int64_t(tmp2)) * (int64_t(rc_Q31))) >> (32))) << (1)))         // QA
  1942  			*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(((n-k)-1))*4)) = ((tmp2) + ((int32(((int64_t(tmp1)) * (int64_t(rc_Q31))) >> (32))) << (1))) // QA
  1943  		}
  1944  		*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(n)*4)) = ((rc_Q31) >> (31 - 25)) // QA
  1945  
  1946  		/* Update C * Af and C * Ab */
  1947  		for k = 0; k <= (n + 1); k++ {
  1948  			tmp1 = *(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr(k)*4))                                                                               // Q( -rshifts )
  1949  			tmp2 = *(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr(((n-k)+1))*4))                                                                       // Q( -rshifts )
  1950  			*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr(k)*4)) = ((tmp1) + ((int32(((int64_t(tmp2)) * (int64_t(rc_Q31))) >> (32))) << (1)))         // Q( -rshifts )
  1951  			*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr(((n-k)+1))*4)) = ((tmp2) + ((int32(((int64_t(tmp1)) * (int64_t(rc_Q31))) >> (32))) << (1))) // Q( -rshifts )
  1952  		}
  1953  	}
  1954  
  1955  	/* Return residual energy */
  1956  	nrg = *(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */)) // Q( -rshifts )
  1957  	tmp1 = (int32(1) << 16)                                 // Q16
  1958  	for k = 0; k < D; k++ {
  1959  		Atmp1 = func() int32 {
  1960  			if (25 - 16) == 1 {
  1961  				return (((*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))) & 1))
  1962  			}
  1963  			return ((((*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))) >> ((25 - 16) - 1)) + 1) >> 1)
  1964  		}() // Q16
  1965  		nrg = (((nrg) + ((((*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr((k+1))*4))) >> 16) * (int32(int16(Atmp1)))) + ((((*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr((k+1))*4))) & 0x0000FFFF) * (int32(int16(Atmp1)))) >> 16))) + ((*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr((k+1))*4))) * (func() int32 {
  1966  			if (16) == 1 {
  1967  				return (((Atmp1) >> 1) + ((Atmp1) & 1))
  1968  			}
  1969  			return ((((Atmp1) >> ((16) - 1)) + 1) >> 1)
  1970  		}()))) // Q( -rshifts )
  1971  		tmp1 = (((tmp1) + ((((Atmp1) >> 16) * (int32(int16(Atmp1)))) + ((((Atmp1) & 0x0000FFFF) * (int32(int16(Atmp1)))) >> 16))) + ((Atmp1) * (func() int32 {
  1972  			if (16) == 1 {
  1973  				return (((Atmp1) >> 1) + ((Atmp1) & 1))
  1974  			}
  1975  			return ((((Atmp1) >> ((16) - 1)) + 1) >> 1)
  1976  		}()))) // Q16
  1977  		*(*int32)(unsafe.Pointer(A_Q16 + uintptr(k)*4)) = -Atmp1
  1978  	}
  1979  	*(*int32)(unsafe.Pointer(res_nrg)) = (((nrg) + ((((int32(((int64_t(WhiteNoiseFrac_Q32)) * (int64_t(*(*int32)(unsafe.Pointer(bp /* C0 */))))) >> (32))) >> 16) * (int32(int16(-tmp1)))) + ((((int32(((int64_t(WhiteNoiseFrac_Q32)) * (int64_t(*(*int32)(unsafe.Pointer(bp /* C0 */))))) >> (32))) & 0x0000FFFF) * (int32(int16(-tmp1)))) >> 16))) + ((int32(((int64_t(WhiteNoiseFrac_Q32)) * (int64_t(*(*int32)(unsafe.Pointer(bp /* C0 */))))) >> (32))) * (func() int32 {
  1980  		if (16) == 1 {
  1981  			return (((-tmp1) >> 1) + ((-tmp1) & 1))
  1982  		}
  1983  		return ((((-tmp1) >> ((16) - 1)) + 1) >> 1)
  1984  	}()))) // Q( -rshifts )
  1985  	*(*int32)(unsafe.Pointer(res_nrg_Q)) = -*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */))
  1986  }
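
// SKP_Silk_burg_modified estimates AR coefficients with a Burg-style
// recursion, adding a small white-noise fraction for conditioning and
// accumulating correlations over nb_subfr subframes.  For orientation, a
// textbook single-segment floating-point Burg recursion is sketched below;
// burgSketch is illustrative, not part of the SDK, and it returns the
// prediction-error filter taps rather than the SDK's negated Q16 format:
func burgSketch(x []float64, order int) (a []float64, resNrg float64) {
	n := len(x)
	f := append([]float64(nil), x...) // forward prediction errors
	b := append([]float64(nil), x...) // backward prediction errors
	for _, v := range x {
		resNrg += v * v
	}
	a = make([]float64, 0, order)
	for m := 0; m < order; m++ {
		var num, den float64
		for i := m + 1; i < n; i++ {
			num += f[i] * b[i-1]
			den += f[i]*f[i] + b[i-1]*b[i-1]
		}
		if den == 0 {
			break
		}
		k := -2 * num / den // reflection (parcor) coefficient
		// Levinson-style update of the error-filter coefficients.
		newA := make([]float64, m+1)
		for j := 0; j < m; j++ {
			newA[j] = a[j] + k*a[m-1-j]
		}
		newA[m] = k
		a = newA
		// Update forward/backward prediction errors in place.
		for i := n - 1; i > m; i-- {
			fi := f[i] + k*b[i-1]
			b[i] = b[i-1] + k*f[i]
			f[i] = fi
		}
		resNrg *= 1 - k*k // residual energy shrinks with each stage
	}
	return a, resNrg
}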
  1987  
  1988  /* Chirp (bandwidth expand) LP AR filter */
  1989  func SKP_Silk_bwexpander(tls *libc.TLS, ar uintptr, d int32, chirp_Q16 int32) { /* SKP_Silk_bwexpander.c:31:6: */
  1990  	var i int32
  1991  	var chirp_minus_one_Q16 int32
  1992  
  1993  	chirp_minus_one_Q16 = (chirp_Q16 - 65536)
  1994  
  1995  	/* NB: Do not use SKP_SMULWB instead of SKP_RSHIFT_ROUND( SKP_MUL(), 16 ) below. */
  1996  	/* Bias in SKP_SMULWB can lead to unstable filters                                */
  1997  	for i = 0; i < (d - 1); i++ {
  1998  		*(*int16)(unsafe.Pointer(ar + uintptr(i)*2)) = func() int16 {
  1999  			if (16) == 1 {
  2000  				return (int16((((chirp_Q16) * (int32(*(*int16)(unsafe.Pointer(ar + uintptr(i)*2))))) >> 1) + (((chirp_Q16) * (int32(*(*int16)(unsafe.Pointer(ar + uintptr(i)*2))))) & 1)))
  2001  			}
  2002  			return (int16(((((chirp_Q16) * (int32(*(*int16)(unsafe.Pointer(ar + uintptr(i)*2))))) >> ((16) - 1)) + 1) >> 1))
  2003  		}()
  2004  		chirp_Q16 = chirp_Q16 + (func() int32 {
  2005  			if (16) == 1 {
  2006  				return ((((chirp_Q16) * (chirp_minus_one_Q16)) >> 1) + (((chirp_Q16) * (chirp_minus_one_Q16)) & 1))
  2007  			}
  2008  			return (((((chirp_Q16) * (chirp_minus_one_Q16)) >> ((16) - 1)) + 1) >> 1)
  2009  		}())
  2010  	}
  2011  	*(*int16)(unsafe.Pointer(ar + uintptr((d-1))*2)) = func() int16 {
  2012  		if (16) == 1 {
  2013  			return (int16((((chirp_Q16) * (int32(*(*int16)(unsafe.Pointer(ar + uintptr((d-1))*2))))) >> 1) + (((chirp_Q16) * (int32(*(*int16)(unsafe.Pointer(ar + uintptr((d-1))*2))))) & 1)))
  2014  		}
  2015  		return (int16(((((chirp_Q16) * (int32(*(*int16)(unsafe.Pointer(ar + uintptr((d-1))*2))))) >> ((16) - 1)) + 1) >> 1))
  2016  	}()
  2017  }
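
// Bandwidth expansion multiplies the k-th AR coefficient by chirp^(k+1),
// pulling the filter poles towards the origin.  A floating-point sketch of
// the same operation (bwExpanderSketch is illustrative, not part of the SDK):
func bwExpanderSketch(ar []float64, chirp float64) {
	c := chirp
	for i := range ar {
		ar[i] *= c
		c *= chirp
	}
}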
  2018  
  2019  /* Chirp (bandwidth expand) LP AR filter */
  2020  func SKP_Silk_bwexpander_32(tls *libc.TLS, ar uintptr, d int32, chirp_Q16 int32) { /* SKP_Silk_bwexpander_32.c:31:6: */
  2021  	var i int32
  2022  	var tmp_chirp_Q16 int32
  2023  
  2024  	tmp_chirp_Q16 = chirp_Q16
  2025  	for i = 0; i < (d - 1); i++ {
  2026  		*(*int32)(unsafe.Pointer(ar + uintptr(i)*4)) = (((((*(*int32)(unsafe.Pointer(ar + uintptr(i)*4))) >> 16) * (int32(int16(tmp_chirp_Q16)))) + ((((*(*int32)(unsafe.Pointer(ar + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(tmp_chirp_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(ar + uintptr(i)*4))) * (func() int32 {
  2027  			if (16) == 1 {
  2028  				return (((tmp_chirp_Q16) >> 1) + ((tmp_chirp_Q16) & 1))
  2029  			}
  2030  			return ((((tmp_chirp_Q16) >> ((16) - 1)) + 1) >> 1)
  2031  		}())))
  2032  		tmp_chirp_Q16 = (((((chirp_Q16) >> 16) * (int32(int16(tmp_chirp_Q16)))) + ((((chirp_Q16) & 0x0000FFFF) * (int32(int16(tmp_chirp_Q16)))) >> 16)) + ((chirp_Q16) * (func() int32 {
  2033  			if (16) == 1 {
  2034  				return (((tmp_chirp_Q16) >> 1) + ((tmp_chirp_Q16) & 1))
  2035  			}
  2036  			return ((((tmp_chirp_Q16) >> ((16) - 1)) + 1) >> 1)
  2037  		}())))
  2038  	}
  2039  	*(*int32)(unsafe.Pointer(ar + uintptr((d-1))*4)) = (((((*(*int32)(unsafe.Pointer(ar + uintptr((d-1))*4))) >> 16) * (int32(int16(tmp_chirp_Q16)))) + ((((*(*int32)(unsafe.Pointer(ar + uintptr((d-1))*4))) & 0x0000FFFF) * (int32(int16(tmp_chirp_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(ar + uintptr((d-1))*4))) * (func() int32 {
  2040  		if (16) == 1 {
  2041  			return (((tmp_chirp_Q16) >> 1) + ((tmp_chirp_Q16) & 1))
  2042  		}
  2043  		return ((((tmp_chirp_Q16) >> ((16) - 1)) + 1) >> 1)
  2044  	}())))
  2045  }
  2046  
  2047  /***********************************************************************
  2048  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  2049  Redistribution and use in source and binary forms, with or without
  2050  modification, (subject to the limitations in the disclaimer below)
  2051  are permitted provided that the following conditions are met:
  2052  - Redistributions of source code must retain the above copyright notice,
  2053  this list of conditions and the following disclaimer.
  2054  - Redistributions in binary form must reproduce the above copyright
  2055  notice, this list of conditions and the following disclaimer in the
  2056  documentation and/or other materials provided with the distribution.
  2057  - Neither the name of Skype Limited, nor the names of specific
  2058  contributors, may be used to endorse or promote products derived from
  2059  this software without specific prior written permission.
  2060  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  2061  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  2062  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  2063  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  2064  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  2065  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  2066  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  2067  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  2068  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  2069  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  2070  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  2071  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  2072  ***********************************************************************/
  2073  
  2101  /******************/
  2102  /* Error messages */
  2103  /******************/
  2104  
  2105  /**************************/
  2106  /* Encoder error messages */
  2107  /**************************/
  2108  
  2109  /* Input length is not a multiple of 10 ms, or length is longer than the packet length */
  2110  
  2111  /* Sampling frequency not 8000, 12000, 16000 or 24000 Hertz */
  2112  
  2113  /* Packet size not 20, 40, 60, 80 or 100 ms */
  2114  
  2115  /* Allocated payload buffer too short */
  2116  
  2117  /* Loss rate not between 0 and 100 percent */
  2118  
  2119  /* Complexity setting not valid, use 0, 1 or 2 */
  2120  
  2121  /* Inband FEC setting not valid, use 0 or 1 */
  2122  
  2123  /* DTX setting not valid, use 0 or 1 */
  2124  
  2125  /* Internal encoder error */
  2126  
  2127  /**************************/
  2128  /* Decoder error messages */
  2129  /**************************/
  2130  
  2131  /* Output sampling frequency lower than internal decoded sampling frequency */
  2132  
  2133  /* Payload size exceeded the maximum allowed 1024 bytes */
  2134  
  2135  /* Payload has bit errors */
  2136  
  2164  /* Limits on bitrate */
  2165  
  2166  /* Transition bitrates between modes */
  2167  
  2168  /* Integration/hysteresis threshold for lowering internal sample frequency */
  2169  /* 30000000 -> 6 sec if bitrate is 5000 bps below limit; 3 sec if bitrate is 10000 bps below limit */
  2170  
  2171  /* DTX settings                                 */
  2172  
  2173  /* Number of consecutive packets without FEC before telling the jitter buffer (JB) */
  2174  
  2175  /* Maximum delay between real packet and LBRR packet */
  2176  
  2177  /* LBRR usage defines */
  2178  
  2179  /* Frame termination indicator defines */
  2180  
  2181  /* Number of second-order sections for the SWB detection HP filter */
  2182  
  2183  /* Low complexity setting */
  2184  
  2185  /* Activate bandwidth transition filtering for mode switching */
  2186  
  2187  /* Decoder Parameters */
  2188  
  2189  /* Maximum sampling frequency, should be 16 for some embedded platforms */
  2190  
  2191  /* Signal Types used by silk */
  2192  
  2193  /* VAD Types used by silk */
  2194  
  2195  /* Number of samples per frame */
  2196  
  2197  /* Milliseconds of lookahead for pitch analysis */
  2198  
  2199  /* Length of LPC window used in find pitch */
  2200  
  2201  /* Order of LPC used in find pitch */
  2202  
  2203  /* Milliseconds of lookahead for noise shape analysis */
  2204  
  2205  /* Max length of LPC window used in noise shape analysis */
  2206  
  2207  /* Max number of bytes in payload output buffer (may contain multiple frames) */
  2208  
  2209  /* dB level of lowest gain quantization level */
  2210  /* dB level of highest gain quantization level */
  2211  /* Number of gain quantization levels */
  2212  /* Max increase in gain quantization index */
  2213  /* Max decrease in gain quantization index */
  2214  
  2215  /* Quantization offsets (multiples of 4) */
  2216  
  2217  /* Maximum number of iterations used to stabilize an LPC vector */
  2218  
  2219  /* Find Pred Coef defines */
  2220  
  2221  /* LTP quantization settings */
  2222  
  2223  /* Number of subframes */
  2224  
  2225  /* Flag to use harmonic noise shaping */
  2226  
  2227  /* Max LPC order of noise shaping filters */
  2228  
  2229  /* Maximum number of delayed decision states */
  2230  
  2231  /* number of subframes for excitation entropy coding */
  2232  
  2233  /* number of rate levels, for entropy coding of excitation */
  2234  
  2235  /* maximum sum of pulses per shell coding frame */
  2236  
  2237  /***********************/
  2238  /* High pass filtering */
  2239  /***********************/
  2240  
  2241  /***************************/
  2242  /* Voice activity detector */
  2243  /***************************/
  2244  
  2245  /* Sigmoid settings */
  2246  
  2247  /* smoothing for SNR measurement */
  2248  
  2249  /******************/
  2250  /* NLSF quantizer */
  2251  /******************/
  2252  
  2253  /* Based on above defines, calculate how much memory is necessary to allocate */
  2254  
  2255  /* Transition filtering for mode switching */
  2256  
  2257  /* Row based */
  2258  
  2259  /* Column based */
  2260  
  2261  /* BWE factors to apply after packet loss */
  2262  
  2263  /* Defines for CN generation */
  2264  
  2373  /************************************/
  2374  /* Noise shaping quantization state */
  2375  /************************************/
  2376  type SKP_Silk_nsq_state = struct {
  2377  	Fxq                [960]int16
  2378  	FsLTP_shp_Q10      [960]int32
  2379  	FsLPC_Q14          [152]int32
  2380  	FsAR2_Q14          [16]int32
  2381  	FsLF_AR_shp_Q12    int32
  2382  	FlagPrev           int32
  2383  	FsLTP_buf_idx      int32
  2384  	FsLTP_shp_buf_idx  int32
  2385  	Frand_seed         int32
  2386  	Fprev_inv_gain_Q16 int32
  2387  	Frewhite_flag      int32
  2388  } /* SKP_Silk_structs.h:56:3 */ /* FIX*/
  2389  
  2390  /* Struct for Low BitRate Redundant (LBRR) information */
  2391  type SKP_SILK_LBRR_struct = struct {
  2392  	Fpayload [1024]uint8
  2393  	FnBytes  int32
  2394  	Fusage   int32
  2395  } /* SKP_Silk_structs.h:63:3 */
  2396  
  2397  /********************************/
  2398  /* VAD state                    */
  2399  /********************************/
  2400  type SKP_Silk_VAD_state = struct {
  2401  	FAnaState        [2]int32
  2402  	FAnaState1       [2]int32
  2403  	FAnaState2       [2]int32
  2404  	FXnrgSubfr       [4]int32
  2405  	FNrgRatioSmth_Q8 [4]int32
  2406  	FHPstate         int16
  2407  	_                [2]byte
  2408  	FNL              [4]int32
  2409  	Finv_NL          [4]int32
  2410  	FNoiseLevelBias  [4]int32
  2411  	Fcounter         int32
  2412  } /* SKP_Silk_structs.h:79:3 */
  2413  
  2414  /*******************************/
  2415  /* Range encoder/decoder state */
  2416  /*******************************/
  2417  type SKP_Silk_range_coder_state = struct {
  2418  	FbufferLength int32
  2419  	FbufferIx     int32
  2420  	Fbase_Q32     uint32
  2421  	Frange_Q16    uint32
  2422  	Ferror        int32
  2423  	Fbuffer       [1024]uint8
  2424  } /* SKP_Silk_structs.h:91:3 */
  2425  
  2426  /* Input frequency range detection struct */
  2427  type SKP_Silk_detect_SWB_state = struct {
  2428  	FS_HP_8_kHz            [3][2]int32
  2429  	FConsecSmplsAboveThres int32
  2430  	FActiveSpeech_ms       int32
  2431  	FSWB_detected          int32
  2432  	FWB_detected           int32
  2433  } /* SKP_Silk_structs.h:100:3 */
  2434  
  2435  /* Variable cut-off low-pass filter state */
  2436  type SKP_Silk_LP_state = struct {
  2437  	FIn_LP_State         [2]int32
  2438  	Ftransition_frame_no int32
  2439  	Fmode                int32
  2440  } /* SKP_Silk_structs.h:108:3 */
  2441  
  2442  /* Structure for one stage of MSVQ */
  2443  type SKP_Silk_NLSF_CBS = struct {
  2444  	FnVectors    int32
  2445  	_            [4]byte
  2446  	FCB_NLSF_Q15 uintptr
  2447  	FRates_Q5    uintptr
  2448  } /* SKP_Silk_structs.h:116:3 */
  2449  
  2450  /* Structure containing NLSF MSVQ codebook */
  2451  type SKP_Silk_NLSF_CB_struct = struct {
  2452  	FnStages       int32
  2453  	_              [4]byte
  2454  	FCBStages      uintptr
  2455  	FNDeltaMin_Q15 uintptr
  2456  	FCDF           uintptr
  2457  	FStartPtr      uintptr
  2458  	FMiddleIx      uintptr
  2459  } /* SKP_Silk_structs.h:130:3 */
  2460  
  2461  /********************************/
  2462  /* Encoder state                */
  2463  /********************************/
  2464  type SKP_Silk_encoder_state = struct {
  2465  	FsRC                           SKP_Silk_range_coder_state
  2466  	FsRC_LBRR                      SKP_Silk_range_coder_state
  2467  	FsNSQ                          SKP_Silk_nsq_state
  2468  	FsNSQ_LBRR                     SKP_Silk_nsq_state
  2469  	FIn_HP_State                   [2]int32
  2470  	FsLP                           SKP_Silk_LP_state
  2471  	FsVAD                          SKP_Silk_VAD_state
  2472  	FLBRRprevLastGainIndex         int32
  2473  	Fprev_sigtype                  int32
  2474  	FtypeOffsetPrev                int32
  2475  	FprevLag                       int32
  2476  	Fprev_lagIndex                 int32
  2477  	FAPI_fs_Hz                     int32
  2478  	Fprev_API_fs_Hz                int32
  2479  	FmaxInternal_fs_kHz            int32
  2480  	Ffs_kHz                        int32
  2481  	Ffs_kHz_changed                int32
  2482  	Fframe_length                  int32
  2483  	Fsubfr_length                  int32
  2484  	Fla_pitch                      int32
  2485  	Fla_shape                      int32
  2486  	FshapeWinLength                int32
  2487  	FTargetRate_bps                int32
  2488  	FPacketSize_ms                 int32
  2489  	FPacketLoss_perc               int32
  2490  	FframeCounter                  int32
  2491  	FComplexity                    int32
  2492  	FnStatesDelayedDecision        int32
  2493  	FuseInterpolatedNLSFs          int32
  2494  	FshapingLPCOrder               int32
  2495  	FpredictLPCOrder               int32
  2496  	FpitchEstimationComplexity     int32
  2497  	FpitchEstimationLPCOrder       int32
  2498  	FpitchEstimationThreshold_Q16  int32
  2499  	FLTPQuantLowComplexity         int32
  2500  	FNLSF_MSVQ_Survivors           int32
  2501  	Ffirst_frame_after_reset       int32
  2502  	Fcontrolled_since_last_payload int32
  2503  	Fwarping_Q16                   int32
  2504  	FinputBuf                      [480]int16
  2505  	FinputBufIx                    int32
  2506  	FnFramesInPayloadBuf           int32
  2507  	FnBytesInPayloadBuf            int32
  2508  	Fframes_since_onset            int32
  2509  	FpsNLSF_CB                     [2]uintptr
  2510  	FLBRR_buffer                   [2]SKP_SILK_LBRR_struct
  2511  	Foldest_LBRR_idx               int32
  2512  	FuseInBandFEC                  int32
  2513  	FLBRR_enabled                  int32
  2514  	FLBRR_GainIncreases            int32
  2515  	FbitrateDiff                   int32
  2516  	Fbitrate_threshold_up          int32
  2517  	Fbitrate_threshold_down        int32
  2518  	_                              [4]byte
  2519  	Fresampler_state               SKP_Silk_resampler_state_struct
  2520  	FnoSpeechCounter               int32
  2521  	FuseDTX                        int32
  2522  	FinDTX                         int32
  2523  	FvadFlag                       int32
  2524  	FsSWBdetect                    SKP_Silk_detect_SWB_state
  2525  	Fq                             [480]int8
  2526  	Fq_LBRR                        [480]int8
  2527  } /* SKP_Silk_structs.h:221:3 */
  2528  
  2529  /************************/
  2530  /* Encoder control      */
  2531  /************************/
  2532  type SKP_Silk_encoder_control = struct {
  2533  	FlagIndex          int32
  2534  	FcontourIndex      int32
  2535  	FPERIndex          int32
  2536  	FLTPIndex          [4]int32
  2537  	FNLSFIndices       [10]int32
  2538  	FNLSFInterpCoef_Q2 int32
  2539  	FGainsIndices      [4]int32
  2540  	FSeed              int32
  2541  	FLTP_scaleIndex    int32
  2542  	FRateLevelIndex    int32
  2543  	FQuantOffsetType   int32
  2544  	Fsigtype           int32
  2545  	FpitchL            [4]int32
  2546  	FLBRR_usage        int32
  2547  } /* SKP_Silk_structs.h:246:3 */
  2548  
  2549  /* Struct for Packet Loss Concealment */
  2550  type SKP_Silk_PLC_struct = struct {
  2551  	FpitchL_Q8         int32
  2552  	FLTPCoef_Q14       [5]int16
  2553  	FprevLPC_Q12       [16]int16
  2554  	_                  [2]byte
  2555  	Flast_frame_lost   int32
  2556  	Frand_seed         int32
  2557  	FrandScale_Q14     int16
  2558  	_                  [2]byte
  2559  	Fconc_energy       int32
  2560  	Fconc_energy_shift int32
  2561  	FprevLTP_scale_Q14 int16
  2562  	_                  [2]byte
  2563  	FprevGain_Q16      [4]int32
  2564  	Ffs_kHz            int32
  2565  } /* SKP_Silk_structs.h:261:3 */
  2566  
  2567  /* Struct for CNG */
  2568  type SKP_Silk_CNG_struct = struct {
  2569  	FCNG_exc_buf_Q10   [480]int32
  2570  	FCNG_smth_NLSF_Q15 [16]int32
  2571  	FCNG_synth_state   [16]int32
  2572  	FCNG_smth_Gain_Q16 int32
  2573  	Frand_seed         int32
  2574  	Ffs_kHz            int32
  2575  } /* SKP_Silk_structs.h:271:3 */
  2576  
  2577  /********************************/
  2578  /* Decoder state                */
  2579  /********************************/
  2580  type SKP_Silk_decoder_state = struct {
  2581  	FsRC                       SKP_Silk_range_coder_state
  2582  	Fprev_inv_gain_Q16         int32
  2583  	FsLTP_Q16                  [960]int32
  2584  	FsLPC_Q14                  [136]int32
  2585  	Fexc_Q10                   [480]int32
  2586  	Fres_Q10                   [480]int32
  2587  	FoutBuf                    [960]int16
  2588  	FlagPrev                   int32
  2589  	FLastGainIndex             int32
  2590  	FLastGainIndex_EnhLayer    int32
  2591  	FtypeOffsetPrev            int32
  2592  	FHPState                   [2]int32
  2593  	FHP_A                      uintptr
  2594  	FHP_B                      uintptr
  2595  	Ffs_kHz                    int32
  2596  	Fprev_API_sampleRate       int32
  2597  	Fframe_length              int32
  2598  	Fsubfr_length              int32
  2599  	FLPC_order                 int32
  2600  	FprevNLSF_Q15              [16]int32
  2601  	Ffirst_frame_after_reset   int32
  2602  	FnBytesLeft                int32
  2603  	FnFramesDecoded            int32
  2604  	FnFramesInPacket           int32
  2605  	FmoreInternalDecoderFrames int32
  2606  	FFrameTermination          int32
  2607  	_                          [4]byte
  2608  	Fresampler_state           SKP_Silk_resampler_state_struct
  2609  	FpsNLSF_CB                 [2]uintptr
  2610  	FvadFlag                   int32
  2611  	Fno_FEC_counter            int32
  2612  	Finband_FEC_offset         int32
  2613  	FsCNG                      SKP_Silk_CNG_struct
  2614  	FlossCnt                   int32
  2615  	Fprev_sigtype              int32
  2616  	FsPLC                      SKP_Silk_PLC_struct
  2617  	_                          [4]byte
  2618  } /* SKP_Silk_structs.h:326:3 */
  2619  
  2620  /************************/
  2621  /* Decoder control      */
  2622  /************************/
  2623  type SKP_Silk_decoder_control = struct {
  2624  	FpitchL            [4]int32
  2625  	FGains_Q16         [4]int32
  2626  	FSeed              int32
  2627  	FPredCoef_Q12      [2][16]int16
  2628  	FLTPCoef_Q14       [20]int16
  2629  	FLTP_scale_Q14     int32
  2630  	FPERIndex          int32
  2631  	FRateLevelIndex    int32
  2632  	FQuantOffsetType   int32
  2633  	Fsigtype           int32
  2634  	FNLSFInterpCoef_Q2 int32
  2635  } /* SKP_Silk_structs.h:347:3 */
  2636  
  2637  /* Generates excitation for CNG LPC synthesis */
  2638  func SKP_Silk_CNG_exc(tls *libc.TLS, residual uintptr, exc_buf_Q10 uintptr, Gain_Q16 int32, length int32, rand_seed uintptr) { /* SKP_Silk_CNG.c:31:17: */
  2639  	var seed int32
  2640  	var i int32
  2641  	var idx int32
  2642  	var exc_mask int32
  2643  
  2644  	exc_mask = 255
  2645  	for exc_mask > length {
  2646  		exc_mask = ((exc_mask) >> (1))
  2647  	}
  2648  
  2649  	seed = *(*int32)(unsafe.Pointer(rand_seed))
  2650  	for i = 0; i < length; i++ {
  2651  		seed = (int32((uint32(907633515)) + ((uint32(seed)) * (uint32(196314165)))))
  2652  		idx = (((seed) >> (24)) & exc_mask)
  2653  
  2654  		*(*int16)(unsafe.Pointer(residual + uintptr(i)*2)) = func() int16 {
  2655  			if (func() int32 {
  2656  				if (10) == 1 {
  2657  					return (((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2658  						if (16) == 1 {
  2659  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2660  						}
  2661  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2662  					}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2663  						if (16) == 1 {
  2664  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2665  						}
  2666  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2667  					}()))) & 1))
  2668  				}
  2669  				return ((((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2670  					if (16) == 1 {
  2671  						return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2672  					}
  2673  					return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2674  				}()))) >> ((10) - 1)) + 1) >> 1)
  2675  			}()) > 0x7FFF {
  2676  				return int16(0x7FFF)
  2677  			}
  2678  			return func() int16 {
  2679  				if (func() int32 {
  2680  					if (10) == 1 {
  2681  						return (((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2682  							if (16) == 1 {
  2683  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2684  							}
  2685  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2686  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2687  							if (16) == 1 {
  2688  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2689  							}
  2690  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2691  						}()))) & 1))
  2692  					}
  2693  					return ((((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2694  						if (16) == 1 {
  2695  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2696  						}
  2697  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2698  					}()))) >> ((10) - 1)) + 1) >> 1)
  2699  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
  2700  					return libc.Int16FromInt32(0x8000)
  2701  				}
  2702  				return func() int16 {
  2703  					if (10) == 1 {
  2704  						return (int16(((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2705  							if (16) == 1 {
  2706  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2707  							}
  2708  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2709  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2710  							if (16) == 1 {
  2711  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2712  							}
  2713  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2714  						}()))) & 1)))
  2715  					}
  2716  					return (int16((((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2717  						if (16) == 1 {
  2718  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2719  						}
  2720  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2721  					}()))) >> ((10) - 1)) + 1) >> 1))
  2722  				}()
  2723  			}()
  2724  		}()
  2725  	}
  2726  	*(*int32)(unsafe.Pointer(rand_seed)) = seed
  2727  }
  2728  
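// Editor's note: the nested expression above is the ccgo expansion of the fixed-point
// macros SKP_SAT16(SKP_RSHIFT_ROUND(SKP_SMULWW(exc_buf_Q10[idx], Gain_Q16), 10)).
// Ignoring the exact low-bit rounding of the SMULWW macro, a readable sketch of what
// each output sample computes is (helper name is illustrative, not part of this file):

// cngExcSample scales one Q10 excitation sample by a Q16 gain and saturates to int16.
// It approximates the generated per-sample expression above; shown only for clarity.
func cngExcSample(excQ10, gainQ16 int32) int16 {
	v := (int64(excQ10) * int64(gainQ16)) >> 16 // ~SKP_SMULWW: Q10 * Q16 -> Q10
	v = (v + 512) >> 10                         // SKP_RSHIFT_ROUND: Q10 -> Q0 with rounding
	if v > 32767 {                              // SKP_SAT16: clamp to the int16 range
		return 32767
	}
	if v < -32768 {
		return -32768
	}
	return int16(v)
}
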
  2729  func SKP_Silk_CNG_Reset(tls *libc.TLS, psDec uintptr) { /* SKP_Silk_CNG.c:58:6: */
  2730  	var i int32
  2731  	var NLSF_step_Q15 int32
  2732  	var NLSF_acc_Q15 int32
  2733  
  2734  	NLSF_step_Q15 = ((0x7FFF) / ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order + 1))
  2735  	NLSF_acc_Q15 = 0
  2736  	for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order; i++ {
  2737  		NLSF_acc_Q15 = NLSF_acc_Q15 + (NLSF_step_Q15)
  2738  		*(*int32)(unsafe.Pointer((psDec + 11564 /* &.sCNG */ + 1920 /* &.CNG_smth_NLSF_Q15 */) + uintptr(i)*4)) = NLSF_acc_Q15
  2739  	}
  2740  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsCNG.FCNG_smth_Gain_Q16 = 0
  2741  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsCNG.Frand_seed = 3176576
  2742  }
  2743  
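// Editor's note: the reset above spreads the smoothed CNG NLSF vector uniformly over
// the Q15 range: step = 0x7FFF / (LPC_order + 1), entry i = (i + 1) * step. A hedged
// restatement of that initialization (illustrative helper, not part of this file):
func uniformNLSFQ15(order int32) []int32 {
	step := int32(0x7FFF) / (order + 1)
	nlsf := make([]int32, order)
	acc := int32(0)
	for i := int32(0); i < order; i++ {
		acc += step
		nlsf[i] = acc // equivalently (i + 1) * step
	}
	return nlsf
}
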
  2744  /* Updates the CNG estimate, and applies CNG when a packet was lost */
  2745  func SKP_Silk_CNG(tls *libc.TLS, psDec uintptr, psDecCtrl uintptr, signal uintptr, length int32) { /* SKP_Silk_CNG.c:75:6: */
  2746  	bp := tls.Alloc(992)
  2747  	defer tls.Free(992)
  2748  
  2749  	var i int32
  2750  	var subfr int32
  2751  	var tmp_32 int32
  2752  	var Gain_Q26 int32
  2753  	var max_Gain_Q16 int32
  2754  	// var LPC_buf [16]int16 at bp+960, 32
  2755  
  2756  	// var CNG_sig [480]int16 at bp, 960
  2757  
  2758  	var psCNG uintptr
  2759  	psCNG = (psDec + 11564 /* &.sCNG */)
  2760  
  2761  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz != (*SKP_Silk_CNG_struct)(unsafe.Pointer(psCNG)).Ffs_kHz {
  2762  		/* Reset state */
  2763  		SKP_Silk_CNG_Reset(tls, psDec)
  2764  
  2765  		(*SKP_Silk_CNG_struct)(unsafe.Pointer(psCNG)).Ffs_kHz = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz
  2766  	}
  2767  	if ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt == 0) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FvadFlag == 0) {
  2768  		/* Update CNG parameters */
  2769  
  2770  		/* Smoothing of LSFs */
  2771  		for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order; i++ {
  2772  			*(*int32)(unsafe.Pointer((psCNG + 1920 /* &.CNG_smth_NLSF_Q15 */) + uintptr(i)*4)) += ((((*(*int32)(unsafe.Pointer((psDec + 11252 /* &.prevNLSF_Q15 */) + uintptr(i)*4)) - *(*int32)(unsafe.Pointer((psCNG + 1920 /* &.CNG_smth_NLSF_Q15 */) + uintptr(i)*4))) >> 16) * (int32(int16(16348)))) + ((((*(*int32)(unsafe.Pointer((psDec + 11252 /* &.prevNLSF_Q15 */) + uintptr(i)*4)) - *(*int32)(unsafe.Pointer((psCNG + 1920 /* &.CNG_smth_NLSF_Q15 */) + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(16348)))) >> 16))
  2773  		}
  2774  		/* Find the subframe with the highest gain */
  2775  		max_Gain_Q16 = 0
  2776  		subfr = 0
  2777  		for i = 0; i < 4; i++ {
  2778  			if *(*int32)(unsafe.Pointer((psDecCtrl + 16 /* &.Gains_Q16 */) + uintptr(i)*4)) > max_Gain_Q16 {
  2779  				max_Gain_Q16 = *(*int32)(unsafe.Pointer((psDecCtrl + 16 /* &.Gains_Q16 */) + uintptr(i)*4))
  2780  				subfr = i
  2781  			}
  2782  		}
  2783  		/* Update CNG excitation buffer with excitation from this subframe */
  2784  		libc.Xmemmove(tls, ((psCNG /* &.CNG_exc_buf_Q10 */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)*4), psCNG /* &.CNG_exc_buf_Q10 */, ((uint64((4 - 1) * (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)) * uint64(unsafe.Sizeof(int32(0)))))
  2785  		libc.Xmemcpy(tls, psCNG /* &.CNG_exc_buf_Q10 */, ((psDec + 5432 /* &.exc_Q10 */) + uintptr((subfr*(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length))*4), (uint64((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length) * uint64(unsafe.Sizeof(int32(0)))))
  2786  
  2787  		/* Smooth gains */
  2788  		for i = 0; i < 4; i++ {
  2789  			*(*int32)(unsafe.Pointer(psCNG + 2048 /* &.CNG_smth_Gain_Q16 */)) += ((((*(*int32)(unsafe.Pointer((psDecCtrl + 16 /* &.Gains_Q16 */) + uintptr(i)*4)) - (*SKP_Silk_CNG_struct)(unsafe.Pointer(psCNG)).FCNG_smth_Gain_Q16) >> 16) * (int32(int16(4634)))) + ((((*(*int32)(unsafe.Pointer((psDecCtrl + 16 /* &.Gains_Q16 */) + uintptr(i)*4)) - (*SKP_Silk_CNG_struct)(unsafe.Pointer(psCNG)).FCNG_smth_Gain_Q16) & 0x0000FFFF) * (int32(int16(4634)))) >> 16))
  2790  		}
  2791  	}
  2792  
  2793  	/* Add CNG when a packet is lost and/or when speech activity is low */
  2794  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt != 0 { //|| psDec->vadFlag == NO_VOICE_ACTIVITY ) {
  2795  
  2796  		/* Generate CNG excitation */
  2797  		SKP_Silk_CNG_exc(tls, bp /* &CNG_sig[0] */, psCNG, /* &.CNG_exc_buf_Q10 */
  2798  			(*SKP_Silk_CNG_struct)(unsafe.Pointer(psCNG)).FCNG_smth_Gain_Q16, length, (psCNG + 2052 /* &.rand_seed */))
  2799  
  2800  		/* Convert CNG NLSF to filter representation */
  2801  		SKP_Silk_NLSF2A_stable(tls, bp+960 /* &LPC_buf[0] */, psCNG+1920 /* &.CNG_smth_NLSF_Q15 */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order)
  2802  
  2803  		Gain_Q26 = (int32(1) << 26) /* 1.0 */
  2804  
  2805  		/* Generate CNG signal, by synthesis filtering */
  2806  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order == 16 {
  2807  			SKP_Silk_LPC_synthesis_order16(tls, bp /* &CNG_sig[0] */, bp+960, /* &LPC_buf[0] */
  2808  				Gain_Q26, psCNG+1984 /* &.CNG_synth_state */, bp /* &CNG_sig[0] */, length)
  2809  		} else {
  2810  			SKP_Silk_LPC_synthesis_filter(tls, bp /* &CNG_sig[0] */, bp+960, /* &LPC_buf[0] */
  2811  				Gain_Q26, psCNG+1984 /* &.CNG_synth_state */, bp /* &CNG_sig[0] */, length, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order)
  2812  		}
  2813  		/* Mix with signal */
  2814  		for i = 0; i < length; i++ {
  2815  			tmp_32 = (int32(*(*int16)(unsafe.Pointer(signal + uintptr(i)*2))) + int32(*(*int16)(unsafe.Pointer(bp /* &CNG_sig[0] */ + uintptr(i)*2))))
  2816  			*(*int16)(unsafe.Pointer(signal + uintptr(i)*2)) = func() int16 {
  2817  				if (tmp_32) > 0x7FFF {
  2818  					return int16(0x7FFF)
  2819  				}
  2820  				return func() int16 {
  2821  					if (tmp_32) < (int32(libc.Int16FromInt32(0x8000))) {
  2822  						return libc.Int16FromInt32(0x8000)
  2823  					}
  2824  					return int16(tmp_32)
  2825  				}()
  2826  			}()
  2827  		}
  2828  	} else {
  2829  		libc.Xmemset(tls, psCNG+1984 /* &.CNG_synth_state */, 0, (uint64((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) * uint64(unsafe.Sizeof(int32(0)))))
  2830  	}
  2831  }
  2832  
  2833  //#define SKP_enc_map(a)                ((a) > 0 ? 1 : 0)
  2834  //#define SKP_dec_map(a)                ((a) > 0 ? 1 : -1)
  2835  /* shifting avoids if-statement */
  2836  
  2837  /* Encodes signs of excitation */
  2838  func SKP_Silk_encode_signs(tls *libc.TLS, sRC uintptr, q uintptr, length int32, sigtype int32, QuantOffsetType int32, RateLevelIndex int32) { /* SKP_Silk_code_signs.c:37:6: */
  2839  	bp := tls.Alloc(6)
  2840  	defer tls.Free(6)
  2841  
  2842  	var i int32
  2843  	var inData int32
  2844  	// var cdf [3]uint16 at bp, 6
  2845  
  2846  	i = (((int32((int16(10 - 1)))) * (int32((int16(((sigtype) << (1)) + QuantOffsetType))))) + RateLevelIndex)
  2847  	*(*uint16)(unsafe.Pointer(bp /* &cdf[0] */)) = uint16(0)
  2848  	*(*uint16)(unsafe.Pointer(bp /* &cdf[0] */ + 1*2)) = SKP_Silk_sign_CDF[i]
  2849  	*(*uint16)(unsafe.Pointer(bp /* &cdf[0] */ + 2*2)) = uint16(65535)
  2850  
  2851  	for i = 0; i < length; i++ {
  2852  		if int32(*(*int8)(unsafe.Pointer(q + uintptr(i)))) != 0 {
  2853  			inData = (((int32(*(*int8)(unsafe.Pointer(q + uintptr(i))))) >> (15)) + 1) /* - = 0, + = 1 */
  2854  			SKP_Silk_range_encoder(tls, sRC, inData, bp /* &cdf[0] */)
  2855  		}
  2856  	}
  2857  }
  2858  
  2859  /* Decodes signs of excitation */
  2860  func SKP_Silk_decode_signs(tls *libc.TLS, sRC uintptr, q uintptr, length int32, sigtype int32, QuantOffsetType int32, RateLevelIndex int32) { /* SKP_Silk_code_signs.c:64:6: */
  2861  	bp := tls.Alloc(12)
  2862  	defer tls.Free(12)
  2863  
  2864  	var i int32
  2865  	// var data int32 at bp+8, 4
  2866  
  2867  	// var cdf [3]uint16 at bp, 6
  2868  
  2869  	i = (((int32((int16(10 - 1)))) * (int32((int16(((sigtype) << (1)) + QuantOffsetType))))) + RateLevelIndex)
  2870  	*(*uint16)(unsafe.Pointer(bp /* &cdf[0] */)) = uint16(0)
  2871  	*(*uint16)(unsafe.Pointer(bp /* &cdf[0] */ + 1*2)) = SKP_Silk_sign_CDF[i]
  2872  	*(*uint16)(unsafe.Pointer(bp /* &cdf[0] */ + 2*2)) = uint16(65535)
  2873  
  2874  	for i = 0; i < length; i++ {
  2875  		if *(*int32)(unsafe.Pointer(q + uintptr(i)*4)) > 0 {
  2876  			SKP_Silk_range_decoder(tls, bp+8 /* &data */, sRC, bp /* &cdf[0] */, 1)
  2877  			/* attach sign */
  2878  			/* implementation with shift, subtraction, multiplication */
  2879  			*(*int32)(unsafe.Pointer(q + uintptr(i)*4)) *= (((*(*int32)(unsafe.Pointer(bp + 8 /* data */))) << (1)) - 1)
  2880  		}
  2881  	}
  2882  }
  2883  
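// Editor's note: the "shifting avoids if-statement" trick used by the two functions
// above maps signs without branching: for a non-zero pulse the encoder sends
// (q[i] >> 15) + 1, which is 0 for negative and 1 for positive values, and the decoder
// applies (data << 1) - 1, turning 0/1 back into a -1/+1 multiplier. A hedged
// stand-alone sketch (names are illustrative, not part of this file):

// encodeSignBit maps a non-zero pulse to the symbol that gets range-coded (0 or 1);
// decodeSignFactor maps that symbol back to a sign multiplier (-1 or +1).
func encodeSignBit(q int8) int32        { return (int32(q) >> 15) + 1 }
func decodeSignFactor(data int32) int32 { return (data << 1) - 1 }
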
  2884  // 7.18.2  Limits of specified-width integer types
  2885  
  2886  // 7.18.2.1  Limits of exact-width integer types
  2887  
  2888  // 7.18.2.2  Limits of minimum-width integer types
  2889  
  2890  // 7.18.2.3  Limits of fastest minimum-width integer types
  2891  
  2892  // 7.18.2.4  Limits of integer types capable of holding
  2893  //     object pointers
  2894  
  2895  // 7.18.2.5  Limits of greatest-width integer types
  2896  
  2897  // 7.18.3  Limits of other integer types
  2898  
  2899  // wint_t is unsigned short for compatibility with MS runtime
  2900  
  2901  // 7.18.4  Macros for integer constants
  2902  
  2903  // 7.18.4.1  Macros for minimum-width integer constants
  2904  //
  2905  //     According to Douglas Gwyn <gwyn@arl.mil>:
  2906  // 	"This spec was changed in ISO/IEC 9899:1999 TC1; in ISO/IEC
  2907  // 	9899:1999 as initially published, the expansion was required
  2908  // 	to be an integer constant of precisely matching type, which
  2909  // 	is impossible to accomplish for the shorter types on most
  2910  // 	platforms, because C99 provides no standard way to designate
  2911  // 	an integer constant with width less than that of type int.
  2912  // 	TC1 changed this to require just an integer constant
  2913  // 	*expression* with *promoted* type."
  2914  //
  2915  // 	The trick used here is from Clive D W Feather.
  2916  
  2917  //  The 'trick' doesn't work in C89 for long long because, without
  2918  //     suffix, (val) will be evaluated as int, not intmax_t
  2919  
  2920  // 7.18.4.2  Macros for greatest-width integer constants
  2921  
  2922  /* assertions */
  2923  
  2924  /***********************************************/
  2925  /* Structure for controlling encoder operation */
  2926  /***********************************************/
  2927  type SKP_SILK_SDK_EncControlStruct = struct {
  2928  	FAPI_sampleRate        int32
  2929  	FmaxInternalSampleRate int32
  2930  	FpacketSize            int32
  2931  	FbitRate               int32
  2932  	FpacketLossPercentage  int32
  2933  	Fcomplexity            int32
  2934  	FuseInBandFEC          int32
  2935  	FuseDTX                int32
  2936  } /* SKP_Silk_control.h:65:3 */
  2937  
  2938  /**************************************************************************/
  2939  /* Structure for controlling decoder operation and reading decoder status */
  2940  /**************************************************************************/
  2941  type SKP_SILK_SDK_DecControlStruct = struct {
  2942  	FAPI_sampleRate            int32
  2943  	FframeSize                 int32
  2944  	FframesPerPacket           int32
  2945  	FmoreInternalDecoderFrames int32
  2946  	FinBandFECOffset           int32
  2947  } /* SKP_Silk_control.h:85:3 */
  2948  
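// Editor's note: a hedged example of how the encoder control structure above is
// typically filled before invoking the SDK encode entry point. The field set comes
// directly from the struct definition; the concrete values and the assumption that
// FpacketSize is given in samples per packet follow the reference SILK SDK headers,
// which are not reproduced in this generated file.
func exampleEncControl() SKP_SILK_SDK_EncControlStruct {
	return SKP_SILK_SDK_EncControlStruct{
		FAPI_sampleRate:        24000, // rate of the PCM fed to the encoder, in Hz
		FmaxInternalSampleRate: 24000, // cap on the internal SILK sampling rate, in Hz
		FpacketSize:            480,   // 20 ms at 24 kHz, assumed to be in samples
		FbitRate:               25000, // target bitrate in bits per second
		FpacketLossPercentage:  0,     // expected uplink loss rate, 0..100
		Fcomplexity:            2,     // 0, 1 or 2 (see the encoder error comments above)
		FuseInBandFEC:          0,     // 0 or 1
		FuseDTX:                0,     // 0 or 1
	}
}
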
  2949  /* Control internal sampling rate */
  2950  func SKP_Silk_control_audio_bandwidth(tls *libc.TLS, psEncC uintptr, TargetRate_bps int32) int32 { /* SKP_Silk_control_audio_bandwidth.c:31:9: */
  2951  	var fs_kHz int32
  2952  
  2953  	fs_kHz = (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz
  2954  	if fs_kHz == 0 {
  2955  		/* Encoder has just been initialized */
  2956  		if TargetRate_bps >= 25000 {
  2957  			fs_kHz = 24
  2958  		} else if TargetRate_bps >= 14000 {
  2959  			fs_kHz = 16
  2960  		} else if TargetRate_bps >= 10000 {
  2961  			fs_kHz = 12
  2962  		} else {
  2963  			fs_kHz = 8
  2964  		}
  2965  		/* Make sure internal rate is not higher than external rate or maximum allowed, or lower than minimum allowed */
  2966  		fs_kHz = func() int32 {
  2967  			if (fs_kHz) < (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FAPI_fs_Hz) / (1000)) {
  2968  				return fs_kHz
  2969  			}
  2970  			return (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FAPI_fs_Hz) / (1000))
  2971  		}()
  2972  		fs_kHz = func() int32 {
  2973  			if (fs_kHz) < ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz) {
  2974  				return fs_kHz
  2975  			}
  2976  			return (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz
  2977  		}()
  2978  	} else if (((int32(int16(fs_kHz))) * (int32(int16(1000)))) > (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FAPI_fs_Hz) || (fs_kHz > (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz) {
  2979  		/* Make sure internal rate is not higher than external rate or maximum allowed */
  2980  		fs_kHz = (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FAPI_fs_Hz) / (1000))
  2981  		fs_kHz = func() int32 {
  2982  			if (fs_kHz) < ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz) {
  2983  				return fs_kHz
  2984  			}
  2985  			return (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz
  2986  		}()
  2987  	} else {
  2988  		/* State machine for the internal sampling rate switching */
  2989  		if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FAPI_fs_Hz > 8000 {
  2990  			/* Accumulate the difference between the target rate and limit for switching down */
  2991  			*(*int32)(unsafe.Pointer(psEncC + 18344 /* &.bitrateDiff */)) += (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FPacketSize_ms) * (TargetRate_bps - (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fbitrate_threshold_down))
  2992  			(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FbitrateDiff = func() int32 {
  2993  				if ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FbitrateDiff) < (0) {
  2994  					return (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FbitrateDiff
  2995  				}
  2996  				return 0
  2997  			}()
  2998  
  2999  			if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FvadFlag == 0 { /* Low speech activity */
  3000  				/* Check if we should switch down */
  3001  				if ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Ftransition_frame_no == 0) && (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FbitrateDiff <= -30000000) || (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsSWBdetect.FWB_detected * (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz) == 24)) { /* Forced down-switching due to WB input */
  3002  					(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Ftransition_frame_no = 1 /* Begin transition phase */
  3003  					(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Fmode = 0                /* Switch down */
  3004  				} else if ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Ftransition_frame_no >= (2560 / 20)) && ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Fmode == 0) { /* Ready to switch down */
  3005  					(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Ftransition_frame_no = 0 /* Ready for new transition phase */
  3006  					(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FbitrateDiff = 0
  3007  
  3008  					/* Switch to a lower sample frequency */
  3009  					if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 24 {
  3010  						fs_kHz = 16
  3011  					} else if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 16 {
  3012  						fs_kHz = 12
  3013  					} else {
  3014  
  3015  						fs_kHz = 8
  3016  					}
  3017  				}
  3018  
  3019  				/* Check if we should switch up */
  3020  				if ((((((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz * 1000) < (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FAPI_fs_Hz) && (TargetRate_bps >= (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fbitrate_threshold_up)) && (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsSWBdetect.FWB_detected * (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz) < 16)) && (((((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 16) && ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz >= 24)) || (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 12) && ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz >= 16))) || (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 8) && ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz >= 12)))) &&
  3021  					((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Ftransition_frame_no == 0) { /* No transition phase running, ready to switch */
  3022  					(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Fmode = 1 /* Switch up */
  3023  					(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FbitrateDiff = 0
  3024  
  3025  					/* Switch to a higher sample frequency */
  3026  					if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 8 {
  3027  						fs_kHz = 12
  3028  					} else if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 12 {
  3029  						fs_kHz = 16
  3030  					} else {
  3031  
  3032  						fs_kHz = 24
  3033  					}
  3034  				}
  3035  			}
  3036  		}
  3037  
  3038  		/* After switching up, stop transition filter during speech inactivity */
  3039  		if (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Fmode == 1) && ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Ftransition_frame_no >= (5120 / 20))) && ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FvadFlag == 0) {
  3040  
  3041  			(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Ftransition_frame_no = 0
  3042  
  3043  			/* Reset transition filter state */
  3044  			libc.Xmemset(tls, psEncC+15016 /* &.sLP */ /* &.In_LP_State */, 0, (uint64(2) * uint64(unsafe.Sizeof(int32(0)))))
  3045  		}
  3046  	}
  3047  
  3048  	return fs_kHz
  3049  }
  3050  
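// Editor's note: for a freshly initialized encoder (fs_kHz == 0) the function above
// picks the initial internal rate purely from the target bitrate, before clamping it
// against the API rate and maxInternal_fs_kHz. A compact restatement of that mapping
// (illustrative helper, not part of this file):

// initialInternalFsKHz mirrors the bitrate thresholds used above for an uninitialized
// encoder: >=25 kbps -> 24 kHz, >=14 kbps -> 16 kHz, >=10 kbps -> 12 kHz, else 8 kHz.
func initialInternalFsKHz(targetRateBps int32) int32 {
	switch {
	case targetRateBps >= 25000:
		return 24
	case targetRateBps >= 14000:
		return 16
	case targetRateBps >= 10000:
		return 12
	default:
		return 8
	}
}
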
  3078  /********************************/
  3079  /* Noise shaping analysis state */
  3080  /********************************/
  3081  type SKP_Silk_shape_state_FIX = struct {
  3082  	FLastGainIndex          int32
  3083  	FHarmBoost_smth_Q16     int32
  3084  	FHarmShapeGain_smth_Q16 int32
  3085  	FTilt_smth_Q16          int32
  3086  } /* SKP_Silk_structs_FIX.h:49:3 */
  3087  
  3088  /********************************/
  3089  /* Prefilter state              */
  3090  /********************************/
  3091  type SKP_Silk_prefilter_state_FIX = struct {
  3092  	FsLTP_shp         [512]int16
  3093  	FsAR_shp          [17]int32
  3094  	FsLTP_shp_buf_idx int32
  3095  	FsLF_AR_shp_Q12   int32
  3096  	FsLF_MA_shp_Q12   int32
  3097  	FsHarmHP          int32
  3098  	Frand_seed        int32
  3099  	FlagPrev          int32
  3100  } /* SKP_Silk_structs_FIX.h:63:3 */
  3101  
  3102  /*****************************/
  3103  /* Prediction analysis state */
  3104  /*****************************/
  3105  type SKP_Silk_predict_state_FIX = struct {
  3106  	Fpitch_LPC_win_length int32
  3107  	Fmin_pitch_lag        int32
  3108  	Fmax_pitch_lag        int32
  3109  	Fprev_NLSFq_Q15       [16]int32
  3110  } /* SKP_Silk_structs_FIX.h:73:3 */
  3111  
  3112  /********************************/
  3113  /* Encoder state FIX            */
  3114  /********************************/
  3115  type SKP_Silk_encoder_state_FIX = struct {
  3116  	FsCmn                           SKP_Silk_encoder_state
  3117  	Fvariable_HP_smth1_Q15          int32
  3118  	Fvariable_HP_smth2_Q15          int32
  3119  	FsShape                         SKP_Silk_shape_state_FIX
  3120  	FsPrefilt                       SKP_Silk_prefilter_state_FIX
  3121  	FsPred                          SKP_Silk_predict_state_FIX
  3122  	Fx_buf                          [1080]int16
  3123  	FLTPCorr_Q15                    int32
  3124  	Fmu_LTP_Q8                      int32
  3125  	FSNR_dB_Q7                      int32
  3126  	FavgGain_Q16                    int32
  3127  	FavgGain_Q16_one_bit_per_sample int32
  3128  	FBufferedInChannel_ms           int32
  3129  	Fspeech_activity_Q8             int32
  3130  	FprevLTPredCodGain_Q7           int32
  3131  	FHPLTPredCodGain_Q7             int32
  3132  	FinBandFEC_SNR_comp_Q8          int32
  3133  } /* SKP_Silk_structs_FIX.h:106:3 */
  3134  
  3135  /************************/
  3136  /* Encoder control FIX  */
  3137  /************************/
  3138  type SKP_Silk_encoder_control_FIX = struct {
  3139  	FsCmn                    SKP_Silk_encoder_control
  3140  	FGains_Q16               [4]int32
  3141  	FPredCoef_Q12            [2][16]int16
  3142  	FLTPCoef_Q14             [20]int16
  3143  	FLTP_scale_Q14           int32
  3144  	FAR1_Q13                 [64]int16
  3145  	FAR2_Q13                 [64]int16
  3146  	FLF_shp_Q14              [4]int32
  3147  	FGainsPre_Q14            [4]int32
  3148  	FHarmBoost_Q14           [4]int32
  3149  	FTilt_Q14                [4]int32
  3150  	FHarmShapeGain_Q14       [4]int32
  3151  	FLambda_Q10              int32
  3152  	Finput_quality_Q14       int32
  3153  	Fcoding_quality_Q14      int32
  3154  	Fpitch_freq_low_Hz       int32
  3155  	Fcurrent_SNR_dB_Q7       int32
  3156  	Fsparseness_Q8           int32
  3157  	FpredGain_Q16            int32
  3158  	FLTPredCodGain_Q7        int32
  3159  	Finput_quality_bands_Q15 [4]int32
  3160  	Finput_tilt_Q15          int32
  3161  	FResNrg                  [4]int32
  3162  	FResNrgQ                 [4]int32
  3163  } /* SKP_Silk_structs_FIX.h:144:3 */
  3164  
  3246  /*******************/
  3247  /* Pitch estimator */
  3248  /*******************/
  3249  
  3250  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
  3251  
  3252  /* Bandwidth expansion for whitening filter in pitch analysis */
  3253  
  3254  /* Threshold used by pitch estimator for early escape */
  3255  
  3256  /*********************/
  3257  /* Linear prediction */
  3258  /*********************/
  3259  
  3260  /* LPC analysis defines: regularization and bandwidth expansion */
  3261  
  3262  /* LTP analysis defines */
  3263  
  3264  /* LTP quantization settings */
  3265  
  3266  /***********************/
  3267  /* High pass filtering */
  3268  /***********************/
  3269  
  3270  /* Smoothing parameters for low end of pitch frequency range estimation */
  3271  
  3272  /* Min and max values for low end of pitch frequency range estimation */
  3273  
  3274  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
  3275  
  3276  /***********/
  3277  /* Various */
  3278  /***********/
  3279  
  3280  /* Required speech activity for counting frame as active */
  3281  
  3282  /* Speech Activity LBRR enable threshold (needs tuning) */
  3283  
  3284  /*************************/
  3285  /* Perceptual parameters */
  3286  /*************************/
  3287  
  3288  /* reduction in coding SNR during low speech activity */
  3289  
  3290  /* factor for reducing quantization noise during voiced speech */
  3291  
  3292  /* factor for reducing quantization noise for unvoiced sparse signals */
  3293  
  3294  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
  3295  
  3296  /* warping control */
  3297  
  3298  /* fraction added to first autocorrelation value */
  3299  
  3300  /* noise shaping filter chirp factor */
  3301  
  3302  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
  3303  
  3304  /* gain reduction for fricatives */
  3305  
  3306  /* extra harmonic boosting (signal shaping) at low bitrates */
  3307  
  3308  /* extra harmonic boosting (signal shaping) for noisy input signals */
  3309  
  3310  /* harmonic noise shaping */
  3311  
  3312  /* extra harmonic noise shaping for high bitrates or noisy input */
  3313  
  3314  /* parameter for shaping noise towards higher frequencies */
  3315  
  3316  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
  3317  
  3318  /* parameter for applying a high-pass tilt to the input signal */
  3319  
  3320  /* parameter for extra high-pass tilt to the input signal at high rates */
  3321  
  3322  /* parameter for reducing noise at the very low frequencies */
  3323  
  3324  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
  3325  
  3326  /* noise floor to put a lower limit on the quantization step size */
  3327  
  3328  /* noise floor relative to active speech gain level */
  3329  
  3330  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
  3331  
  3332  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
  3333  
  3334  /* parameters defining the R/D tradeoff in the residual quantizer */
  3335  
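        // The setup functions below build fixed-point constants with SKP_FIX_CONST(tls, C, Q).
        // A minimal reference sketch of that conversion, assuming the usual SILK definition
        // int32(C*2^Q + 0.5) and the non-negative constants used here; fixConstQRef is a
        // hypothetical helper, not part of the generated translation:
        func fixConstQRef(c float64, q uint) int32 {
        	// e.g. fixConstQRef(0.8, 16) == 52429, the Q16 value used for the low-complexity
        	// pitch estimation threshold below (hypothetical reference only)
        	return int32(c*float64(int64(1)<<q) + 0.5)
        }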
  3336  func SKP_Silk_setup_complexity(tls *libc.TLS, psEncC uintptr, Complexity int32) int32 { /* SKP_Silk_setup_complexity.h:31:20: */
  3337  	var ret int32 = 0
  3338  
  3339  	/* Check that settings are valid */
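        	/* Note: the (0 != 0) terms below are presumably the constant-folded LOW_COMPLEXITY_ONLY flag */
        	/* from the C sources; the -6 return value appears to be the encoder's invalid-complexity-setting */
        	/* error code defined in SKP_Silk_errors.h */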
  3340  	if (0 != 0) && (Complexity != 0) {
  3341  		ret = -6
  3342  	}
  3343  
  3344  	/* Set encoding complexity */
  3345  	if (Complexity == 0) || (0 != 0) {
  3346  		/* Low complexity */
  3347  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FComplexity = 0
  3348  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationComplexity = 0
  3349  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationThreshold_Q16 = SKP_FIX_CONST(tls, 0.8, 16)
  3350  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationLPCOrder = 6
  3351  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FshapingLPCOrder = 8
  3352  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fla_shape = (3 * (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz)
  3353  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision = 1
  3354  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FuseInterpolatedNLSFs = 0
  3355  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FLTPQuantLowComplexity = 1
  3356  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FNLSF_MSVQ_Survivors = 2
  3357  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fwarping_Q16 = 0
  3358  	} else if Complexity == 1 {
  3359  		/* Medium complexity */
  3360  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FComplexity = 1
  3361  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationComplexity = 1
  3362  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationThreshold_Q16 = SKP_FIX_CONST(tls, 0.75, 16)
  3363  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationLPCOrder = 12
  3364  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FshapingLPCOrder = 12
  3365  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fla_shape = (5 * (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz)
  3366  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision = 2
  3367  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FuseInterpolatedNLSFs = 0
  3368  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FLTPQuantLowComplexity = 0
  3369  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FNLSF_MSVQ_Survivors = 4
  3370  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fwarping_Q16 = ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz * SKP_FIX_CONST(tls, 0.015, 16))
  3371  	} else if Complexity == 2 {
  3372  		/* High complexity */
  3373  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FComplexity = 2
  3374  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationComplexity = 2
  3375  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationThreshold_Q16 = SKP_FIX_CONST(tls, 0.7, 16)
  3376  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationLPCOrder = 16
  3377  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FshapingLPCOrder = 16
  3378  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fla_shape = (5 * (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz)
  3379  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision = 4
  3380  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FuseInterpolatedNLSFs = 1
  3381  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FLTPQuantLowComplexity = 0
  3382  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FNLSF_MSVQ_Survivors = 16
  3383  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fwarping_Q16 = ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz * SKP_FIX_CONST(tls, 0.015, 16))
  3384  	} else {
  3385  		ret = -6
  3386  	}
  3387  
  3388  	/* Do not allow higher pitch estimation LPC order than predict LPC order */
  3389  	(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationLPCOrder = SKP_min_int(tls, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationLPCOrder, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder)
  3390  	(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FshapeWinLength = ((5 * (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz) + (2 * (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fla_shape))
  3391  
  3392  	return ret
  3393  }
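
        // Summary of the presets set above (taken directly from the assignments in this function):
        //   Complexity 0: pitch-estimation LPC order 6,  shaping LPC order 8,  1 delayed-decision state,  2 NLSF survivors, no warping
        //   Complexity 1: pitch-estimation LPC order 12, shaping LPC order 12, 2 delayed-decision states, 4 NLSF survivors, warping enabled
        //   Complexity 2: pitch-estimation LPC order 16, shaping LPC order 16, 4 delayed-decision states, 16 NLSF survivors, warping enabled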
  3394  
  3395  /* Control encoder */
  3396  func SKP_Silk_control_encoder_FIX(tls *libc.TLS, psEnc uintptr, PacketSize_ms int32, TargetRate_bps int32, PacketLoss_perc int32, DTX_enabled int32, Complexity int32) int32 { /* SKP_Silk_control_codec_FIX.c:56:9: */
  3397  	var fs_kHz int32
  3398  	var ret int32 = 0
  3399  
  3400  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fcontrolled_since_last_payload != 0 {
  3401  		if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz != (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fprev_API_fs_Hz) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz > 0) {
  3402  			/* Change in API sampling rate in the middle of encoding a packet */
  3403  			ret = ret + (SKP_Silk_setup_resamplers_FIX(tls, psEnc, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz))
  3404  		}
  3405  		return ret
  3406  	}
  3407  
  3408  	/* Beyond this point we know that there are no previously coded frames in the payload buffer */
  3409  
  3410  	/********************************************/
  3411  	/* Determine internal sampling rate         */
  3412  	/********************************************/
  3413  	fs_kHz = SKP_Silk_control_audio_bandwidth(tls, (psEnc /* &.sCmn */), TargetRate_bps)
  3414  
  3415  	/********************************************/
  3416  	/* Prepare resampler and buffered data      */
  3417  	/********************************************/
  3418  	ret = ret + (SKP_Silk_setup_resamplers_FIX(tls, psEnc, fs_kHz))
  3419  
  3420  	/********************************************/
  3421  	/* Set packet size                          */
  3422  	/********************************************/
  3423  	ret = ret + (SKP_Silk_setup_packetsize_FIX(tls, psEnc, PacketSize_ms))
  3424  
  3425  	/********************************************/
  3426  	/* Set internal sampling frequency          */
  3427  	/********************************************/
  3428  	ret = ret + (SKP_Silk_setup_fs_FIX(tls, psEnc, fs_kHz))
  3429  
  3430  	/********************************************/
  3431  	/* Set encoding complexity                  */
  3432  	/********************************************/
  3433  	ret = ret + (SKP_Silk_setup_complexity(tls, (psEnc /* &.sCmn */), Complexity))
  3434  
  3435  	/********************************************/
  3436  	/* Set bitrate/coding quality               */
  3437  	/********************************************/
  3438  	ret = ret + (SKP_Silk_setup_rate_FIX(tls, psEnc, TargetRate_bps))
  3439  
  3440  	/********************************************/
  3441  	/* Set packet loss rate measured by far end */
  3442  	/********************************************/
  3443  	if (PacketLoss_perc < 0) || (PacketLoss_perc > 100) {
  3444  		ret = -5
  3445  	}
  3446  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketLoss_perc = PacketLoss_perc
  3447  
  3448  	/********************************************/
  3449  	/* Set LBRR usage                           */
  3450  	/********************************************/
  3451  	ret = ret + (SKP_Silk_setup_LBRR_FIX(tls, psEnc))
  3452  
  3453  	/********************************************/
  3454  	/* Set DTX mode                             */
  3455  	/********************************************/
  3456  	if (DTX_enabled < 0) || (DTX_enabled > 1) {
  3457  		ret = -8
  3458  	}
  3459  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseDTX = DTX_enabled
  3460  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fcontrolled_since_last_payload = 1
  3461  
  3462  	return ret
  3463  }
  3464  
  3465  /* Control low bitrate redundancy usage */
  3466  func SKP_Silk_LBRR_ctrl_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrlC uintptr) { /* SKP_Silk_control_codec_FIX.c:133:6: */
  3467  	var LBRR_usage int32
  3468  
  3469  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_enabled != 0 {
  3470  		/* Control LBRR */
  3471  
  3472  		/* Usage control based on sensitivity and packet loss characteristics */
  3473  		/* For now, only enable adding redundancy to the next packet for active frames. Make more complex later. */
  3474  		LBRR_usage = 0
  3475  		if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8 > SKP_FIX_CONST(tls, 0.5, 8)) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketLoss_perc > 1) { // nb! maybe multiply loss prob and speech activity
  3476  			LBRR_usage = 1
  3477  		}
  3478  		(*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FLBRR_usage = LBRR_usage
  3479  	} else {
  3480  		(*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FLBRR_usage = 0
  3481  	}
  3482  }
  3483  
  3484  func SKP_Silk_setup_resamplers_FIX(tls *libc.TLS, psEnc uintptr, fs_kHz int32) int32 { /* SKP_Silk_control_codec_FIX.c:155:20: */
  3485  	bp := tls.Alloc(13152)
  3486  	defer tls.Free(13152)
  3487  
  3488  	var ret int32 = 0
  3489  
  3490  	if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz != fs_kHz) || ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fprev_API_fs_Hz != (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz) {
  3491  
  3492  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 0 {
  3493  			/* Initialize the resampler for enc_API.c preparing resampling from API_fs_Hz to fs_kHz */
  3494  			ret = ret + (SKP_Silk_resampler_init(tls, (psEnc /* &.sCmn */ + 18360 /* &.resampler_state */), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz, (fs_kHz * 1000)))
  3495  		} else {
  3496  			/* Allocate space for the worst-case temporary upsampling, 8 to 48 kHz, i.e. a factor of 6 */
  3497  			// var x_buf_API_fs_Hz [6480]int16 at bp+192, 12960
  3498  
  3499  			var nSamples_temp int32 = ((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length) << (1)) + (5 * (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz))
  3500  
  3501  			if (((int32(int16(fs_kHz))) * (int32(int16(1000)))) < (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz != 0) {
  3502  				/* Resample buffered data in x_buf to API_fs_Hz */
  3503  
  3504  				// var temp_resampler_state SKP_Silk_resampler_state_struct at bp, 192
  3505  
  3506  				/* Initialize resampler for temporary resampling of x_buf data to API_fs_Hz */
  3507  				ret = ret + (SKP_Silk_resampler_init(tls, bp /* &temp_resampler_state */, ((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz))) * (int32(int16(1000)))), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz))
  3508  
  3509  				/* Temporary resampling of x_buf data to API_fs_Hz */
  3510  				ret = ret + (SKP_Silk_resampler(tls, bp /* &temp_resampler_state */, bp+192 /* &x_buf_API_fs_Hz[0] */, psEnc+20784 /* &.x_buf */, nSamples_temp))
  3511  
  3512  				/* Calculate the number of samples that have been temporarily upsampled */
  3513  				nSamples_temp = ((nSamples_temp * (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz) / ((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz))) * (int32(int16(1000)))))
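        				/* Example: with a current internal rate of 16 kHz and API_fs_Hz = 48000, nSamples_temp starts as */
        				/* 2*320 + 5*16 = 720 samples at 16 kHz and becomes 720*48000/16000 = 2160 samples here */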
  3514  
  3515  				/* Initialize the resampler for enc_API.c preparing resampling from API_fs_Hz to fs_kHz */
  3516  				ret = ret + (SKP_Silk_resampler_init(tls, (psEnc /* &.sCmn */ + 18360 /* &.resampler_state */), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz, ((int32(int16(fs_kHz))) * (int32(int16(1000))))))
  3517  
  3518  			} else {
  3519  				/* Copy data */
  3520  				libc.Xmemcpy(tls, bp+192 /* &x_buf_API_fs_Hz[0] */, psEnc+20784 /* &.x_buf */, (uint64(nSamples_temp) * uint64(unsafe.Sizeof(int16(0)))))
  3521  			}
  3522  
  3523  			if (1000 * fs_kHz) != (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz {
  3524  				/* Correct resampler state (unless resampling by a factor of 1) by resampling buffered data from API_fs_Hz to fs_kHz */
  3525  				ret = ret + (SKP_Silk_resampler(tls, (psEnc /* &.sCmn */ + 18360 /* &.resampler_state */), psEnc+20784 /* &.x_buf */, bp+192 /* &x_buf_API_fs_Hz[0] */, nSamples_temp))
  3526  			}
  3527  		}
  3528  	}
  3529  
  3530  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fprev_API_fs_Hz = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz
  3531  
  3532  	return ret
  3533  }
  3534  
  3535  func SKP_Silk_setup_packetsize_FIX(tls *libc.TLS, psEnc uintptr, PacketSize_ms int32) int32 { /* SKP_Silk_control_codec_FIX.c:207:20: */
  3536  	var ret int32 = 0
  3537  
  3538  	/* Set packet size */
  3539  	if ((((PacketSize_ms != 20) && (PacketSize_ms != 40)) && (PacketSize_ms != 60)) && (PacketSize_ms != 80)) && (PacketSize_ms != 100) {
  3540  		ret = -3
  3541  	} else {
  3542  		if PacketSize_ms != (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketSize_ms {
  3543  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketSize_ms = PacketSize_ms
  3544  
  3545  			/* Packet length changes. Reset LBRR buffer */
  3546  			SKP_Silk_LBRR_reset(tls, (psEnc /* &.sCmn */))
  3547  		}
  3548  	}
  3549  	return ret
  3550  }
  3551  
  3552  func SKP_Silk_setup_fs_FIX(tls *libc.TLS, psEnc uintptr, fs_kHz int32) int32 { /* SKP_Silk_control_codec_FIX.c:232:20: */
  3553  	var ret int32 = 0
  3554  
  3555  	/* Set internal sampling frequency */
  3556  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz != fs_kHz {
  3557  		/* reset part of the state */
  3558  		libc.Xmemset(tls, (psEnc + 19576 /* &.sShape */), 0, uint64(unsafe.Sizeof(SKP_Silk_shape_state_FIX{})))
  3559  		libc.Xmemset(tls, (psEnc + 19592 /* &.sPrefilt */), 0, uint64(unsafe.Sizeof(SKP_Silk_prefilter_state_FIX{})))
  3560  		libc.Xmemset(tls, (psEnc + 20708 /* &.sPred */), 0, uint64(unsafe.Sizeof(SKP_Silk_predict_state_FIX{})))
  3561  		libc.Xmemset(tls, (psEnc /* &.sCmn */ + 2088 /* &.sNSQ */), 0, uint64(unsafe.Sizeof(SKP_Silk_nsq_state{})))
  3562  		libc.Xmemset(tls, psEnc /* &.sCmn */ +8548 /* &.sNSQ_LBRR */ /* &.xq */, 0, ((uint64(2 * (20 * 24))) * uint64(unsafe.Sizeof(int16(0)))))
  3563  		libc.Xmemset(tls, psEnc /* &.sCmn */ +16264 /* &.LBRR_buffer */, 0, (uint64(2) * uint64(unsafe.Sizeof(SKP_SILK_LBRR_struct{}))))
  3564  		libc.Xmemset(tls, psEnc /* &.sCmn */ +15016 /* &.sLP */ /* &.In_LP_State */, 0, (uint64(2) * uint64(unsafe.Sizeof(int32(0)))))
  3565  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsLP.Fmode == 1 {
  3566  			/* Begin transition phase */
  3567  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsLP.Ftransition_frame_no = 1
  3568  		} else {
  3569  			/* End transition phase */
  3570  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsLP.Ftransition_frame_no = 0
  3571  		}
  3572  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinputBufIx = 0
  3573  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf = 0
  3574  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnBytesInPayloadBuf = 0
  3575  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx = 0
  3576  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FTargetRate_bps = 0 /* Ensures that psEnc->SNR_dB is recomputed */
  3577  
  3578  		libc.Xmemset(tls, psEnc+20708 /* &.sPred */ +12 /* &.prev_NLSFq_Q15 */, 0, (uint64(16) * uint64(unsafe.Sizeof(int32(0)))))
  3579  
  3580  		/* Initialize non-zero parameters */
  3581  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FprevLag = 100
  3582  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fprev_sigtype = 1
  3583  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffirst_frame_after_reset = 1
  3584  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsPrefilt.FlagPrev = 100
  3585  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsShape.FLastGainIndex = 1
  3586  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsNSQ.FlagPrev = 100
  3587  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsNSQ.Fprev_inv_gain_Q16 = 65536
  3588  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsNSQ_LBRR.Fprev_inv_gain_Q16 = 65536
  3589  
  3590  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz = fs_kHz
  3591  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 8 {
  3592  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder = 10
  3593  			*(*uintptr)(unsafe.Pointer((psEnc /* &.sCmn */ + 16248 /* &.psNLSF_CB */))) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10))
  3594  			*(*uintptr)(unsafe.Pointer((psEnc /* &.sCmn */ + 16248 /* &.psNLSF_CB */) + 1*8)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10))
  3595  		} else {
  3596  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder = 16
  3597  			*(*uintptr)(unsafe.Pointer((psEnc /* &.sCmn */ + 16248 /* &.psNLSF_CB */))) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16))
  3598  			*(*uintptr)(unsafe.Pointer((psEnc /* &.sCmn */ + 16248 /* &.psNLSF_CB */) + 1*8)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16))
  3599  		}
  3600  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length = ((int32(int16(20))) * (int32(int16(fs_kHz))))
  3601  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length) / (4))
  3602  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch = ((int32(int16(2))) * (int32(int16(fs_kHz))))
  3603  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsPred.Fmin_pitch_lag = ((int32(int16(3))) * (int32(int16(fs_kHz))))
  3604  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsPred.Fmax_pitch_lag = ((int32(int16(18))) * (int32(int16(fs_kHz))))
  3605  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsPred.Fpitch_LPC_win_length = ((int32((int16(20 + (int32(2) << 1))))) * (int32(int16(fs_kHz))))
  3606  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 24 {
  3607  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fmu_LTP_Q8 = SKP_FIX_CONST(tls, 0.016, 8)
  3608  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_up = 0x7FFFFFFF
  3609  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_down = 25000
  3610  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 16 {
  3611  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fmu_LTP_Q8 = SKP_FIX_CONST(tls, 0.02, 8)
  3612  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_up = 30000
  3613  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_down = 14000
  3614  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 12 {
  3615  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fmu_LTP_Q8 = SKP_FIX_CONST(tls, 0.025, 8)
  3616  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_up = 18000
  3617  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_down = 10000
  3618  		} else {
  3619  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fmu_LTP_Q8 = SKP_FIX_CONST(tls, 0.03, 8)
  3620  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_up = 14000
  3621  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_down = 0
  3622  		}
  3623  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz_changed = 1
  3624  
  3625  		/* Check that settings are valid */
  3626  
  3627  	}
  3628  	return ret
  3629  }
  3630  
  3631  func SKP_Silk_setup_rate_FIX(tls *libc.TLS, psEnc uintptr, TargetRate_bps int32) int32 { /* SKP_Silk_control_codec_FIX.c:317:20: */
  3632  	var k int32
  3633  	var ret int32 = 0
  3634  	var frac_Q6 int32
  3635  	var rateTable uintptr
  3636  
  3637  	/* Set bitrate/coding quality */
  3638  	if TargetRate_bps != (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FTargetRate_bps {
  3639  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FTargetRate_bps = TargetRate_bps
  3640  
  3641  		/* If new TargetRate_bps, translate to SNR_dB value */
  3642  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 8 {
  3643  			rateTable = uintptr(unsafe.Pointer(&TargetRate_table_NB))
  3644  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 12 {
  3645  			rateTable = uintptr(unsafe.Pointer(&TargetRate_table_MB))
  3646  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 16 {
  3647  			rateTable = uintptr(unsafe.Pointer(&TargetRate_table_WB))
  3648  		} else {
  3649  			rateTable = uintptr(unsafe.Pointer(&TargetRate_table_SWB))
  3650  		}
  3651  		for k = 1; k < 8; k++ {
  3652  			/* Find bitrate interval in table and interpolate */
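        			/* Q-format note (inferred from the variable names): frac_Q6 is the Q6 position within the interval, */
        			/* SNR_table_Q1 holds SNR values in Q1 dB, so both (Q1 << 6) and (Q6 * Q1) below are Q7, giving SNR_dB_Q7 */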
  3653  			if TargetRate_bps <= *(*int32)(unsafe.Pointer(rateTable + uintptr(k)*4)) {
  3654  				frac_Q6 = (((TargetRate_bps - *(*int32)(unsafe.Pointer(rateTable + uintptr((k-1))*4))) << (6)) / (*(*int32)(unsafe.Pointer(rateTable + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(rateTable + uintptr((k-1))*4))))
  3655  				(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FSNR_dB_Q7 = (((SNR_table_Q1[(k - 1)]) << (6)) + ((frac_Q6) * (SNR_table_Q1[k] - SNR_table_Q1[(k-1)])))
  3656  				break
  3657  			}
  3658  		}
  3659  	}
  3660  	return ret
  3661  }
  3662  
  3663  func SKP_Silk_setup_LBRR_FIX(tls *libc.TLS, psEnc uintptr) int32 { /* SKP_Silk_control_codec_FIX.c:353:20: */
  3664  	var ret int32 = 0
  3665  	var LBRRRate_thres_bps int32
  3666  
  3667  	if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseInBandFEC < 0) || ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseInBandFEC > 1) {
  3668  		ret = -7
  3669  	}
  3670  
  3671  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_enabled = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseInBandFEC
  3672  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 8 {
  3673  		LBRRRate_thres_bps = (18000 - 9000)
  3674  	} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 12 {
  3675  		LBRRRate_thres_bps = (18000 - 6000)
  3676  
  3677  	} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 16 {
  3678  		LBRRRate_thres_bps = (18000 - 3000)
  3679  	} else {
  3680  		LBRRRate_thres_bps = 18000
  3681  	}
  3682  
  3683  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FTargetRate_bps >= LBRRRate_thres_bps {
  3684  		/* Set gain increase / rate reduction for LBRR usage */
  3685  		/* Coarsely tuned with PESQ for now. */
  3686  		/* Linear regression coefficients: G = 8 - 0.5 * loss */
  3687  		/* Meaning that at 16% loss the main rate and the redundant rate are the same, i.e. G = 0 */
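        		/* Example: PacketLoss_perc = 10 gives G = max(8 - (10 >> 1), 0) = 3; at 16% loss or more, G = 0 */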
  3688  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_GainIncreases = SKP_max_int(tls, (8 - (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketLoss_perc) >> (1))), 0)
  3689  
  3690  		/* Set main stream rate compensation */
  3691  		if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_enabled != 0) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketLoss_perc > 1) {
  3692  			/* Tuned to give approximately the same mean / weighted bitrate as without in-band FEC */
  3693  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FinBandFEC_SNR_comp_Q8 = (SKP_FIX_CONST(tls, 6.0, 8) - (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_GainIncreases) << (7)))
  3694  		} else {
  3695  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FinBandFEC_SNR_comp_Q8 = 0
  3696  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_enabled = 0
  3697  		}
  3698  	} else {
  3699  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FinBandFEC_SNR_comp_Q8 = 0
  3700  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_enabled = 0
  3701  	}
  3702  	return ret
  3703  }
  3704  
  3705  /* Calculates correlation vector X'*t */
  3706  func SKP_Silk_corrVector_FIX(tls *libc.TLS, x uintptr, t uintptr, L int32, order int32, Xt uintptr, rshifts int32) { /* SKP_Silk_corrMatrix_FIX.c:35:6: */
  3707  	var lag int32
  3708  	var i int32
  3709  	var ptr1 uintptr
  3710  	var ptr2 uintptr
  3711  	var inner_prod int32
  3712  
  3713  	ptr1 = (x + uintptr((order-1))*2) /* Points to first sample of column 0 of X: X[:,0] */
  3714  	ptr2 = t
  3715  	/* Calculate X'*t */
  3716  	if rshifts > 0 {
  3717  		/* Right shifting used */
  3718  		for lag = 0; lag < order; lag++ {
  3719  			inner_prod = 0
  3720  			for i = 0; i < L; i++ {
  3721  				inner_prod = inner_prod + (((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr(i)*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr2 + uintptr(i)*2))))) >> (rshifts))
  3722  			}
  3723  			*(*int32)(unsafe.Pointer(Xt + uintptr(lag)*4)) = inner_prod /* X[:,lag]'*t */
  3724  			ptr1 -= 2                                                   /* Go to next column of X */
  3725  		}
  3726  	} else {
  3727  
  3728  		for lag = 0; lag < order; lag++ {
  3729  			*(*int32)(unsafe.Pointer(Xt + uintptr(lag)*4)) = SKP_Silk_inner_prod_aligned(tls, ptr1, ptr2, L) /* X[:,lag]'*t */
  3730  			ptr1 -= 2                                                                                        /* Go to next column of X */
  3731  		}
  3732  	}
  3733  }
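
        // A hypothetical floating-point reference for the fixed-point routine above (shifts and
        // saturation omitted): column `lag` of the Toeplitz data matrix X starts at x[order-1-lag],
        // and Xt[lag] is its inner product with the target vector t. Sketch only, not part of the
        // generated translation:
        func corrVectorRef(x, t []float64, L, order int) []float64 {
        	Xt := make([]float64, order)
        	for lag := 0; lag < order; lag++ {
        		col := x[order-1-lag:] // first sample of column `lag` of X (hypothetical reference)
        		for i := 0; i < L; i++ {
        			Xt[lag] += col[i] * t[i]
        		}
        	}
        	return Xt
        }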
  3734  
  3735  /* Calculates correlation matrix X'*X */
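        /* XX is an order-by-order matrix stored row-major, with XX[i*order + j] = X[:,i]'*X[:,j]; */
        /* it is symmetric, so the loops below fill the (lag+j, j) and (j, lag+j) entries together */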
  3736  func SKP_Silk_corrMatrix_FIX(tls *libc.TLS, x uintptr, L int32, order int32, head_room int32, XX uintptr, rshifts uintptr) { /* SKP_Silk_corrMatrix_FIX.c:71:6: */
  3737  	bp := tls.Alloc(8)
  3738  	defer tls.Free(8)
  3739  
  3740  	var i int32
  3741  	var j int32
  3742  	var lag int32
  3743  	// var rshifts_local int32 at bp+4, 4
  3744  
  3745  	var head_room_rshifts int32
  3746  	// var energy int32 at bp, 4
  3747  
  3748  	var ptr1 uintptr
  3749  	var ptr2 uintptr
  3750  
  3751  	/* Calculate energy to find shift used to fit in 32 bits */
  3752  	SKP_Silk_sum_sqr_shift(tls, bp /* &energy */, bp+4 /* &rshifts_local */, x, ((L + order) - 1))
  3753  
  3754  	/* Add shifts to get the desired head room */
  3755  	head_room_rshifts = func() int32 {
  3756  		if (head_room - SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(bp /* energy */)))) > (0) {
  3757  			return (head_room - SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(bp /* energy */))))
  3758  		}
  3759  		return 0
  3760  	}()
  3761  
  3762  	*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) >> (head_room_rshifts))
  3763  	*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)) += head_room_rshifts
  3764  
  3765  	/* Calculate energy of first column (0) of X: X[:,0]'*X[:,0] */
  3766  	/* Remove contribution of first order - 1 samples */
  3767  	for i = 0; i < (order - 1); i++ {
  3768  		*(*int32)(unsafe.Pointer(bp /* energy */)) -= (((int32(*(*int16)(unsafe.Pointer(x + uintptr(i)*2)))) * (int32(*(*int16)(unsafe.Pointer(x + uintptr(i)*2))))) >> (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */))))
  3769  	}
  3770  	if *(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)) < *(*int32)(unsafe.Pointer(rshifts)) {
  3771  		/* Adjust energy */
  3772  		*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) >> (*(*int32)(unsafe.Pointer(rshifts)) - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */))))
  3773  		*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)) = *(*int32)(unsafe.Pointer(rshifts))
  3774  	}
  3775  
  3776  	/* Calculate energy of remaining columns of X: X[:,j]'*X[:,j] */
  3777  	/* Fill out the diagonal of the correlation matrix */
  3778  	*(*int32)(unsafe.Pointer((XX + uintptr((((0)*(order))+(0)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3779  	ptr1 = (x + uintptr((order-1))*2) /* First sample of column 0 of X */
  3780  	for j = 1; j < order; j++ {
  3781  		*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) - (((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr((L-j))*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr((L-j))*2))))) >> (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)))))
  3782  		*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) + (((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr(-j)*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr(-j)*2))))) >> (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)))))
  3783  		*(*int32)(unsafe.Pointer((XX + uintptr((((j)*(order))+(j)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3784  	}
  3785  
  3786  	ptr2 = (x + uintptr((order-2))*2) /* First sample of column 1 of X */
  3787  	/* Calculate the remaining elements of the correlation matrix */
  3788  	if *(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)) > 0 {
  3789  		/* Right shifting used */
  3790  		for lag = 1; lag < order; lag++ {
  3791  			/* Inner product of column 0 and column lag: X[:,0]'*X[:,lag] */
  3792  			*(*int32)(unsafe.Pointer(bp /* energy */)) = 0
  3793  			for i = 0; i < L; i++ {
  3794  				*(*int32)(unsafe.Pointer(bp /* energy */)) += (((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr(i)*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr2 + uintptr(i)*2))))) >> (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */))))
  3795  			}
  3796  			/* Calculate remaining off diagonal: X[:,j]'*X[:,j + lag] */
  3797  			*(*int32)(unsafe.Pointer((XX + uintptr((((lag)*(order))+(0)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3798  			*(*int32)(unsafe.Pointer((XX + uintptr((((0)*(order))+(lag)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3799  			for j = 1; j < (order - lag); j++ {
  3800  				*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) - (((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr((L-j))*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr2 + uintptr((L-j))*2))))) >> (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)))))
  3801  				*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) + (((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr(-j)*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr2 + uintptr(-j)*2))))) >> (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)))))
  3802  				*(*int32)(unsafe.Pointer((XX + uintptr((((lag+j)*(order))+(j)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3803  				*(*int32)(unsafe.Pointer((XX + uintptr((((j)*(order))+(lag+j)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3804  			}
  3805  			ptr2 -= 2 /* Update pointer to first sample of next column (lag) in X */
  3806  		}
  3807  	} else {
  3808  		for lag = 1; lag < order; lag++ {
  3809  			/* Inner product of column 0 and column lag: X[:,0]'*X[:,lag] */
  3810  			*(*int32)(unsafe.Pointer(bp /* energy */)) = SKP_Silk_inner_prod_aligned(tls, ptr1, ptr2, L)
  3811  			*(*int32)(unsafe.Pointer((XX + uintptr((((lag)*(order))+(0)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3812  			*(*int32)(unsafe.Pointer((XX + uintptr((((0)*(order))+(lag)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3813  			/* Calculate remaining off diagonal: X[:,j]'*X[:,j + lag] */
  3814  			for j = 1; j < (order - lag); j++ {
  3815  				*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) - ((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr((L-j))*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr2 + uintptr((L-j))*2))))))
  3816  				*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) + ((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr(-j)*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr2 + uintptr(-j)*2))))))
  3817  				*(*int32)(unsafe.Pointer((XX + uintptr((((lag+j)*(order))+(j)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3818  				*(*int32)(unsafe.Pointer((XX + uintptr((((j)*(order))+(lag+j)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3819  			}
  3820  			ptr2 -= 2 /* Update pointer to first sample of next column (lag) in X */
  3821  		}
  3822  	}
  3823  	*(*int32)(unsafe.Pointer(rshifts)) = *(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */))
  3824  }
  3825  
  3826  /************************/
  3827  /* Init Decoder State   */
  3828  /************************/
  3829  func SKP_Silk_init_decoder(tls *libc.TLS, psDec uintptr) int32 { /* SKP_Silk_create_init_destroy.c:34:9: */
  3830  	libc.Xmemset(tls, psDec, 0, uint64(unsafe.Sizeof(SKP_Silk_decoder_state{})))
  3831  	/* Set sampling rate to 24 kHz, and init non-zero values */
  3832  	SKP_Silk_decoder_set_fs(tls, psDec, 24)
  3833  
  3834  	/* Used to deactivate e.g. LSF interpolation and fluctuation reduction */
  3835  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffirst_frame_after_reset = 1
  3836  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_inv_gain_Q16 = 65536
  3837  
  3838  	/* Reset CNG state */
  3839  	SKP_Silk_CNG_Reset(tls, psDec)
  3840  
  3841  	SKP_Silk_PLC_Reset(tls, psDec)
  3842  
  3843  	return 0
  3844  }
  3845  
  3846  /* Set decoder sampling rate */
  3847  func SKP_Silk_decoder_set_fs(tls *libc.TLS, psDec uintptr, fs_kHz int32) { /* SKP_Silk_decoder_set_fs.c:31:6: */
  3848  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz != fs_kHz {
  3849  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz = fs_kHz
  3850  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length = ((int32(int16(20))) * (int32(int16(fs_kHz))))
  3851  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length = ((int32((int16(20 / 4)))) * (int32(int16(fs_kHz))))
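        		/* i.e. 20 ms frames split into four 5 ms subframes at the given sampling rate */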
  3852  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz == 8 {
  3853  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order = 10
  3854  			*(*uintptr)(unsafe.Pointer((psDec + 11536 /* &.psNLSF_CB */))) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10))
  3855  			*(*uintptr)(unsafe.Pointer((psDec + 11536 /* &.psNLSF_CB */) + 1*8)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10))
  3856  		} else {
  3857  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order = 16
  3858  			*(*uintptr)(unsafe.Pointer((psDec + 11536 /* &.psNLSF_CB */))) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16))
  3859  			*(*uintptr)(unsafe.Pointer((psDec + 11536 /* &.psNLSF_CB */) + 1*8)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16))
  3860  		}
  3861  		/* Reset part of the decoder state */
  3862  		libc.Xmemset(tls, psDec+4888 /* &.sLPC_Q14 */, 0, (uint64(16) * uint64(unsafe.Sizeof(int32(0)))))
  3863  		libc.Xmemset(tls, psDec+9272 /* &.outBuf */, 0, ((uint64(20 * 24)) * uint64(unsafe.Sizeof(int16(0)))))
  3864  		libc.Xmemset(tls, psDec+11252 /* &.prevNLSF_Q15 */, 0, (uint64(16) * uint64(unsafe.Sizeof(int32(0)))))
  3865  
  3866  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlagPrev = 100
  3867  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLastGainIndex = 1
  3868  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_sigtype = 0
  3869  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffirst_frame_after_reset = 1
  3870  
  3871  		if fs_kHz == 24 {
  3872  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_A = uintptr(unsafe.Pointer(&SKP_Silk_Dec_A_HP_24))
  3873  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_B = uintptr(unsafe.Pointer(&SKP_Silk_Dec_B_HP_24))
  3874  		} else if fs_kHz == 16 {
  3875  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_A = uintptr(unsafe.Pointer(&SKP_Silk_Dec_A_HP_16))
  3876  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_B = uintptr(unsafe.Pointer(&SKP_Silk_Dec_B_HP_16))
  3877  		} else if fs_kHz == 12 {
  3878  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_A = uintptr(unsafe.Pointer(&SKP_Silk_Dec_A_HP_12))
  3879  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_B = uintptr(unsafe.Pointer(&SKP_Silk_Dec_B_HP_12))
  3880  		} else if fs_kHz == 8 {
  3881  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_A = uintptr(unsafe.Pointer(&SKP_Silk_Dec_A_HP_8))
  3882  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_B = uintptr(unsafe.Pointer(&SKP_Silk_Dec_B_HP_8))
  3883  		} else {
  3884  			/* unsupported sampling rate */
  3885  
  3886  		}
  3887  	}
  3888  
  3889  	/* Check that settings are valid */
  3890  
  3891  }
  3892  
  3893  /**********************************************************/
  3894  /* Core decoder. Performs the inverse NSQ operation: LTP and LPC synthesis */
  3895  /**********************************************************/
  3896  func SKP_Silk_decode_core(tls *libc.TLS, psDec uintptr, psDecCtrl uintptr, xq uintptr, q uintptr) { /* SKP_Silk_decode_core.c:44:6: */
  3897  	bp := tls.Alloc(1536)
  3898  	defer tls.Free(1536)
  3899  
  3900  	var i int32
  3901  	var k int32
  3902  	var lag int32 = 0
  3903  	var start_idx int32
  3904  	var sLTP_buf_idx int32
  3905  	var NLSF_interpolation_flag int32
  3906  	var sigtype int32
  3907  	var A_Q12 uintptr
  3908  	var B_Q14 uintptr
  3909  	var pxq uintptr
  3910  	// var A_Q12_tmp [16]int16 at bp, 32
  3911  
  3912  	// var sLTP [480]int16 at bp+96, 960
  3913  
  3914  	var LTP_pred_Q14 int32
  3915  	var Gain_Q16 int32
  3916  	var inv_gain_Q16 int32
  3917  	var inv_gain_Q32 int32
  3918  	var gain_adj_Q16 int32
  3919  	var rand_seed int32
  3920  	var offset_Q10 int32
  3921  	var dither int32
  3922  	var pred_lag_ptr uintptr
  3923  	var pexc_Q10 uintptr
  3924  	var pres_Q10 uintptr
  3925  	// var vec_Q10 [120]int32 at bp+1056, 480
  3926  
  3927  	// var FiltState [16]int32 at bp+32, 64
  3928  
  3929  	offset_Q10 = int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Quantization_Offsets_Q10)) + uintptr((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype)*4) + uintptr((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FQuantOffsetType)*2)))
  3930  
  3931  	if (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FNLSFInterpCoef_Q2 < (int32(1) << 2) {
  3932  		NLSF_interpolation_flag = 1
  3933  	} else {
  3934  		NLSF_interpolation_flag = 0
  3935  	}
  3936  
  3937  	/* Decode excitation */
  3938  	rand_seed = (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FSeed
  3939  	for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length; i++ {
  3940  		rand_seed = (int32((uint32(907633515)) + ((uint32(rand_seed)) * (uint32(196314165)))))
  3941  		/* dither = rand_seed < 0 ? 0xFFFFFFFF : 0; */
  3942  		dither = ((rand_seed) >> (31))
  3943  
  3944  		*(*int32)(unsafe.Pointer((psDec + 5432 /* &.exc_Q10 */) + uintptr(i)*4)) = (((*(*int32)(unsafe.Pointer(q + uintptr(i)*4))) << (10)) + offset_Q10)
  3945  		*(*int32)(unsafe.Pointer((psDec + 5432 /* &.exc_Q10 */) + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer((psDec + 5432 /* &.exc_Q10 */) + uintptr(i)*4)) ^ dither) - dither)
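        		/* (x ^ dither) - dither is a branch-free conditional negation: dither is 0 or -1 (all ones), */
        		/* so the excitation sample keeps or flips its sign depending on the pseudo-random sign bit */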
  3946  
  3947  		rand_seed = rand_seed + (*(*int32)(unsafe.Pointer(q + uintptr(i)*4)))
  3948  	}
  3949  
  3950  	pexc_Q10 = psDec + 5432 /* &.exc_Q10 */
  3951  	pres_Q10 = psDec + 7352 /* &.res_Q10 */
  3952  	pxq = ((psDec + 9272 /* &.outBuf */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length)*2)
  3953  	sLTP_buf_idx = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length
  3954  	/* Loop over subframes */
  3955  	for k = 0; k < 4; k++ {
  3956  		A_Q12 = psDecCtrl + 36 /* &.PredCoef_Q12 */ + uintptr((k>>1))*32
  3957  
  3958  		/* Preload LPC coefficients into an array on the stack. Gives a small performance gain */
  3959  		libc.Xmemcpy(tls, bp /* &A_Q12_tmp[0] */, A_Q12, (uint64((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) * uint64(unsafe.Sizeof(int16(0)))))
  3960  		B_Q14 = ((psDecCtrl + 100 /* &.LTPCoef_Q14 */) + uintptr((k*5))*2)
  3961  		Gain_Q16 = *(*int32)(unsafe.Pointer((psDecCtrl + 16 /* &.Gains_Q16 */) + uintptr(k)*4))
  3962  		sigtype = (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype
  3963  
  3964  		inv_gain_Q16 = SKP_INVERSE32_varQ(tls, func() int32 {
  3965  			if (Gain_Q16) > (1) {
  3966  				return Gain_Q16
  3967  			}
  3968  			return 1
  3969  		}(), 32)
  3970  		inv_gain_Q16 = func() int32 {
  3971  			if (inv_gain_Q16) < (0x7FFF) {
  3972  				return inv_gain_Q16
  3973  			}
  3974  			return 0x7FFF
  3975  		}()
  3976  
  3977  		/* Calculate Gain adjustment factor */
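        		/* gain_adj_Q16 is inv_gain_Q16 / prev_inv_gain_Q16 in Q16; it is used below to rescale the */
        		/* LTP and LPC filter states when the subframe gain changes */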
  3978  		gain_adj_Q16 = (int32(1) << 16)
  3979  		if inv_gain_Q16 != (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_inv_gain_Q16 {
  3980  			gain_adj_Q16 = SKP_DIV32_varQ(tls, inv_gain_Q16, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_inv_gain_Q16, 16)
  3981  		}
  3982  
  3983  		/* Avoid abrupt transition from voiced PLC to unvoiced normal decoding */
  3984  		if ((((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt != 0) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_sigtype == 0)) && ((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype == 1)) && (k < (int32(4) >> 1)) {
  3985  
  3986  			libc.Xmemset(tls, B_Q14, 0, (uint64(5) * uint64(unsafe.Sizeof(int16(0)))))
  3987  			*(*int16)(unsafe.Pointer(B_Q14 + 2*2)) = (int16(int32(int32(int16(1))) << 12)) /* 0.25 */
  3988  
  3989  			sigtype = 0
  3990  			*(*int32)(unsafe.Pointer((psDecCtrl /* &.pitchL */) + uintptr(k)*4)) = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlagPrev
  3991  		}
  3992  
  3993  		if sigtype == 0 {
  3994  			/* Voiced */
  3995  
  3996  			lag = *(*int32)(unsafe.Pointer((psDecCtrl /* &.pitchL */) + uintptr(k)*4))
  3997  			/* Re-whitening */
  3998  			if (k & (3 - ((NLSF_interpolation_flag) << (1)))) == 0 {
  3999  				/* Rewhiten with new A coefs */
  4000  				start_idx = ((((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length - lag) - (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) - (5 / 2))
  4001  
  4002  				libc.Xmemset(tls, bp+32 /* &FiltState[0] */, 0, (uint64((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) * uint64(unsafe.Sizeof(int32(0))))) /* Not really necessary, but Valgrind and Coverity will complain otherwise */
  4003  				SKP_Silk_MA_Prediction(tls, ((psDec + 9272 /* &.outBuf */) + uintptr((start_idx+(k*((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length>>2))))*2),
  4004  					A_Q12, bp+32 /* &FiltState[0] */, (bp + 96 /* &sLTP[0] */ + uintptr(start_idx)*2), ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length - start_idx), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order)
  4005  
  4006  				/* After rewhitening the LTP state is unscaled */
  4007  				inv_gain_Q32 = ((inv_gain_Q16) << (16))
  4008  				if k == 0 {
  4009  					/* Do LTP downscaling */
  4010  					inv_gain_Q32 = (((((inv_gain_Q32) >> 16) * (int32(int16((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FLTP_scale_Q14)))) + ((((inv_gain_Q32) & 0x0000FFFF) * (int32(int16((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FLTP_scale_Q14)))) >> 16)) << (2))
  4011  				}
  4012  				for i = 0; i < (lag + (5 / 2)); i++ {
  4013  					*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-i)-1))*4)) = ((((inv_gain_Q32) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp + 96 /* &sLTP[0] */ + uintptr((((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length-i)-1))*2))))) + ((((inv_gain_Q32) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp + 96 /* &sLTP[0] */ + uintptr((((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length-i)-1))*2))))) >> 16))
  4014  				}
  4015  			} else {
  4016  				/* Update LTP state when Gain changes */
  4017  				if gain_adj_Q16 != (int32(1) << 16) {
  4018  					for i = 0; i < (lag + (5 / 2)); i++ {
  4019  						*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-i)-1))*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-i)-1))*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-i)-1))*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
  4020  							if (16) == 1 {
  4021  								return (((*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-i)-1))*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-i)-1))*4))) & 1))
  4022  							}
  4023  							return ((((*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-i)-1))*4))) >> ((16) - 1)) + 1) >> 1)
  4024  						}())))
  4025  					}
  4026  				}
  4027  			}
  4028  		}
  4029  
  4030  		/* Scale short term state */
  4031  		for i = 0; i < 16; i++ {
  4032  			*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
  4033  				if (16) == 1 {
  4034  					return (((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(i)*4))) & 1))
  4035  				}
  4036  				return ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
  4037  			}())))
  4038  		}
  4039  
  4040  		/* Save inv_gain */
  4041  
  4042  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_inv_gain_Q16 = inv_gain_Q16
  4043  
  4044  		/* Long-term prediction */
  4045  		if sigtype == 0 {
  4046  			/* Setup pointer */
  4047  			pred_lag_ptr = ((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-lag)+(5/2)))*4)
  4048  			for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length; i++ {
  4049  				/* Unrolled loop */
  4050  				LTP_pred_Q14 = ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14))))) >> 16))
  4051  				LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 1*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 1*2))))) >> 16)))
  4052  				LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 2*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 2*2))))) >> 16)))
  4053  				LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 3*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 3*2))))) >> 16)))
  4054  				LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 4*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 4*2))))) >> 16)))
  4055  				pred_lag_ptr += 4
  4056  
  4057  				/* Generate LPC residual */
  4058  				*(*int32)(unsafe.Pointer(pres_Q10 + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer(pexc_Q10 + uintptr(i)*4))) + (func() int32 {
  4059  					if (4) == 1 {
  4060  						return (((LTP_pred_Q14) >> 1) + ((LTP_pred_Q14) & 1))
  4061  					}
  4062  					return ((((LTP_pred_Q14) >> ((4) - 1)) + 1) >> 1)
  4063  				}()))
  4064  
  4065  				/* Update states */
  4066  				*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(sLTP_buf_idx)*4)) = ((*(*int32)(unsafe.Pointer(pres_Q10 + uintptr(i)*4))) << (6))
  4067  				sLTP_buf_idx++
  4068  			}
  4069  		} else {
  4070  			libc.Xmemcpy(tls, pres_Q10, pexc_Q10, (uint64((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length) * uint64(unsafe.Sizeof(int32(0)))))
  4071  		}
  4072  
  4073  		SKP_Silk_decode_short_term_prediction(tls, bp+1056 /* &vec_Q10[0] */, pres_Q10, psDec+4888 /* &.sLPC_Q14 */, bp /* &A_Q12_tmp[0] */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)
  4074  
  4075  		/* Scale with Gain */
  4076  		for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length; i++ {
  4077  			*(*int16)(unsafe.Pointer(pxq + uintptr(i)*2)) = func() int16 {
  4078  				if (func() int32 {
  4079  					if (10) == 1 {
  4080  						return (((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4081  							if (16) == 1 {
  4082  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4083  							}
  4084  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4085  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4086  							if (16) == 1 {
  4087  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4088  							}
  4089  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4090  						}()))) & 1))
  4091  					}
  4092  					return ((((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4093  						if (16) == 1 {
  4094  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4095  						}
  4096  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4097  					}()))) >> ((10) - 1)) + 1) >> 1)
  4098  				}()) > 0x7FFF {
  4099  					return int16(0x7FFF)
  4100  				}
  4101  				return func() int16 {
  4102  					if (func() int32 {
  4103  						if (10) == 1 {
  4104  							return (((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4105  								if (16) == 1 {
  4106  									return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4107  								}
  4108  								return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4109  							}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4110  								if (16) == 1 {
  4111  									return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4112  								}
  4113  								return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4114  							}()))) & 1))
  4115  						}
  4116  						return ((((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4117  							if (16) == 1 {
  4118  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4119  							}
  4120  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4121  						}()))) >> ((10) - 1)) + 1) >> 1)
  4122  					}()) < (int32(libc.Int16FromInt32(0x8000))) {
  4123  						return libc.Int16FromInt32(0x8000)
  4124  					}
  4125  					return func() int16 {
  4126  						if (10) == 1 {
  4127  							return (int16(((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4128  								if (16) == 1 {
  4129  									return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4130  								}
  4131  								return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4132  							}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4133  								if (16) == 1 {
  4134  									return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4135  								}
  4136  								return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4137  							}()))) & 1)))
  4138  						}
  4139  						return (int16((((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4140  							if (16) == 1 {
  4141  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4142  							}
  4143  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4144  						}()))) >> ((10) - 1)) + 1) >> 1))
  4145  					}()
  4146  				}()
  4147  			}()
  4148  		}
  4149  
  4150  		/* Update LPC filter state */
  4151  		libc.Xmemcpy(tls, psDec+4888 /* &.sLPC_Q14 */, ((psDec + 4888 /* &.sLPC_Q14 */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)*4), (uint64(16) * uint64(unsafe.Sizeof(int32(0)))))
  4152  		pexc_Q10 += 4 * (uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length))
  4153  		pres_Q10 += 4 * (uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length))
  4154  		pxq += 2 * (uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length))
  4155  	}
  4156  
  4157  	/* Copy to output */
  4158  	libc.Xmemcpy(tls, xq, ((psDec + 9272 /* &.outBuf */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length)*2), (uint64((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length) * uint64(unsafe.Sizeof(int16(0)))))
  4159  
  4160  }
  4161  
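        // rshiftRound is not part of the generated bindings; it is added here only as
        // a readable reference for the rounding right-shift idiom that ccgo expands
        // inline throughout this file (the `if n == 1 { ... }` closures above):
        // shift x right by n bits, rounding to nearest.
        func rshiftRound(x int32, n int32) int32 {
        	if n == 1 {
        		return (x >> 1) + (x & 1)
        	}
        	return ((x >> (n - 1)) + 1) >> 1
        }
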
  4162  func SKP_Silk_decode_short_term_prediction(tls *libc.TLS, vec_Q10 uintptr, pres_Q10 uintptr, sLPC_Q14 uintptr, A_Q12_tmp uintptr, LPC_order int32, subfr_length int32) { /* SKP_Silk_decode_core.c:204:6: */
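        	/* Short-term LPC synthesis: each Q14 state sample is multiplied by a Q12   */
        	/* coefficient (32x16 fixed-point multiply, result taken >> 16), summed     */
        	/* into a Q10 prediction and added to the residual. The first ten taps are  */
        	/* unrolled; any taps above order 10 are handled in the loop below.         */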
  4163  	var i int32
  4164  	var LPC_pred_Q10 int32
  4165  	var j int32
  4166  	for i = 0; i < subfr_length; i++ {
  4167  		/* Partially unrolled */
  4168  		LPC_pred_Q10 = ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-1))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-1))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp))))) >> 16))
  4169  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-2))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 1*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-2))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 1*2))))) >> 16)))
  4170  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-3))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 2*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-3))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 2*2))))) >> 16)))
  4171  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-4))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 3*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-4))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 3*2))))) >> 16)))
  4172  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-5))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 4*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-5))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 4*2))))) >> 16)))
  4173  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-6))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 5*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-6))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 5*2))))) >> 16)))
  4174  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-7))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 6*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-7))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 6*2))))) >> 16)))
  4175  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-8))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 7*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-8))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 7*2))))) >> 16)))
  4176  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-9))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 8*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-9))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 8*2))))) >> 16)))
  4177  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-10))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 9*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-10))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 9*2))))) >> 16)))
  4178  
  4179  		for j = 10; j < LPC_order; j++ {
  4180  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr((((16+i)-j)-1))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + uintptr(j)*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr((((16+i)-j)-1))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + uintptr(j)*2))))) >> 16)))
  4181  		}
  4182  
  4183  		/* Add prediction to LPC residual */
  4184  		*(*int32)(unsafe.Pointer(vec_Q10 + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer(pres_Q10 + uintptr(i)*4))) + (LPC_pred_Q10))
  4185  
  4186  		/* Update states */
  4187  		*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr((16+i))*4)) = ((*(*int32)(unsafe.Pointer(vec_Q10 + uintptr(i)*4))) << (4))
  4188  	}
  4189  }
  4190  
  4191  /***********************************************************************
  4192  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  4193  Redistribution and use in source and binary forms, with or without
  4194  modification, (subject to the limitations in the disclaimer below)
  4195  are permitted provided that the following conditions are met:
  4196  - Redistributions of source code must retain the above copyright notice,
  4197  this list of conditions and the following disclaimer.
  4198  - Redistributions in binary form must reproduce the above copyright
  4199  notice, this list of conditions and the following disclaimer in the
  4200  documentation and/or other materials provided with the distribution.
  4201  - Neither the name of Skype Limited, nor the names of specific
  4202  contributors, may be used to endorse or promote products derived from
  4203  this software without specific prior written permission.
  4204  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  4205  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  4206  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  4207  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  4208  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  4209  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  4210  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  4211  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  4212  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  4213  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  4214  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  4215  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  4216  ***********************************************************************/
  4217  
  4218  /****************/
  4219  /* Decode frame */
  4220  /****************/
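        /* action == 0 decodes the payload normally; action == 1 only runs packet    */
        /* loss concealment. If the range decoder reports an error the function      */
        /* falls back to concealment and returns a negative error code.              */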
  4221  func SKP_Silk_decode_frame(tls *libc.TLS, psDec uintptr, pOut uintptr, pN uintptr, pCode uintptr, nBytes int32, action int32, decBytes uintptr) int32 { /* SKP_Silk_decode_frame.c:35:9: */
  4222  	bp := tls.Alloc(2084)
  4223  	defer tls.Free(2084)
  4224  
  4225  	// var sDecCtrl SKP_Silk_decoder_control at bp, 164
  4226  
  4227  	var L int32
  4228  	var fs_Khz_old int32
  4229  	var ret int32 = 0
  4230  	// var Pulses [480]int32 at bp+164, 1920
  4231  
  4232  	L = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length
  4233  	(*SKP_Silk_decoder_control)(unsafe.Pointer(bp /* &sDecCtrl */)).FLTP_scale_Q14 = 0
  4234  
  4235  	/* Safety checks */
  4236  
  4237  	/********************************************/
  4238  	/* Decode Frame if packet is not lost  */
  4239  	/********************************************/
  4240  	*(*int32)(unsafe.Pointer(decBytes)) = 0
  4241  	if action == 0 {
  4242  		/********************************************/
  4243  		/* Initialize arithmetic coder              */
  4244  		/********************************************/
  4245  		fs_Khz_old = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz
  4246  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded == 0 {
  4247  			/* Initialize range decoder state */
  4248  			SKP_Silk_range_dec_init(tls, (psDec /* &.sRC */), pCode, nBytes)
  4249  		}
  4250  
  4251  		/********************************************/
  4252  		/* Decode parameters and pulse signal       */
  4253  		/********************************************/
  4254  		SKP_Silk_decode_parameters(tls, psDec, bp /* &sDecCtrl */, bp+164 /* &Pulses[0] */, 1)
  4255  
  4256  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsRC.Ferror != 0 {
  4257  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnBytesLeft = 0
  4258  
  4259  			action = 1 /* PLC operation */
  4260  			/* revert fs if changed in decode_parameters */
  4261  			SKP_Silk_decoder_set_fs(tls, psDec, fs_Khz_old)
  4262  
  4263  			/* Avoid crashing */
  4264  			*(*int32)(unsafe.Pointer(decBytes)) = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsRC.FbufferLength
  4265  
  4266  			if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsRC.Ferror == -8 {
  4267  				ret = -11
  4268  			} else {
  4269  				ret = -12
  4270  			}
  4271  		} else {
  4272  			*(*int32)(unsafe.Pointer(decBytes)) = ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsRC.FbufferLength - (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnBytesLeft)
  4273  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded++
  4274  
  4275  			/* Update lengths. Sampling frequency could have changed */
  4276  			L = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length
  4277  
  4278  			/********************************************************/
  4279  			/* Run inverse NSQ                                      */
  4280  			/********************************************************/
  4281  			SKP_Silk_decode_core(tls, psDec, bp /* &sDecCtrl */, pOut, bp+164 /* &Pulses[0] */)
  4282  
  4283  			/********************************************************/
  4284  			/* Update PLC state                                     */
  4285  			/********************************************************/
  4286  			SKP_Silk_PLC(tls, psDec, bp /* &sDecCtrl */, pOut, L, action)
  4287  
  4288  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt = 0
  4289  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_sigtype = (*SKP_Silk_decoder_control)(unsafe.Pointer(bp /* &sDecCtrl */)).Fsigtype
  4290  
  4291  			/* A frame has been decoded without errors */
  4292  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffirst_frame_after_reset = 0
  4293  		}
  4294  	}
  4295  	/*************************************************************/
  4296  	/* Generate Concealment frame if packet is lost, or corrupt  */
  4297  	/*************************************************************/
  4298  	if action == 1 {
  4299  		/* Handle packet loss by extrapolation */
  4300  		SKP_Silk_PLC(tls, psDec, bp /* &sDecCtrl */, pOut, L, action)
  4301  	}
  4302  
  4303  	/*************************/
  4304  	/* Update output buffer. */
  4305  	/*************************/
  4306  	libc.Xmemcpy(tls, psDec+9272 /* &.outBuf */, pOut, (uint64(L) * uint64(unsafe.Sizeof(int16(0)))))
  4307  
  4308  	/****************************************************************/
  4309  	/* Ensure smooth connection of extrapolated and good frames     */
  4310  	/****************************************************************/
  4311  	SKP_Silk_PLC_glue_frames(tls, psDec, bp /* &sDecCtrl */, pOut, L)
  4312  
  4313  	/************************************************/
  4314  	/* Comfort noise generation / estimation        */
  4315  	/************************************************/
  4316  	SKP_Silk_CNG(tls, psDec, bp /* &sDecCtrl */, pOut, L)
  4317  
  4318  	/********************************************/
  4319  	/* HP filter output                            */
  4320  	/********************************************/
  4321  
  4322  	SKP_Silk_biquad(tls, pOut, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_B, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_A, psDec+11208 /* &.HPState */, pOut, L)
  4323  
  4324  	/********************************************/
  4325  	/* set output frame length                    */
  4326  	/********************************************/
  4327  	*(*int16)(unsafe.Pointer(pN)) = int16(L)
  4328  
  4329  	/* Update some decoder state variables */
  4330  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlagPrev = *(*int32)(unsafe.Pointer((bp /* &sDecCtrl */ /* &.pitchL */) + 3*4))
  4331  
  4332  	return ret
  4333  }
  4334  
  4335  /* Decode parameters from payload */
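        /* The parameters are range-decoded in a fixed order: sampling rate (first   */
        /* frame of a packet only), signal type and quantizer offset, subframe       */
        /* gains, NLSFs and their interpolation factor, pitch lags and LTP gains     */
        /* for voiced frames, the quantizer seed, the excitation pulses, and         */
        /* finally the VAD and frame termination flags.                              */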
  4336  func SKP_Silk_decode_parameters(tls *libc.TLS, psDec uintptr, psDecCtrl uintptr, q uintptr, fullDecoding int32) { /* SKP_Silk_decode_parameters.c:31:6: */
  4337  	bp := tls.Alloc(208)
  4338  	defer tls.Free(208)
  4339  
  4340  	var i int32
  4341  	var k int32
  4342  	// var Ix int32 at bp, 4
  4343  
  4344  	var fs_kHz_dec int32
  4345  	// var nBytesUsed int32 at bp+204, 4
  4346  
  4347  	// var Ixs [4]int32 at bp+188, 16
  4348  
  4349  	// var GainsIndices [4]int32 at bp+4, 16
  4350  
  4351  	// var NLSFIndices [10]int32 at bp+20, 40
  4352  
  4353  	// var pNLSF_Q15 [16]int32 at bp+60, 64
  4354  
  4355  	// var pNLSF0_Q15 [16]int32 at bp+124, 64
  4356  
  4357  	var cbk_ptr_Q14 uintptr
  4358  	var psNLSF_CB uintptr = uintptr(0)
  4359  	var psRC uintptr = (psDec /* &.sRC */)
  4360  
  4361  	/************************/
  4362  	/* Decode sampling rate */
  4363  	/************************/
  4364  	/* only done for first frame of packet */
  4365  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded == 0 {
  4366  		SKP_Silk_range_decoder(tls, bp /* &Ix */, psRC, uintptr(unsafe.Pointer(&SKP_Silk_SamplingRates_CDF)), SKP_Silk_SamplingRates_offset)
  4367  
  4368  		/* check that sampling rate is supported */
  4369  		if (*(*int32)(unsafe.Pointer(bp /* Ix */)) < 0) || (*(*int32)(unsafe.Pointer(bp /* Ix */)) > 3) {
  4370  			(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -7
  4371  			return
  4372  		}
  4373  		fs_kHz_dec = SKP_Silk_SamplingRates_table[*(*int32)(unsafe.Pointer(bp /* Ix */))]
  4374  		SKP_Silk_decoder_set_fs(tls, psDec, fs_kHz_dec)
  4375  	}
  4376  
  4377  	/*******************************************/
  4378  	/* Decode signal type and quantizer offset */
  4379  	/*******************************************/
  4380  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded == 0 {
  4381  		/* first frame in packet: independent coding */
  4382  		SKP_Silk_range_decoder(tls, bp /* &Ix */, psRC, uintptr(unsafe.Pointer(&SKP_Silk_type_offset_CDF)), SKP_Silk_type_offset_CDF_offset)
  4383  	} else {
  4384  		/* conditional coding */
  4385  		SKP_Silk_range_decoder(tls, bp /* &Ix */, psRC, (uintptr(unsafe.Pointer(&SKP_Silk_type_offset_joint_CDF)) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FtypeOffsetPrev)*10),
  4386  			SKP_Silk_type_offset_CDF_offset)
  4387  	}
  4388  	(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype = ((*(*int32)(unsafe.Pointer(bp /* Ix */))) >> (1))
  4389  	(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FQuantOffsetType = (*(*int32)(unsafe.Pointer(bp /* Ix */)) & 1)
  4390  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FtypeOffsetPrev = *(*int32)(unsafe.Pointer(bp /* Ix */))
  4391  
  4392  	/****************/
  4393  	/* Decode gains */
  4394  	/****************/
  4395  	/* first subframe */
  4396  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded == 0 {
  4397  		/* first frame in packet: independent coding */
  4398  		SKP_Silk_range_decoder(tls, (bp + 4 /* &GainsIndices */), psRC, (uintptr(unsafe.Pointer(&SKP_Silk_gain_CDF)) + uintptr((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype)*130), SKP_Silk_gain_CDF_offset)
  4399  	} else {
  4400  		/* conditional coding */
  4401  		SKP_Silk_range_decoder(tls, (bp + 4 /* &GainsIndices */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_delta_gain_CDF)), SKP_Silk_delta_gain_CDF_offset)
  4402  	}
  4403  
  4404  	/* remaining subframes */
  4405  	for i = 1; i < 4; i++ {
  4406  		SKP_Silk_range_decoder(tls, (bp + 4 /* &GainsIndices */ + uintptr(i)*4), psRC, uintptr(unsafe.Pointer(&SKP_Silk_delta_gain_CDF)), SKP_Silk_delta_gain_CDF_offset)
  4407  	}
  4408  
  4409  	/* Dequant Gains */
  4410  	SKP_Silk_gains_dequant(tls, psDecCtrl+16 /* &.Gains_Q16 */, bp+4 /* &GainsIndices[0] */, (psDec + 11196 /* &.LastGainIndex */), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded)
  4411  	/****************/
  4412  	/* Decode NLSFs */
  4413  	/****************/
  4414  	/* Set pointer to NLSF VQ CB for the current signal type */
  4415  	psNLSF_CB = *(*uintptr)(unsafe.Pointer((psDec + 11536 /* &.psNLSF_CB */) + uintptr((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype)*8))
  4416  
  4417  	/* Range decode NLSF path */
  4418  	SKP_Silk_range_decoder_multi(tls, bp+20 /* &NLSFIndices[0] */, psRC, (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FStartPtr, (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FMiddleIx, (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages)
  4419  
  4420  	/* From the NLSF path, decode an NLSF vector */
  4421  	SKP_Silk_NLSF_MSVQ_decode(tls, bp+60 /* &pNLSF_Q15[0] */, psNLSF_CB, bp+20 /* &NLSFIndices[0] */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order)
  4422  
  4423  	/************************************/
  4424  	/* Decode NLSF interpolation factor */
  4425  	/************************************/
  4426  	SKP_Silk_range_decoder(tls, (psDecCtrl + 160 /* &.NLSFInterpCoef_Q2 */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_NLSF_interpolation_factor_CDF)),
  4427  		SKP_Silk_NLSF_interpolation_factor_offset)
  4428  
  4429  	/* If just reset, e.g., because internal Fs changed, do not allow interpolation */
  4430  	/* improves the case of packet loss in the first frame after a switch           */
  4431  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffirst_frame_after_reset == 1 {
  4432  		(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FNLSFInterpCoef_Q2 = 4
  4433  	}
  4434  
  4435  	if fullDecoding != 0 {
  4436  		/* Convert NLSF parameters to AR prediction filter coefficients */
  4437  		SKP_Silk_NLSF2A_stable(tls, ((psDecCtrl + 36 /* &.PredCoef_Q12 */) + 1*32), bp+60 /* &pNLSF_Q15[0] */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order)
  4438  
  4439  		if (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FNLSFInterpCoef_Q2 < 4 {
  4440  			/* Calculation of the interpolated NLSF0 vector from the interpolation factor, */
  4441  			/* the previous NLSF1, and the current NLSF1                                   */
  4442  			for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order; i++ {
  4443  				*(*int32)(unsafe.Pointer(bp + 124 /* &pNLSF0_Q15[0] */ + uintptr(i)*4)) = (*(*int32)(unsafe.Pointer((psDec + 11252 /* &.prevNLSF_Q15 */) + uintptr(i)*4)) + ((((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FNLSFInterpCoef_Q2) * (*(*int32)(unsafe.Pointer(bp + 60 /* &pNLSF_Q15[0] */ + uintptr(i)*4)) - *(*int32)(unsafe.Pointer((psDec + 11252 /* &.prevNLSF_Q15 */) + uintptr(i)*4)))) >> (2)))
  4444  			}
  4445  
  4446  			/* Convert NLSF parameters to AR prediction filter coefficients */
  4447  			SKP_Silk_NLSF2A_stable(tls, (psDecCtrl + 36 /* &.PredCoef_Q12 */), bp+124 /* &pNLSF0_Q15[0] */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order)
  4448  		} else {
  4449  			/* Copy LPC coefficients for first half from second half */
  4450  			libc.Xmemcpy(tls, (psDecCtrl + 36 /* &.PredCoef_Q12 */), ((psDecCtrl + 36 /* &.PredCoef_Q12 */) + 1*32), (uint64((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) * uint64(unsafe.Sizeof(int16(0)))))
  4451  		}
  4452  	}
  4453  
  4454  	libc.Xmemcpy(tls, psDec+11252 /* &.prevNLSF_Q15 */, bp+60 /* &pNLSF_Q15[0] */, (uint64((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) * uint64(unsafe.Sizeof(int32(0)))))
  4455  
  4456  	/* After a packet loss do BWE of LPC coefs */
  4457  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt != 0 {
  4458  		SKP_Silk_bwexpander(tls, (psDecCtrl + 36 /* &.PredCoef_Q12 */), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order, 63570)
  4459  		SKP_Silk_bwexpander(tls, ((psDecCtrl + 36 /* &.PredCoef_Q12 */) + 1*32), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order, 63570)
  4460  	}
  4461  
  4462  	if (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype == 0 {
  4463  		/*********************/
  4464  		/* Decode pitch lags */
  4465  		/*********************/
  4466  		/* Get lag index */
  4467  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz == 8 {
  4468  			SKP_Silk_range_decoder(tls, (bp + 188 /* &Ixs */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_NB_CDF)), SKP_Silk_pitch_lag_NB_CDF_offset)
  4469  		} else if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz == 12 {
  4470  			SKP_Silk_range_decoder(tls, (bp + 188 /* &Ixs */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_MB_CDF)), SKP_Silk_pitch_lag_MB_CDF_offset)
  4471  		} else if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz == 16 {
  4472  			SKP_Silk_range_decoder(tls, (bp + 188 /* &Ixs */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_WB_CDF)), SKP_Silk_pitch_lag_WB_CDF_offset)
  4473  		} else {
  4474  			SKP_Silk_range_decoder(tls, (bp + 188 /* &Ixs */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_SWB_CDF)), SKP_Silk_pitch_lag_SWB_CDF_offset)
  4475  		}
  4476  
  4477  		/* Get contour index */
  4478  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz == 8 {
  4479  			/* Fewer codevectors used in 8 kHz mode */
  4480  			SKP_Silk_range_decoder(tls, (bp + 188 /* &Ixs */ + 1*4), psRC, uintptr(unsafe.Pointer(&SKP_Silk_pitch_contour_NB_CDF)), SKP_Silk_pitch_contour_NB_CDF_offset)
  4481  		} else {
  4482  			/* Joint for 12, 16, and 24 kHz */
  4483  			SKP_Silk_range_decoder(tls, (bp + 188 /* &Ixs */ + 1*4), psRC, uintptr(unsafe.Pointer(&SKP_Silk_pitch_contour_CDF)), SKP_Silk_pitch_contour_CDF_offset)
  4484  		}
  4485  
  4486  		/* Decode pitch values */
  4487  		SKP_Silk_decode_pitch(tls, *(*int32)(unsafe.Pointer(bp + 188 /* &Ixs[0] */)), *(*int32)(unsafe.Pointer(bp + 188 /* &Ixs[0] */ + 1*4)), psDecCtrl /* &.pitchL */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz)
  4488  
  4489  		/********************/
  4490  		/* Decode LTP gains */
  4491  		/********************/
  4492  		/* Decode PERIndex value */
  4493  		SKP_Silk_range_decoder(tls, (psDecCtrl + 144 /* &.PERIndex */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_LTP_per_index_CDF)),
  4494  			SKP_Silk_LTP_per_index_CDF_offset)
  4495  
  4496  		/* Decode Codebook Index */
  4497  		cbk_ptr_Q14 = SKP_Silk_LTP_vq_ptrs_Q14[(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FPERIndex] /* set pointer to start of codebook */
  4498  
  4499  		for k = 0; k < 4; k++ {
  4500  			SKP_Silk_range_decoder(tls, bp /* &Ix */, psRC, SKP_Silk_LTP_gain_CDF_ptrs[(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FPERIndex],
  4501  				SKP_Silk_LTP_gain_CDF_offsets[(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FPERIndex])
  4502  
  4503  			for i = 0; i < 5; i++ {
  4504  				*(*int16)(unsafe.Pointer((psDecCtrl + 100 /* &.LTPCoef_Q14 */) + uintptr(((k*5)+i))*2)) = *(*int16)(unsafe.Pointer(cbk_ptr_Q14 + uintptr(((*(*int32)(unsafe.Pointer(bp /* Ix */))*5)+i))*2))
  4505  			}
  4506  		}
  4507  
  4508  		/**********************/
  4509  		/* Decode LTP scaling */
  4510  		/**********************/
  4511  		SKP_Silk_range_decoder(tls, bp /* &Ix */, psRC, uintptr(unsafe.Pointer(&SKP_Silk_LTPscale_CDF)), SKP_Silk_LTPscale_offset)
  4512  		(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FLTP_scale_Q14 = int32(SKP_Silk_LTPScales_table_Q14[*(*int32)(unsafe.Pointer(bp /* Ix */))])
  4513  	} else {
  4514  
  4515  		libc.Xmemset(tls, psDecCtrl /* &.pitchL */, 0, (uint64(4) * uint64(unsafe.Sizeof(int32(0)))))
  4516  		libc.Xmemset(tls, psDecCtrl+100 /* &.LTPCoef_Q14 */, 0, ((uint64(5 * 4)) * uint64(unsafe.Sizeof(int16(0)))))
  4517  		(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FPERIndex = 0
  4518  		(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FLTP_scale_Q14 = 0
  4519  	}
  4520  
  4521  	/***************/
  4522  	/* Decode seed */
  4523  	/***************/
  4524  	SKP_Silk_range_decoder(tls, bp /* &Ix */, psRC, uintptr(unsafe.Pointer(&SKP_Silk_Seed_CDF)), SKP_Silk_Seed_offset)
  4525  	(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FSeed = *(*int32)(unsafe.Pointer(bp /* Ix */))
  4526  	/*********************************************/
  4527  	/* Decode quantization indices of excitation */
  4528  	/*********************************************/
  4529  	SKP_Silk_decode_pulses(tls, psRC, psDecCtrl, q, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length)
  4530  
  4531  	/*********************************************/
  4532  	/* Decode VAD flag                           */
  4533  	/*********************************************/
  4534  	SKP_Silk_range_decoder(tls, (psDec + 11552 /* &.vadFlag */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_vadflag_CDF)), SKP_Silk_vadflag_offset)
  4535  
  4536  	/**************************************/
  4537  	/* Decode Frame termination indicator */
  4538  	/**************************************/
  4539  	SKP_Silk_range_decoder(tls, (psDec + 11336 /* &.FrameTermination */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_FrameTermination_CDF)), SKP_Silk_FrameTermination_offset)
  4540  
  4541  	/****************************************/
  4542  	/* get number of bytes used so far      */
  4543  	/****************************************/
  4544  	SKP_Silk_range_coder_get_length(tls, psRC, bp+204 /* &nBytesUsed */)
  4545  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnBytesLeft = ((*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength - *(*int32)(unsafe.Pointer(bp + 204 /* nBytesUsed */)))
  4546  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnBytesLeft < 0 {
  4547  		(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -6
  4548  	}
  4549  
  4550  	/****************************************/
  4551  	/* check remaining bits in last byte    */
  4552  	/****************************************/
  4553  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnBytesLeft == 0 {
  4554  		SKP_Silk_range_coder_check_after_decoding(tls, psRC)
  4555  	}
  4556  }
  4557  
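        /* Reconstruct pitch lags: the base lag is the minimum lag (2 * Fs_kHz       */
        /* samples, i.e. 2 ms) plus the decoded lag index; the contour index then    */
        /* selects per-subframe offsets from a small (8 kHz) or a larger codebook.   */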
  4558  func SKP_Silk_decode_pitch(tls *libc.TLS, lagIndex int32, contourIndex int32, pitch_lags uintptr, Fs_kHz int32) { /* SKP_Silk_decode_pitch.c:34:6: */
  4559  	var lag int32
  4560  	var i int32
  4561  	var min_lag int32
  4562  
  4563  	min_lag = ((int32(int16(2))) * (int32(int16(Fs_kHz))))
  4564  
  4565  	/* Only for 24 / 16 kHz version for now */
  4566  	lag = (min_lag + lagIndex)
  4567  	if Fs_kHz == 8 {
  4568  		/* Only a small codebook for 8 kHz */
  4569  		for i = 0; i < 4; i++ {
  4570  			*(*int32)(unsafe.Pointer(pitch_lags + uintptr(i)*4)) = (lag + int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage2)) + uintptr(i)*22) + uintptr(contourIndex)*2))))
  4571  		}
  4572  	} else {
  4573  		for i = 0; i < 4; i++ {
  4574  			*(*int32)(unsafe.Pointer(pitch_lags + uintptr(i)*4)) = (lag + int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage3)) + uintptr(i)*68) + uintptr(contourIndex)*2))))
  4575  		}
  4576  	}
  4577  }
  4578  
  4579  /*********************************************/
  4580  /* Decode quantization indices of excitation */
  4581  /*********************************************/
  4582  func SKP_Silk_decode_pulses(tls *libc.TLS, psRC uintptr, psDecCtrl uintptr, q uintptr, frame_length int32) { /* SKP_Silk_decode_pulses.c:33:6: */
  4583  	bp := tls.Alloc(244)
  4584  	defer tls.Free(244)
  4585  
  4586  	var i int32
  4587  	var j int32
  4588  	var k int32
  4589  	var iter int32
  4590  	var abs_q int32
  4591  	var nLS int32
  4592  	// var bit int32 at bp+240, 4
  4593  
  4594  	// var sum_pulses [30]int32 at bp+120, 120
  4595  
  4596  	// var nLshifts [30]int32 at bp, 120
  4597  
  4598  	var pulses_ptr uintptr
  4599  	var cdf_ptr uintptr
  4600  
  4601  	/*********************/
  4602  	/* Decode rate level */
  4603  	/*********************/
  4604  	SKP_Silk_range_decoder(tls, (psDecCtrl + 148 /* &.RateLevelIndex */), psRC,
  4605  		(uintptr(unsafe.Pointer(&SKP_Silk_rate_levels_CDF)) + uintptr((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype)*20), SKP_Silk_rate_levels_CDF_offset)
  4606  
  4607  	/* Calculate number of shell blocks */
  4608  	iter = (frame_length / 16)
  4609  
  4610  	/***************************************************/
  4611  	/* Sum-Weighted-Pulses Decoding                    */
  4612  	/***************************************************/
  4613  	cdf_ptr = (uintptr(unsafe.Pointer(&SKP_Silk_pulses_per_block_CDF)) + uintptr((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FRateLevelIndex)*42)
  4614  	for i = 0; i < iter; i++ {
  4615  		*(*int32)(unsafe.Pointer(bp /* &nLshifts[0] */ + uintptr(i)*4)) = 0
  4616  		SKP_Silk_range_decoder(tls, (bp + 120 /* &sum_pulses */ + uintptr(i)*4), psRC, cdf_ptr, SKP_Silk_pulses_per_block_CDF_offset)
  4617  
  4618  		/* LSB indication */
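        		/* While the decoded value equals 18 + 1 it acts as an escape code: one  */
        		/* more LSB plane is flagged and the pulse sum is decoded again from a   */
        		/* separate pulses-per-block table.                                       */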
  4619  		for *(*int32)(unsafe.Pointer(bp + 120 /* &sum_pulses[0] */ + uintptr(i)*4)) == (18 + 1) {
  4620  			*(*int32)(unsafe.Pointer(bp /* &nLshifts[0] */ + uintptr(i)*4))++
  4621  			SKP_Silk_range_decoder(tls, (bp + 120 /* &sum_pulses */ + uintptr(i)*4), psRC,
  4622  				(uintptr(unsafe.Pointer(&SKP_Silk_pulses_per_block_CDF)) + 9*42), SKP_Silk_pulses_per_block_CDF_offset)
  4623  		}
  4624  	}
  4625  
  4626  	/***************************************************/
  4627  	/* Shell decoding                                  */
  4628  	/***************************************************/
  4629  	for i = 0; i < iter; i++ {
  4630  		if *(*int32)(unsafe.Pointer(bp + 120 /* &sum_pulses[0] */ + uintptr(i)*4)) > 0 {
  4631  			SKP_Silk_shell_decoder(tls, (q + uintptr(((int32(int16(i)))*(int32(int16(16)))))*4), psRC, *(*int32)(unsafe.Pointer(bp + 120 /* &sum_pulses[0] */ + uintptr(i)*4)))
  4632  		} else {
  4633  			libc.Xmemset(tls, (q + uintptr(((int32(int16(i)))*(int32(int16(16)))))*4), 0, (uint64(16) * uint64(unsafe.Sizeof(int32(0)))))
  4634  		}
  4635  	}
  4636  
  4637  	/***************************************************/
  4638  	/* LSB Decoding                                    */
  4639  	/***************************************************/
  4640  	for i = 0; i < iter; i++ {
  4641  		if *(*int32)(unsafe.Pointer(bp /* &nLshifts[0] */ + uintptr(i)*4)) > 0 {
  4642  			nLS = *(*int32)(unsafe.Pointer(bp /* &nLshifts[0] */ + uintptr(i)*4))
  4643  			pulses_ptr = (q + uintptr(((int32(int16(i)))*(int32(int16(16)))))*4)
  4644  			for k = 0; k < 16; k++ {
  4645  				abs_q = *(*int32)(unsafe.Pointer(pulses_ptr + uintptr(k)*4))
  4646  				for j = 0; j < nLS; j++ {
  4647  					abs_q = ((abs_q) << (1))
  4648  					SKP_Silk_range_decoder(tls, bp+240 /* &bit */, psRC, uintptr(unsafe.Pointer(&SKP_Silk_lsb_CDF)), 1)
  4649  					abs_q = abs_q + (*(*int32)(unsafe.Pointer(bp + 240 /* bit */)))
  4650  				}
  4651  				*(*int32)(unsafe.Pointer(pulses_ptr + uintptr(k)*4)) = abs_q
  4652  			}
  4653  		}
  4654  	}
  4655  
  4656  	/****************************************/
  4657  	/* Decode and add signs to pulse signal */
  4658  	/****************************************/
  4659  	SKP_Silk_decode_signs(tls, psRC, q, frame_length, (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype,
  4660  		(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FQuantOffsetType, (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FRateLevelIndex)
  4661  }
  4662  
  4663  /***********************************************************************
  4664  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  4665  Redistribution and use in source and binary forms, with or without
  4666  modification, (subject to the limitations in the disclaimer below)
  4667  are permitted provided that the following conditions are met:
  4668  - Redistributions of source code must retain the above copyright notice,
  4669  this list of conditions and the following disclaimer.
  4670  - Redistributions in binary form must reproduce the above copyright
  4671  notice, this list of conditions and the following disclaimer in the
  4672  documentation and/or other materials provided with the distribution.
  4673  - Neither the name of Skype Limited, nor the names of specific
  4674  contributors, may be used to endorse or promote products derived from
  4675  this software without specific prior written permission.
  4676  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  4677  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  4678  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  4679  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  4680  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  4681  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  4682  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  4683  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  4684  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  4685  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  4686  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  4687  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  4688  ***********************************************************************/
  4716  
  4717  /******************/
  4718  /* Error messages */
  4719  /******************/
  4720  
  4721  /**************************/
  4722  /* Encoder error messages */
  4723  /**************************/
  4724  
  4725  /* Input length is not a multiple of 10 ms, or length is longer than the packet length */
  4726  
  4727  /* Sampling frequency not 8000, 12000, 16000 or 24000 Hertz */
  4728  
  4729  /* Packet size not 20, 40, 60, 80 or 100 ms */
  4730  
  4731  /* Allocated payload buffer too short */
  4732  
  4733  /* Loss rate not between 0 and 100 percent */
  4734  
  4735  /* Complexity setting not valid, use 0, 1 or 2 */
  4736  
  4737  /* Inband FEC setting not valid, use 0 or 1 */
  4738  
  4739  /* DTX setting not valid, use 0 or 1 */
  4740  
  4741  /* Internal encoder error */
  4742  
  4743  /**************************/
  4744  /* Decoder error messages */
  4745  /**************************/
  4746  
  4747  /* Output sampling frequency lower than internal decoded sampling frequency */
  4748  
  4749  /* Payload size exceeded the maximum allowed 1024 bytes */
  4750  
  4751  /* Payload has bit errors */
  4752  
  4753  /* Struct for TOC (Table of Contents) */
  4754  type SKP_Silk_TOC_struct = struct {
  4755  	FframesInPacket int32
  4756  	Ffs_kHz         int32
  4757  	FinbandLBRR     int32
  4758  	Fcorrupt        int32
  4759  	FvadFlags       [5]int32
  4760  	FsigtypeFlags   [5]int32
  4761  } /* SKP_Silk_SDK_API.h:50:3 */
  4762  
  4763  /*********************/
  4764  /* Decoder functions */
  4765  /*********************/
  4766  
  4767  func SKP_Silk_SDK_Get_Decoder_Size(tls *libc.TLS, decSizeBytes uintptr) int32 { /* SKP_Silk_dec_API.c:35:9: */
  4768  	var ret int32 = 0
  4769  
  4770  	*(*int32)(unsafe.Pointer(decSizeBytes)) = int32(unsafe.Sizeof(SKP_Silk_decoder_state{}))
  4771  
  4772  	return ret
  4773  }
  4774  
  4775  /* Reset decoder state */
  4776  func SKP_Silk_SDK_InitDecoder(tls *libc.TLS, decState uintptr) int32 { /* SKP_Silk_dec_API.c:45:9: */
  4777  	var ret int32 = 0
  4778  	var struc uintptr
  4779  
  4780  	struc = decState
  4781  
  4782  	ret = SKP_Silk_init_decoder(tls, struc)
  4783  
  4784  	return ret
  4785  }
  4786  
  4787  /* Decode a frame */
  4788  func SKP_Silk_SDK_Decode(tls *libc.TLS, decState uintptr, decControl uintptr, lostFlag int32, inData uintptr, nBytesIn int32, samplesOut uintptr, nSamplesOut uintptr) int32 { /* SKP_Silk_dec_API.c:60:9: */
  4789  	bp := tls.Alloc(3844)
  4790  	defer tls.Free(3844)
  4791  
  4792  	var ret int32 = 0
  4793  	// var used_bytes int32 at bp+1920, 4
  4794  
  4795  	var prev_fs_kHz int32
  4796  	var psDec uintptr
  4797  	// var samplesOutInternal [960]int16 at bp, 1920
  4798  
  4799  	var pSamplesOutInternal uintptr
  4800  
  4801  	psDec = decState
  4802  
  4803  	/* We need this buffer to have room for an internal frame */
  4804  	pSamplesOutInternal = samplesOut
  4805  	if ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz * 1000) > (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate {
  4806  		pSamplesOutInternal = bp /* &samplesOutInternal[0] */
  4807  	}
  4808  
  4809  	/**********************************/
  4810  	/* Test if first frame in payload */
  4811  	/**********************************/
  4812  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FmoreInternalDecoderFrames == 0 {
  4813  		/* First Frame in Payload */
  4814  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded = 0 /* Used to count frames in packet */
  4815  	}
  4816  
  4817  	if (((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FmoreInternalDecoderFrames == 0) && (lostFlag == 0)) && (nBytesIn > 1024) { /* Too long payload         */
  4818  		/* Avoid trying to decode a too large packet */
  4819  		lostFlag = 1
  4820  		ret = -11
  4821  	}
  4822  
  4823  	/* Save previous sample frequency */
  4824  	prev_fs_kHz = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz
  4825  
  4826  	/* Call decoder for one frame */
  4827  	ret = ret + (SKP_Silk_decode_frame(tls, psDec, pSamplesOutInternal, nSamplesOut, inData, nBytesIn,
  4828  		lostFlag, bp+1920 /* &used_bytes */))
  4829  
  4830  	if *(*int32)(unsafe.Pointer(bp + 1920 /* used_bytes */)) != 0 { /* Only Call if not a packet loss */
  4831  		if (((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnBytesLeft > 0) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FFrameTermination == 1)) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded < 5) {
  4832  			/* We have more frames in the Payload */
  4833  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FmoreInternalDecoderFrames = 1
  4834  		} else {
  4835  			/* Last frame in Payload */
  4836  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FmoreInternalDecoderFrames = 0
  4837  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesInPacket = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded
  4838  
  4839  			/* Track inband FEC usage */
  4840  			if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FvadFlag == 1 {
  4841  				if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FFrameTermination == 0 {
  4842  					(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fno_FEC_counter++
  4843  					if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fno_FEC_counter > 10 {
  4844  						(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Finband_FEC_offset = 0
  4845  					}
  4846  				} else if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FFrameTermination == 2 {
  4847  					(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Finband_FEC_offset = 1 /* FEC info with 1 packet delay */
  4848  					(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fno_FEC_counter = 0
  4849  				} else if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FFrameTermination == 3 {
  4850  					(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Finband_FEC_offset = 2 /* FEC info with 2 packets delay */
  4851  					(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fno_FEC_counter = 0
  4852  				}
  4853  			}
  4854  		}
  4855  	}
  4856  
  4857  	if ((48 * 1000) < (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate) || (8000 > (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate) {
  4858  		ret = -10
  4859  		return ret
  4860  	}
  4861  
  4862  	/* Resample if needed */
  4863  	if ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz * 1000) != (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate {
  4864  		// var samplesOut_tmp [960]int16 at bp+1924, 1920
  4865  
  4866  		/* Copy to a tmp buffer as the resampling writes to samplesOut */
  4867  		libc.Xmemcpy(tls, bp+1924 /* &samplesOut_tmp[0] */, pSamplesOutInternal, (uint64(*(*int16)(unsafe.Pointer(nSamplesOut))) * uint64(unsafe.Sizeof(int16(0)))))
  4868  
  4869  		/* (Re-)initialize resampler state when switching internal sampling frequency */
  4870  		if (prev_fs_kHz != (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz) || ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_API_sampleRate != (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate) {
  4871  			ret = SKP_Silk_resampler_init(tls, (psDec + 11344 /* &.resampler_state */), ((int32(int16((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz))) * (int32(int16(1000)))), (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate)
  4872  		}
  4873  
  4874  		/* Resample the output to API_sampleRate */
  4875  		ret = ret + (SKP_Silk_resampler(tls, (psDec + 11344 /* &.resampler_state */), samplesOut, bp+1924 /* &samplesOut_tmp[0] */, int32(*(*int16)(unsafe.Pointer(nSamplesOut)))))
  4876  
  4877  		/* Update the number of output samples */
  4878  		*(*int16)(unsafe.Pointer(nSamplesOut)) = int16(((int32(*(*int16)(unsafe.Pointer(nSamplesOut))) * (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate) / ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz * 1000)))
  4879  	} else if (prev_fs_kHz * 1000) > (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate {
  4880  		libc.Xmemcpy(tls, samplesOut, pSamplesOutInternal, (uint64(*(*int16)(unsafe.Pointer(nSamplesOut))) * uint64(unsafe.Sizeof(int16(0)))))
  4881  	}
  4882  
  4883  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_API_sampleRate = (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate
  4884  
  4885  	/* Copy all parameters that are needed out of internal structure to the control structure */
  4886  	(*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FframeSize = int32((uint16((*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate / 50)))
  4887  	(*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FframesPerPacket = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesInPacket
  4888  	(*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FinBandFECOffset = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Finband_FEC_offset
  4889  	(*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FmoreInternalDecoderFrames = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FmoreInternalDecoderFrames
  4890  
  4891  	return ret
  4892  }
  4893  
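        /* Typical call sequence for the three entry points above (a sketch, not     */
        /* part of the original SDK documentation): query the state size with        */
        /* SKP_Silk_SDK_Get_Decoder_Size, allocate that many bytes, reset them with  */
        /* SKP_Silk_SDK_InitDecoder, then call SKP_Silk_SDK_Decode once per payload  */
        /* (and again while the control struct's FmoreInternalDecoderFrames field    */
        /* is non-zero).                                                              */
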
  4894  /* Function to find LBRR information in a packet */
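        /* lost_offset is the FEC delay in packets (1 or 2, matching the             */
        /* Finband_FEC_offset values tracked in SKP_Silk_SDK_Decode above); any      */
        /* other value means the packet carries no usable FEC and nLBRRBytes is      */
        /* set to 0.                                                                  */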
  4895  func SKP_Silk_SDK_search_for_LBRR(tls *libc.TLS, inData uintptr, nBytesIn int32, lost_offset int32, LBRRData uintptr, nLBRRBytes uintptr) { /* SKP_Silk_dec_API.c:173:6: */
  4896  	bp := tls.Alloc(15812)
  4897  	defer tls.Free(15812)
  4898  
  4899  	// var sDec SKP_Silk_decoder_state at bp, 13728
  4900  	// Local decoder state to avoid interfering with running decoder
  4901  	// var sDecCtrl SKP_Silk_decoder_control at bp+13728, 164
  4902  
  4903  	// var TempQ [480]int32 at bp+13892, 1920
  4904  
  4905  	if (lost_offset < 1) || (lost_offset > 2) {
  4906  		/* No useful FEC in this packet */
  4907  		*(*int16)(unsafe.Pointer(nLBRRBytes)) = int16(0)
  4908  		return
  4909  	}
  4910  
  4911  	(*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesDecoded = 0
  4912  	(*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).Ffs_kHz = 0  /* Force update parameters LPC_order etc */
  4913  	(*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FlossCnt = 0 /* Avoid running bw expansion of the LPC parameters when searching for LBRR data */
  4914  	libc.Xmemset(tls, bp /* &sDec */ +11252 /* &.prevNLSF_Q15 */, 0, (uint64(16) * uint64(unsafe.Sizeof(int32(0)))))
  4915  	SKP_Silk_range_dec_init(tls, (bp /* &sDec */ /* &.sRC */), inData, nBytesIn)
  4916  
  4917  	for 1 != 0 {
  4918  		SKP_Silk_decode_parameters(tls, bp /* &sDec */, bp+13728 /* &sDecCtrl */, bp+13892 /* &TempQ[0] */, 0)
  4919  
  4920  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FsRC.Ferror != 0 {
  4921  			/* Corrupt stream */
  4922  			*(*int16)(unsafe.Pointer(nLBRRBytes)) = int16(0)
  4923  			return
  4924  		}
  4925  
  4926  		if (((((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination - 1) & lost_offset) != 0) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination > 0)) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnBytesLeft >= 0) {
  4927  			/* The wanted FEC is present in the packet */
  4928  			*(*int16)(unsafe.Pointer(nLBRRBytes)) = int16((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnBytesLeft)
  4929  			libc.Xmemcpy(tls, LBRRData, (inData + uintptr((nBytesIn - (*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnBytesLeft))), (uint64((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnBytesLeft) * uint64(unsafe.Sizeof(uint8(0)))))
  4930  			break
  4931  		}
  4932  		if ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnBytesLeft > 0) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination == 1) {
  4933  			(*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesDecoded++
  4934  		} else {
  4935  			LBRRData = uintptr(0)
  4936  			*(*int16)(unsafe.Pointer(nLBRRBytes)) = int16(0)
  4937  			break
  4938  		}
  4939  	}
  4940  }
  4941  
  4942  /* Get the type of content of a packet */
  4943  func SKP_Silk_SDK_get_TOC(tls *libc.TLS, inData uintptr, nBytesIn int32, Silk_TOC uintptr) { /* SKP_Silk_dec_API.c:222:6: */
  4944  	bp := tls.Alloc(15812)
  4945  	defer tls.Free(15812)
  4946  
  4947  	// var sDec SKP_Silk_decoder_state at bp, 13728
  4948  	// Local decoder state to avoid interfering with running decoder
  4949  	// var sDecCtrl SKP_Silk_decoder_control at bp+13728, 164
  4950  
  4951  	// var TempQ [480]int32 at bp+13892, 1920
  4952  
  4953  	(*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesDecoded = 0
  4954  	(*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).Ffs_kHz = 0 /* Force update parameters LPC_order etc */
  4955  	SKP_Silk_range_dec_init(tls, (bp /* &sDec */ /* &.sRC */), inData, nBytesIn)
  4956  
  4957  	(*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).Fcorrupt = 0
  4958  	for 1 != 0 {
  4959  		SKP_Silk_decode_parameters(tls, bp /* &sDec */, bp+13728 /* &sDecCtrl */, bp+13892 /* &TempQ[0] */, 0)
  4960  
  4961  		*(*int32)(unsafe.Pointer((Silk_TOC + 16 /* &.vadFlags */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesDecoded)*4)) = (*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FvadFlag
  4962  		*(*int32)(unsafe.Pointer((Silk_TOC + 36 /* &.sigtypeFlags */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesDecoded)*4)) = (*SKP_Silk_decoder_control)(unsafe.Pointer(bp + 13728 /* &sDecCtrl */)).Fsigtype
  4963  
  4964  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FsRC.Ferror != 0 {
  4965  			/* Corrupt stream */
  4966  			(*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).Fcorrupt = 1
  4967  			break
  4968  		}
  4969  
  4970  		if ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnBytesLeft > 0) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination == 1) {
  4971  			(*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesDecoded++
  4972  		} else {
  4973  			break
  4974  		}
  4975  	}
  4976  	if (((*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).Fcorrupt != 0) || ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination == 1)) || ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesInPacket > 5) {
  4977  		/* Corrupt packet */
  4978  		libc.Xmemset(tls, Silk_TOC, 0, uint64(unsafe.Sizeof(SKP_Silk_TOC_struct{})))
  4979  		(*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).Fcorrupt = 1
  4980  	} else {
  4981  		(*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).FframesInPacket = ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesDecoded + 1)
  4982  		(*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).Ffs_kHz = (*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).Ffs_kHz
  4983  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination == 0 {
  4984  			(*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).FinbandLBRR = (*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination
  4985  		} else {
  4986  			(*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).FinbandLBRR = ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination - 1)
  4987  		}
  4988  	}
  4989  }
  4990  
  4991  /**************************/
  4992  /* Get the version number */
  4993  /**************************/
  4994  /* Return a pointer to string specifying the version */
  4995  func SKP_Silk_SDK_get_version(tls *libc.TLS) uintptr { /* SKP_Silk_dec_API.c:275:12: */
  4996  	return uintptr(unsafe.Pointer(&version))
  4997  }
  4998  
  4999  var version = *(*[6]int8)(unsafe.Pointer(ts /* "1.0.9" */)) /* SKP_Silk_dec_API.c:277:23 */
  5000  
  5001  // 7.18.2  Limits of specified-width integer types
  5002  
  5003  // 7.18.2.1  Limits of exact-width integer types
  5004  
  5005  // 7.18.2.2  Limits of minimum-width integer types
  5006  
  5007  // 7.18.2.3  Limits of fastest minimum-width integer types
  5008  
  5009  // 7.18.2.4  Limits of integer types capable of holding
  5010  //     object pointers
  5011  
  5012  // 7.18.2.5  Limits of greatest-width integer types
  5013  
  5014  // 7.18.3  Limits of other integer types
  5015  
  5016  // wint_t is unsigned short for compatibility with MS runtime
  5017  
  5018  // 7.18.4  Macros for integer constants
  5019  
  5020  // 7.18.4.1  Macros for minimum-width integer constants
  5021  //
  5022  //     According to Douglas Gwyn <gwyn@arl.mil>:
  5023  // 	"This spec was changed in ISO/IEC 9899:1999 TC1; in ISO/IEC
  5024  // 	9899:1999 as initially published, the expansion was required
  5025  // 	to be an integer constant of precisely matching type, which
  5026  // 	is impossible to accomplish for the shorter types on most
  5027  // 	platforms, because C99 provides no standard way to designate
  5028  // 	an integer constant with width less than that of type int.
  5029  // 	TC1 changed this to require just an integer constant
  5030  // 	*expression* with *promoted* type."
  5031  //
  5032  // 	The trick used here is from Clive D W Feather.
  5033  
  5034  //  The 'trick' doesn't work in C89 for long long because, without
  5035  //     suffix, (val) will be evaluated as int, not intmax_t
  5036  
  5037  // 7.18.4.2  Macros for greatest-width integer constants
  5038  
  5039  /* assertions */
  5040  
  5041  /* Limits on bitrate */
  5042  
  5043  /* Transition bitrates between modes */
  5044  
  5045  /* Integration/hysteresis threshold for lowering internal sample frequency */
  5046  /* 30000000 -> 6 sec if bitrate is 5000 bps below limit; 3 sec if bitrate is 10000 bps below limit */
  5047  
  5048  /* DTX settings                                 */
  5049  
  5050  /* Number of consecutive packets without FEC before telling JB */
  5051  
  5052  /* Maximum delay between real packet and LBRR packet */
  5053  
  5054  /* LBRR usage defines */
  5055  
  5056  /* Frame termination indicator defines */
  5057  
  5058  /* Number of Second order Sections for SWB detection HP filter */
  5059  
  5060  /* Low complexity setting */
  5061  
  5062  /* Activate bandwidth transition filtering for mode switching */
  5063  
  5064  /* Decoder Parameters */
  5065  
  5066  /* Maximum sampling frequency, should be 16 for some embedded platforms */
  5067  
  5068  /* Signal Types used by silk */
  5069  
  5070  /* VAD Types used by silk */
  5071  
  5072  /* Number of samples per frame */
  5073  
  5074  /* Milliseconds of lookahead for pitch analysis */
  5075  
  5076  /* Length of LPC window used in find pitch */
  5077  
  5078  /* Order of LPC used in find pitch */
  5079  
  5080  /* Milliseconds of lookahead for noise shape analysis */
  5081  
  5082  /* Max length of LPC window used in noise shape analysis */
  5083  
  5084  /* Max number of bytes in payload output buffer (may contain multiple frames) */
  5085  
  5086  /* dB level of lowest gain quantization level */
  5087  /* dB level of highest gain quantization level */
  5088  /* Number of gain quantization levels */
  5089  /* Max increase in gain quantization index */
  5090  /* Max decrease in gain quantization index */
  5091  
  5092  /* Quantization offsets (multiples of 4) */
  5093  
  5094  /* Maximum number of iterations used to stabilize an LPC vector */
  5095  
  5096  /* Find Pred Coef defines */
  5097  
  5098  /* LTP quantization settings */
  5099  
  5100  /* Number of subframes */
  5101  
  5102  /* Flag to use harmonic noise shaping */
  5103  
  5104  /* Max LPC order of noise shaping filters */
  5105  
  5106  /* Maximum number of delayed decision states */
  5107  
  5108  /* number of subframes for excitation entropy coding */
  5109  
  5110  /* number of rate levels, for entropy coding of excitation */
  5111  
  5112  /* maximum sum of pulses per shell coding frame */
  5113  
  5114  /***********************/
  5115  /* High pass filtering */
  5116  /***********************/
  5117  
  5118  /***************************/
  5119  /* Voice activity detector */
  5120  /***************************/
  5121  
  5122  /* Sigmoid settings */
  5123  
  5124  /* smoothing for SNR measurement */
  5125  
  5126  /******************/
  5127  /* NLSF quantizer */
  5128  /******************/
  5129  
  5130  /* Based on above defines, calculate how much memory is necessary to allocate */
  5131  
  5132  /* Transition filtering for mode switching */
  5133  
  5134  /* Row based */
  5135  
  5136  /* Column based */
  5137  
  5138  /* BWE factors to apply after packet loss */
  5139  
  5140  /* Defines for CN generation */
  5141  
  5142  func SKP_Silk_detect_SWB_input(tls *libc.TLS, psSWBdetect uintptr, samplesIn uintptr, nSamplesIn int32) { /* SKP_Silk_detect_SWB_input.c:34:6: */
  5143  	bp := tls.Alloc(968)
  5144  	defer tls.Free(968)
  5145  
  5146  	var HP_8_kHz_len int32
  5147  	var i int32
  5148  	// var shift int32 at bp+964, 4
  5149  
  5150  	// var in_HP_8_kHz [480]int16 at bp, 960
  5151  
  5152  	// var energy_32 int32 at bp+960, 4
  5153  
  5154  	/* High-pass filter with cutoff at 8 kHz */
  5155  	HP_8_kHz_len = SKP_min_int(tls, nSamplesIn, (20 * 24))
  5156  	HP_8_kHz_len = SKP_max_int(tls, HP_8_kHz_len, 0)
  5157  
  5158  	/* Cutoff around 9 kHz */
  5159  	/* A = conv(conv([8192,14613, 6868], [8192,12883, 7337]), [8192,11586, 7911]); */
  5160  	/* B = conv(conv([575, -948, 575], [575, -221, 575]), [575, 104, 575]); */
  5161  	SKP_Silk_biquad(tls, samplesIn, (uintptr(unsafe.Pointer(&SKP_Silk_SWB_detect_B_HP_Q13))), (uintptr(unsafe.Pointer(&SKP_Silk_SWB_detect_A_HP_Q13))),
  5162  		(psSWBdetect /* &.S_HP_8_kHz */), bp /* &in_HP_8_kHz[0] */, HP_8_kHz_len)
  5163  	for i = 1; i < 3; i++ {
  5164  		SKP_Silk_biquad(tls, bp /* &in_HP_8_kHz[0] */, (uintptr(unsafe.Pointer(&SKP_Silk_SWB_detect_B_HP_Q13)) + uintptr(i)*6), (uintptr(unsafe.Pointer(&SKP_Silk_SWB_detect_A_HP_Q13)) + uintptr(i)*4),
  5165  			((psSWBdetect /* &.S_HP_8_kHz */) + uintptr(i)*8), bp /* &in_HP_8_kHz[0] */, HP_8_kHz_len)
  5166  	}
  5167  
  5168  	/* Calculate energy in HP signal */
  5169  	SKP_Silk_sum_sqr_shift(tls, bp+960 /* &energy_32 */, bp+964 /* &shift */, bp /* &in_HP_8_kHz[0] */, HP_8_kHz_len)
  5170  
  5171  	/* Count consecutive samples above threshold, after adjusting threshold for number of input samples and shift */
  5172  	if *(*int32)(unsafe.Pointer(bp + 960 /* energy_32 */)) > (((int32(int16(10))) * (int32(int16(HP_8_kHz_len)))) >> (*(*int32)(unsafe.Pointer(bp + 964 /* shift */)))) {
  5173  		*(*int32)(unsafe.Pointer(psSWBdetect + 24 /* &.ConsecSmplsAboveThres */)) += (nSamplesIn)
  5174  		if (*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FConsecSmplsAboveThres > (480 * 15) {
  5175  			(*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FSWB_detected = 1
  5176  		}
  5177  	} else {
  5178  		*(*int32)(unsafe.Pointer(psSWBdetect + 24 /* &.ConsecSmplsAboveThres */)) -= (nSamplesIn)
  5179  		(*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FConsecSmplsAboveThres = func() int32 {
  5180  			if ((*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FConsecSmplsAboveThres) > (0) {
  5181  				return (*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FConsecSmplsAboveThres
  5182  			}
  5183  			return 0
  5184  		}()
  5185  	}
  5186  
  5187  	/* If there has been sufficient speech activity and no SWB has been detected, classify the signal as WB */
  5188  	if ((*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FActiveSpeech_ms > 15000) && ((*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FSWB_detected == 0) {
  5189  		(*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FWB_detected = 1
  5190  	}
  5191  }
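// Editorial note, not generated code: with the constants visible above, SWB is
// declared once ConsecSmplsAboveThres exceeds 480*15 = 7200 samples (roughly
// 300 ms at 24 kHz), while 15000 ms of active speech without an SWB hit flags
// the input as WB instead.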
  5192  
  5193  /***********************************************************************
  5194  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  5195  Redistribution and use in source and binary forms, with or without
  5196  modification, (subject to the limitations in the disclaimer below)
  5197  are permitted provided that the following conditions are met:
  5198  - Redistributions of source code must retain the above copyright notice,
  5199  this list of conditions and the following disclaimer.
  5200  - Redistributions in binary form must reproduce the above copyright
  5201  notice, this list of conditions and the following disclaimer in the
  5202  documentation and/or other materials provided with the distribution.
  5203  - Neither the name of Skype Limited, nor the names of specific
  5204  contributors, may be used to endorse or promote products derived from
  5205  this software without specific prior written permission.
  5206  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  5207  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  5208  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  5209  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  5210  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  5211  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  5212  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  5213  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  5214  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  5215  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  5216  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  5217  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  5218  ***********************************************************************/
  5219  
  5220  /*******************/
  5221  /* Pitch estimator */
  5222  /*******************/
  5223  
  5224  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
  5225  
  5226  /* Bandwidth expansion for whitening filter in pitch analysis */
  5227  
  5228  /* Threshold used by pitch estimator for early escape */
  5229  
  5230  /*********************/
  5231  /* Linear prediction */
  5232  /*********************/
  5233  
  5234  /* LPC analysis defines: regularization and bandwidth expansion */
  5235  
  5236  /* LTP analysis defines */
  5237  
  5238  /* LTP quantization settings */
  5239  
  5240  /***********************/
  5241  /* High pass filtering */
  5242  /***********************/
  5243  
  5244  /* Smoothing parameters for low end of pitch frequency range estimation */
  5245  
  5246  /* Min and max values for low end of pitch frequency range estimation */
  5247  
  5248  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
  5249  
  5250  /***********/
  5251  /* Various */
  5252  /***********/
  5253  
  5254  /* Required speech activity for counting frame as active */
  5255  
  5256  /* Speech Activity LBRR enable threshold (needs tuning) */
  5257  
  5258  /*************************/
  5259  /* Perceptual parameters */
  5260  /*************************/
  5261  
  5262  /* reduction in coding SNR during low speech activity */
  5263  
  5264  /* factor for reducing quantization noise during voiced speech */
  5265  
  5266  /* factor for reducing quantization noise for unvoiced sparse signals */
  5267  
  5268  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
  5269  
  5270  /* warping control */
  5271  
  5272  /* fraction added to first autocorrelation value */
  5273  
  5274  /* noise shaping filter chirp factor */
  5275  
  5276  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
  5277  
  5278  /* gain reduction for fricatives */
  5279  
  5280  /* extra harmonic boosting (signal shaping) at low bitrates */
  5281  
  5282  /* extra harmonic boosting (signal shaping) for noisy input signals */
  5283  
  5284  /* harmonic noise shaping */
  5285  
  5286  /* extra harmonic noise shaping for high bitrates or noisy input */
  5287  
  5288  /* parameter for shaping noise towards higher frequencies */
  5289  
  5290  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
  5291  
  5292  /* parameter for applying a high-pass tilt to the input signal */
  5293  
  5294  /* parameter for extra high-pass tilt to the input signal at high rates */
  5295  
  5296  /* parameter for reducing noise at the very low frequencies */
  5297  
  5298  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
  5299  
  5300  /* noise floor to put a lower limit on the quantization step size */
  5301  
  5302  /* noise floor relative to active speech gain level */
  5303  
  5304  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
  5305  
  5306  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
  5307  
  5308  /* parameters defining the R/D tradeoff in the residual quantizer */
  5309  
  5310  /****************/
  5311  /* Encode frame */
  5312  /****************/
  5313  func SKP_Silk_encode_frame_FIX(tls *libc.TLS, psEnc uintptr, pCode uintptr, pnBytesOut uintptr, pIn uintptr) int32 { /* SKP_Silk_encode_frame_FIX.c:34:9: */
  5314  	bp := tls.Alloc(5644)
  5315  	defer tls.Free(5644)
  5316  
  5317  	// var sEncCtrl SKP_Silk_encoder_control_FIX at bp+2020, 672
  5318  
  5319  	// var nBytes int32 at bp+5640, 4
  5320  
  5321  	var ret int32 = 0
  5322  	var x_frame uintptr
  5323  	var res_pitch_frame uintptr
  5324  	// var xfw [480]int16 at bp+3652, 960
  5325  
  5326  	// var pIn_HP [480]int16 at bp+2692, 960
  5327  
  5328  	// var res_pitch [1008]int16 at bp, 2016
  5329  
  5330  	var LBRR_idx int32
  5331  	var frame_terminator int32
  5332  	// var SNR_dB_Q7 int32 at bp+2016, 4
  5333  
  5334  	var FrameTermination_CDF uintptr
  5335  	/* Low bitrate redundancy parameters */
  5336  	// var LBRRpayload [1024]uint8 at bp+4612, 1024
  5337  
  5338  	// var nBytesLBRR int16 at bp+5636, 2
  5339  
  5340  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp + 2020 /* &sEncCtrl */)).FsCmn.FSeed = (libc.PostIncInt32(&(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FframeCounter, 1) & 3)
  5341  	/**************************************************************/
  5342  	/* Set up input pointers, and insert frame in input buffer   */
  5343  	/*************************************************************/
  5344  	x_frame = ((psEnc + 20784 /* &.x_buf */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)*2)  /* start of frame to encode */
  5345  	res_pitch_frame = (bp /* &res_pitch[0] */ + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)*2) /* start of pitch LPC residual frame */
  5346  
  5347  	/****************************/
  5348  	/* Voice Activity Detection */
  5349  	/****************************/
  5350  	ret = SKP_Silk_VAD_GetSA_Q8(tls, (psEnc /* &.sCmn */ + 15032 /* &.sVAD */), (psEnc + 22968 /* &.speech_activity_Q8 */), bp+2016, /* &SNR_dB_Q7 */
  5351  		bp+2020 /* &sEncCtrl */ +620 /* &.input_quality_bands_Q15 */, (bp + 2020 /* &sEncCtrl */ + 636 /* &.input_tilt_Q15 */),
  5352  		pIn, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)
  5353  
  5354  	/*******************************************/
  5355  	/* High-pass filtering of the input signal */
  5356  	/*******************************************/
  5357  	/* Variable high-pass filter */
  5358  	SKP_Silk_HP_variable_cutoff_FIX(tls, psEnc, bp+2020 /* &sEncCtrl */, bp+2692 /* &pIn_HP[0] */, pIn)
  5359  
  5360  	/* Ensure smooth bandwidth transitions */
  5361  	SKP_Silk_LP_variable_cutoff(tls, (psEnc /* &.sCmn */ + 15016 /* &.sLP */), (x_frame + uintptr((5*(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz))*2), bp+2692 /* &pIn_HP[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)
  5362  
  5363  	/*****************************************/
  5364  	/* Find pitch lags, initial LPC analysis */
  5365  	/*****************************************/
  5366  	SKP_Silk_find_pitch_lags_FIX(tls, psEnc, bp+2020 /* &sEncCtrl */, bp /* &res_pitch[0] */, x_frame)
  5367  
  5368  	/************************/
  5369  	/* Noise shape analysis */
  5370  	/************************/
  5371  	SKP_Silk_noise_shape_analysis_FIX(tls, psEnc, bp+2020 /* &sEncCtrl */, res_pitch_frame, x_frame)
  5372  
  5373  	/*****************************************/
  5374  	/* Prefiltering for noise shaper         */
  5375  	/*****************************************/
  5376  	SKP_Silk_prefilter_FIX(tls, psEnc, bp+2020 /* &sEncCtrl */, bp+3652 /* &xfw[0] */, x_frame)
  5377  
  5378  	/***************************************************/
  5379  	/* Find linear prediction coefficients (LPC + LTP) */
  5380  	/***************************************************/
  5381  	SKP_Silk_find_pred_coefs_FIX(tls, psEnc, bp+2020 /* &sEncCtrl */, bp /* &res_pitch[0] */)
  5382  
  5383  	/****************************************/
  5384  	/* Process gains                        */
  5385  	/****************************************/
  5386  	SKP_Silk_process_gains_FIX(tls, psEnc, bp+2020 /* &sEncCtrl */)
  5387  
  5388  	/****************************************/
  5389  	/* Low Bitrate Redundant Encoding       */
  5390  	/****************************************/
  5391  	*(*int16)(unsafe.Pointer(bp + 5636 /* nBytesLBRR */)) = int16(1024)
  5392  	SKP_Silk_LBRR_encode_FIX(tls, psEnc, bp+2020 /* &sEncCtrl */, bp+4612 /* &LBRRpayload[0] */, bp+5636 /* &nBytesLBRR */, bp+3652 /* &xfw[0] */)
  5393  
  5394  	/*****************************************/
  5395  	/* Noise shaping quantization            */
  5396  	/*****************************************/
  5397  	if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnStatesDelayedDecision > 1) || ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fwarping_Q16 > 0) {
  5398  		SKP_Silk_NSQ_del_dec(tls, (psEnc /* &.sCmn */), (bp + 2020 /* &sEncCtrl */ /* &.sCmn */), (psEnc /* &.sCmn */ + 2088 /* &.sNSQ */), bp+3652, /* &xfw[0] */
  5399  			psEnc /* &.sCmn */ +18608 /* &.q */, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp+2020 /* &sEncCtrl */)).FsCmn.FNLSFInterpCoef_Q2,
  5400  			(bp + 2020 /* &sEncCtrl */ + 144 /* &.PredCoef_Q12 */), bp+2020 /* &sEncCtrl */ +208 /* &.LTPCoef_Q14 */, bp+2020 /* &sEncCtrl */ +380 /* &.AR2_Q13 */, bp+2020 /* &sEncCtrl */ +572, /* &.HarmShapeGain_Q14 */
  5401  			bp+2020 /* &sEncCtrl */ +556 /* &.Tilt_Q14 */, bp+2020 /* &sEncCtrl */ +508 /* &.LF_shp_Q14 */, bp+2020 /* &sEncCtrl */ +128 /* &.Gains_Q16 */, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp+2020 /* &sEncCtrl */)).FLambda_Q10,
  5402  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp+2020 /* &sEncCtrl */)).FLTP_scale_Q14)
  5403  	} else {
  5404  		SKP_Silk_NSQ(tls, (psEnc /* &.sCmn */), (bp + 2020 /* &sEncCtrl */ /* &.sCmn */), (psEnc /* &.sCmn */ + 2088 /* &.sNSQ */), bp+3652, /* &xfw[0] */
  5405  			psEnc /* &.sCmn */ +18608 /* &.q */, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp+2020 /* &sEncCtrl */)).FsCmn.FNLSFInterpCoef_Q2,
  5406  			(bp + 2020 /* &sEncCtrl */ + 144 /* &.PredCoef_Q12 */), bp+2020 /* &sEncCtrl */ +208 /* &.LTPCoef_Q14 */, bp+2020 /* &sEncCtrl */ +380 /* &.AR2_Q13 */, bp+2020 /* &sEncCtrl */ +572, /* &.HarmShapeGain_Q14 */
  5407  			bp+2020 /* &sEncCtrl */ +556 /* &.Tilt_Q14 */, bp+2020 /* &sEncCtrl */ +508 /* &.LF_shp_Q14 */, bp+2020 /* &sEncCtrl */ +128 /* &.Gains_Q16 */, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp+2020 /* &sEncCtrl */)).FLambda_Q10,
  5408  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp+2020 /* &sEncCtrl */)).FLTP_scale_Q14)
  5409  	}
  5410  
  5411  	/**************************************************/
  5412  	/* Convert speech activity into VAD and DTX flags */
  5413  	/**************************************************/
  5414  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8 < SKP_FIX_CONST(tls, 0.1, 8) {
  5415  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FvadFlag = 0
  5416  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnoSpeechCounter++
  5417  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnoSpeechCounter > 5 {
  5418  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinDTX = 1
  5419  		}
  5420  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnoSpeechCounter > (20 + 5) {
  5421  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnoSpeechCounter = 5
  5422  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinDTX = 0
  5423  		}
  5424  	} else {
  5425  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnoSpeechCounter = 0
  5426  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinDTX = 0
  5427  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FvadFlag = 1
  5428  	}
  5429  
  5430  	/****************************************/
  5431  	/* Initialize range coder               */
  5432  	/****************************************/
  5433  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf == 0 {
  5434  		SKP_Silk_range_enc_init(tls, (psEnc /* &.sCmn */ /* &.sRC */))
  5435  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnBytesInPayloadBuf = 0
  5436  	}
  5437  
  5438  	/****************************************/
  5439  	/* Encode Parameters                    */
  5440  	/****************************************/
  5441  	SKP_Silk_encode_parameters(tls, (psEnc /* &.sCmn */), (bp + 2020 /* &sEncCtrl */ /* &.sCmn */), (psEnc /* &.sCmn */ /* &.sRC */), psEnc /* &.sCmn */ +18608 /* &.q */)
  5442  	FrameTermination_CDF = uintptr(unsafe.Pointer(&SKP_Silk_FrameTermination_CDF))
  5443  
  5444  	/****************************************/
  5445  	/* Update Buffers and State             */
  5446  	/****************************************/
  5447  	/* Update input buffer */
  5448  	libc.Xmemmove(tls, psEnc+20784 /* &.x_buf */, ((psEnc + 20784 /* &.x_buf */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)*2), ((uint64((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length + (5 * (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz))) * uint64(unsafe.Sizeof(int16(0)))))
  5449  
  5450  	/* Parameters needed for next frame */
  5451  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fprev_sigtype = (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp + 2020 /* &sEncCtrl */)).FsCmn.Fsigtype
  5452  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FprevLag = *(*int32)(unsafe.Pointer((bp + 2020 /* &sEncCtrl */ /* &.sCmn */ + 108 /* &.pitchL */) + 3*4))
  5453  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffirst_frame_after_reset = 0
  5454  
  5455  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsRC.Ferror != 0 {
  5456  		/* Encoder returned error: clear payload buffer */
  5457  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf = 0
  5458  	} else {
  5459  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf++
  5460  	}
  5461  
  5462  	/****************************************/
  5463  	/* Finalize payload and copy to output  */
  5464  	/****************************************/
  5465  	if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf * 20) >= (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketSize_ms {
  5466  
  5467  		LBRR_idx = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx + 1) & 1)
  5468  
  5469  		/* Check if FEC information should be added */
  5470  		frame_terminator = 0
  5471  		if (*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEnc /* &.sCmn */ +16264 /* &.LBRR_buffer */)+uintptr(LBRR_idx)*1032)).Fusage == 1 {
  5472  			frame_terminator = 2
  5473  		}
  5474  		if (*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEnc /* &.sCmn */ +16264 /* &.LBRR_buffer */)+uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx)*1032)).Fusage == 2 {
  5475  			frame_terminator = 3
  5476  			LBRR_idx = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx
  5477  		}
  5478  
  5479  		/* Add the frame termination info to stream */
  5480  		SKP_Silk_range_encoder(tls, (psEnc /* &.sCmn */ /* &.sRC */), frame_terminator, FrameTermination_CDF)
  5481  
  5482  		/* Payload length so far */
  5483  		SKP_Silk_range_coder_get_length(tls, (psEnc /* &.sCmn */ /* &.sRC */), bp+5640 /* &nBytes */)
  5484  
  5485  		/* Check that there is enough space in external output buffer, and move data */
  5486  		if int32(*(*int16)(unsafe.Pointer(pnBytesOut))) >= *(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */)) {
  5487  			SKP_Silk_range_enc_wrap_up(tls, (psEnc /* &.sCmn */ /* &.sRC */))
  5488  			libc.Xmemcpy(tls, pCode, psEnc /* &.sCmn */ /* &.sRC */ +20 /* &.buffer */, (uint64(*(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */))) * uint64(unsafe.Sizeof(uint8(0)))))
  5489  
  5490  			if (frame_terminator > 1) && (int32(*(*int16)(unsafe.Pointer(pnBytesOut))) >= (*(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */)) + (*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEnc /* &.sCmn */ +16264 /* &.LBRR_buffer */)+uintptr(LBRR_idx)*1032)).FnBytes)) {
  5491  				/* Get old packet and add to payload. */
  5492  				libc.Xmemcpy(tls, (pCode + uintptr(*(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */)))), (psEnc /* &.sCmn */ +16264 /* &.LBRR_buffer */)+uintptr(LBRR_idx)*1032 /* &.payload */, (uint64((*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEnc /* &.sCmn */ +16264 /* &.LBRR_buffer */)+uintptr(LBRR_idx)*1032)).FnBytes) * uint64(unsafe.Sizeof(uint8(0)))))
  5493  				*(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */)) += (*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEnc /* &.sCmn */ + 16264 /* &.LBRR_buffer */) + uintptr(LBRR_idx)*1032)).FnBytes
  5494  			}
  5495  
  5496  			*(*int16)(unsafe.Pointer(pnBytesOut)) = int16(*(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */)))
  5497  
  5498  			/* Update FEC buffer */
  5499  			libc.Xmemcpy(tls, (psEnc /* &.sCmn */ +16264 /* &.LBRR_buffer */)+uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx)*1032 /* &.payload */, bp+4612 /* &LBRRpayload[0] */, (uint64(*(*int16)(unsafe.Pointer(bp + 5636 /* nBytesLBRR */))) * uint64(unsafe.Sizeof(uint8(0)))))
  5500  			(*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEnc /* &.sCmn */ + 16264 /* &.LBRR_buffer */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx)*1032)).FnBytes = int32(*(*int16)(unsafe.Pointer(bp + 5636 /* nBytesLBRR */)))
  5501  			/* The line below describes how FEC should be used */
  5502  			(*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEnc /* &.sCmn */ + 16264 /* &.LBRR_buffer */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx)*1032)).Fusage = (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp + 2020 /* &sEncCtrl */)).FsCmn.FLBRR_usage
  5503  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx + 1) & 1)
  5504  
  5505  		} else {
  5506  			/* Not enough space: Payload will be discarded */
  5507  			*(*int16)(unsafe.Pointer(pnBytesOut)) = int16(0)
  5508  			*(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */)) = 0
  5509  			ret = -4
  5510  		}
  5511  
  5512  		/* Reset the number of frames in payload buffer */
  5513  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf = 0
  5514  	} else {
  5515  		/* No payload this time */
  5516  		*(*int16)(unsafe.Pointer(pnBytesOut)) = int16(0)
  5517  
  5518  		/* Encode that more frames follow */
  5519  		frame_terminator = 1
  5520  		SKP_Silk_range_encoder(tls, (psEnc /* &.sCmn */ /* &.sRC */), frame_terminator, FrameTermination_CDF)
  5521  
  5522  		/* Payload length so far */
  5523  		SKP_Silk_range_coder_get_length(tls, (psEnc /* &.sCmn */ /* &.sRC */), bp+5640 /* &nBytes */)
  5524  
  5525  	}
  5526  
  5527  	/* Check for arithmetic coder errors */
  5528  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsRC.Ferror != 0 {
  5529  		ret = -9
  5530  	}
  5531  
  5532  	/* Simulate number of ms buffered in channel because of exceeding TargetRate */
  5533  
  5534  	*(*int32)(unsafe.Pointer(psEnc + 22964 /* &.BufferedInChannel_ms */)) += (((8 * 1000) * (*(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnBytesInPayloadBuf)) / ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FTargetRate_bps))
  5535  	*(*int32)(unsafe.Pointer(psEnc + 22964 /* &.BufferedInChannel_ms */)) -= (20)
  5536  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms = func() int32 {
  5537  		if (0) > (100) {
  5538  			return func() int32 {
  5539  				if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms) > (0) {
  5540  					return 0
  5541  				}
  5542  				return func() int32 {
  5543  					if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms) < (100) {
  5544  						return 100
  5545  					}
  5546  					return (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms
  5547  				}()
  5548  			}()
  5549  		}
  5550  		return func() int32 {
  5551  			if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms) > (100) {
  5552  				return 100
  5553  			}
  5554  			return func() int32 {
  5555  				if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms) < (0) {
  5556  					return 0
  5557  				}
  5558  				return (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms
  5559  			}()
  5560  		}()
  5561  	}()
  5562  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnBytesInPayloadBuf = *(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */))
  5563  
  5564  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8 > SKP_FIX_CONST(tls, 0.7, 8) {
  5565  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsSWBdetect.FActiveSpeech_ms = func() int32 {
  5566  			if ((uint32(((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsSWBdetect.FActiveSpeech_ms) + (20))) & 0x80000000) != 0 {
  5567  				return 0x7FFFFFFF
  5568  			}
  5569  			return (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsSWBdetect.FActiveSpeech_ms) + (20))
  5570  		}()
  5571  	}
  5572  
  5573  	return ret
  5574  }
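// Editorial sketch, not generated code: the macro-expanded channel-buffer
// bookkeeping near the end of SKP_Silk_encode_frame_FIX reduces to the clamped
// update below. The function and parameter names are illustrative only.
func bufferedMsUpdate(bufferedMs, nBytes, nBytesInPayloadBuf, targetRateBps int32) int32 {
	// ms added to the channel by the new payload bytes, minus the 20 ms frame
	bufferedMs += 8*1000*(nBytes-nBytesInPayloadBuf)/targetRateBps - 20
	if bufferedMs < 0 {
		return 0
	}
	if bufferedMs > 100 {
		return 100
	}
	return bufferedMs
}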
  5575  
  5576  /* Low Bitrate Redundancy (LBRR) encoding functionality. Reuse all parameters but encode the residual at a lower bitrate */
  5577  func SKP_Silk_LBRR_encode_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr, pCode uintptr, pnBytesOut uintptr, xfw uintptr) { /* SKP_Silk_encode_frame_FIX.c:279:6: */
  5578  	bp := tls.Alloc(36)
  5579  	defer tls.Free(36)
  5580  
  5581  	// var TempGainsIndices [4]int32 at bp, 16
  5582  
  5583  	var frame_terminator int32
  5584  	// var nBytes int32 at bp+32, 4
  5585  
  5586  	var nFramesInPayloadBuf int32
  5587  	// var TempGains_Q16 [4]int32 at bp+16, 16
  5588  
  5589  	var typeOffset int32
  5590  	var LTP_scaleIndex int32
  5591  	var Rate_only_parameters int32 = 0
  5592  	/*******************************************/
  5593  	/* Control use of inband LBRR              */
  5594  	/*******************************************/
  5595  	SKP_Silk_LBRR_ctrl_FIX(tls, psEnc, (psEncCtrl /* &.sCmn */))
  5596  
  5597  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_enabled != 0 {
  5598  		/* Save original gains */
  5599  		libc.Xmemcpy(tls, bp /* &TempGainsIndices[0] */, psEncCtrl /* &.sCmn */ +72 /* &.GainsIndices */, (uint64(4) * uint64(unsafe.Sizeof(int32(0)))))
  5600  		libc.Xmemcpy(tls, bp+16 /* &TempGains_Q16[0] */, psEncCtrl+128 /* &.Gains_Q16 */, (uint64(4) * uint64(unsafe.Sizeof(int32(0)))))
  5601  
  5602  		typeOffset = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FtypeOffsetPrev // Save temporarily, as it must not be overwritten
  5603  		LTP_scaleIndex = (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FLTP_scaleIndex
  5604  
  5605  		/* Set max rate where quant signal is encoded */
  5606  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 8 {
  5607  			Rate_only_parameters = 13500
  5608  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 12 {
  5609  			Rate_only_parameters = 15500
  5610  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 16 {
  5611  			Rate_only_parameters = 17500
  5612  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 24 {
  5613  			Rate_only_parameters = 19500
  5614  		} else {
  5615  
  5616  		}
  5617  
  5618  		if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FComplexity > 0) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FTargetRate_bps > Rate_only_parameters) {
  5619  			if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf == 0 {
  5620  				/* First frame in packet; copy everything */
  5621  				libc.Xmemcpy(tls, (psEnc /* &.sCmn */ + 8548 /* &.sNSQ_LBRR */), (psEnc /* &.sCmn */ + 2088 /* &.sNSQ */), uint64(unsafe.Sizeof(SKP_Silk_nsq_state{})))
  5622  
  5623  				(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRRprevLastGainIndex = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsShape.FLastGainIndex
  5624  				/* Increase Gains to get target LBRR rate */
  5625  				*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */))) = (*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */))) + (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_GainIncreases)
  5626  				*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */))) = func() int32 {
  5627  					if (0) > (64 - 1) {
  5628  						return func() int32 {
  5629  							if (*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */)))) > (0) {
  5630  								return 0
  5631  							}
  5632  							return func() int32 {
  5633  								if (*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */)))) < (64 - 1) {
  5634  									return (64 - 1)
  5635  								}
  5636  								return *(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */)))
  5637  							}()
  5638  						}()
  5639  					}
  5640  					return func() int32 {
  5641  						if (*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */)))) > (64 - 1) {
  5642  							return (64 - 1)
  5643  						}
  5644  						return func() int32 {
  5645  							if (*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */)))) < (0) {
  5646  								return 0
  5647  							}
  5648  							return *(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */)))
  5649  						}()
  5650  					}()
  5651  				}()
  5652  			}
  5653  			/* Decode to get gains in sync with decoder         */
  5654  			/* Overwrite unquantized gains with quantized gains */
  5655  			SKP_Silk_gains_dequant(tls, psEncCtrl+128 /* &.Gains_Q16 */, psEncCtrl /* &.sCmn */ +72, /* &.GainsIndices */
  5656  				(psEnc /* &.sCmn */ + 15144 /* &.LBRRprevLastGainIndex */), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf)
  5657  
  5658  			/*****************************************/
  5659  			/* Noise shaping quantization            */
  5660  			/*****************************************/
  5661  			if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnStatesDelayedDecision > 1) || ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fwarping_Q16 > 0) {
  5662  				SKP_Silk_NSQ_del_dec(tls, (psEnc /* &.sCmn */), (psEncCtrl /* &.sCmn */), (psEnc /* &.sCmn */ + 8548 /* &.sNSQ_LBRR */), xfw, psEnc /* &.sCmn */ +19088, /* &.q_LBRR */
  5663  					(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FNLSFInterpCoef_Q2, (psEncCtrl + 144 /* &.PredCoef_Q12 */), psEncCtrl+208, /* &.LTPCoef_Q14 */
  5664  					psEncCtrl+380 /* &.AR2_Q13 */, psEncCtrl+572 /* &.HarmShapeGain_Q14 */, psEncCtrl+556 /* &.Tilt_Q14 */, psEncCtrl+508, /* &.LF_shp_Q14 */
  5665  					psEncCtrl+128 /* &.Gains_Q16 */, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLambda_Q10, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTP_scale_Q14)
  5666  			} else {
  5667  				SKP_Silk_NSQ(tls, (psEnc /* &.sCmn */), (psEncCtrl /* &.sCmn */), (psEnc /* &.sCmn */ + 8548 /* &.sNSQ_LBRR */), xfw, psEnc /* &.sCmn */ +19088, /* &.q_LBRR */
  5668  					(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FNLSFInterpCoef_Q2, (psEncCtrl + 144 /* &.PredCoef_Q12 */), psEncCtrl+208, /* &.LTPCoef_Q14 */
  5669  					psEncCtrl+380 /* &.AR2_Q13 */, psEncCtrl+572 /* &.HarmShapeGain_Q14 */, psEncCtrl+556 /* &.Tilt_Q14 */, psEncCtrl+508, /* &.LF_shp_Q14 */
  5670  					psEncCtrl+128 /* &.Gains_Q16 */, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLambda_Q10, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTP_scale_Q14)
  5671  			}
  5672  		} else {
  5673  			libc.Xmemset(tls, psEnc /* &.sCmn */ +19088 /* &.q_LBRR */, 0, (uint64((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length) * uint64(unsafe.Sizeof(int8(0)))))
  5674  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FLTP_scaleIndex = 0
  5675  		}
  5676  		/****************************************/
  5677  		/* Initialize arithmetic coder          */
  5678  		/****************************************/
  5679  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf == 0 {
  5680  			SKP_Silk_range_enc_init(tls, (psEnc /* &.sCmn */ + 1044 /* &.sRC_LBRR */))
  5681  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnBytesInPayloadBuf = 0
  5682  		}
  5683  
  5684  		/****************************************/
  5685  		/* Encode Parameters                    */
  5686  		/****************************************/
  5687  		SKP_Silk_encode_parameters(tls, (psEnc /* &.sCmn */), (psEncCtrl /* &.sCmn */),
  5688  			(psEnc /* &.sCmn */ + 1044 /* &.sRC_LBRR */), psEnc /* &.sCmn */ +19088 /* &.q_LBRR */)
  5689  
  5690  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsRC_LBRR.Ferror != 0 {
  5691  			/* Encoder returned error: clear payload buffer */
  5692  			nFramesInPayloadBuf = 0
  5693  		} else {
  5694  			nFramesInPayloadBuf = ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf + 1)
  5695  		}
  5696  
  5697  		/****************************************/
  5698  		/* Finalize payload and copy to output  */
  5699  		/****************************************/
  5700  		if ((int32(int16(nFramesInPayloadBuf))) * (int32(int16(20)))) >= (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketSize_ms {
  5701  
  5702  			/* Check if FEC information should be added */
  5703  			frame_terminator = 0
  5704  
  5705  			/* Add the frame termination info to stream */
  5706  			SKP_Silk_range_encoder(tls, (psEnc /* &.sCmn */ + 1044 /* &.sRC_LBRR */), frame_terminator, uintptr(unsafe.Pointer(&SKP_Silk_FrameTermination_CDF)))
  5707  
  5708  			/* Payload length so far */
  5709  			SKP_Silk_range_coder_get_length(tls, (psEnc /* &.sCmn */ + 1044 /* &.sRC_LBRR */), bp+32 /* &nBytes */)
  5710  
  5711  			/* Check that there is enough space in external output buffer and move data */
  5712  			if int32(*(*int16)(unsafe.Pointer(pnBytesOut))) >= *(*int32)(unsafe.Pointer(bp + 32 /* nBytes */)) {
  5713  				SKP_Silk_range_enc_wrap_up(tls, (psEnc /* &.sCmn */ + 1044 /* &.sRC_LBRR */))
  5714  				libc.Xmemcpy(tls, pCode, psEnc /* &.sCmn */ +1044 /* &.sRC_LBRR */ +20 /* &.buffer */, (uint64(*(*int32)(unsafe.Pointer(bp + 32 /* nBytes */))) * uint64(unsafe.Sizeof(uint8(0)))))
  5715  
  5716  				*(*int16)(unsafe.Pointer(pnBytesOut)) = int16(*(*int32)(unsafe.Pointer(bp + 32 /* nBytes */)))
  5717  			} else {
  5718  				/* Not enough space: payload will be discarded */
  5719  				*(*int16)(unsafe.Pointer(pnBytesOut)) = int16(0)
  5720  
  5721  			}
  5722  		} else {
  5723  			/* No payload this time */
  5724  			*(*int16)(unsafe.Pointer(pnBytesOut)) = int16(0)
  5725  
  5726  			/* Encode that more frames follow */
  5727  			frame_terminator = 1
  5728  			SKP_Silk_range_encoder(tls, (psEnc /* &.sCmn */ + 1044 /* &.sRC_LBRR */), frame_terminator, uintptr(unsafe.Pointer(&SKP_Silk_FrameTermination_CDF)))
  5729  		}
  5730  
  5731  		/* Restore original Gains */
  5732  		libc.Xmemcpy(tls, psEncCtrl /* &.sCmn */ +72 /* &.GainsIndices */, bp /* &TempGainsIndices[0] */, (uint64(4) * uint64(unsafe.Sizeof(int32(0)))))
  5733  		libc.Xmemcpy(tls, psEncCtrl+128 /* &.Gains_Q16 */, bp+16 /* &TempGains_Q16[0] */, (uint64(4) * uint64(unsafe.Sizeof(int32(0)))))
  5734  
  5735  		/* Restore LTP scale index and typeoffset */
  5736  		(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FLTP_scaleIndex = LTP_scaleIndex
  5737  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FtypeOffsetPrev = typeOffset
  5738  	}
  5739  }
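// Editorial note, not generated code: the LBRR path above re-runs noise shaping
// quantization into the separate sNSQ_LBRR / q_LBRR state with the first gain
// index raised by LBRR_GainIncreases (clamped to [0, 63]), writes the result
// through its own range coder sRC_LBRR, and finally restores the original gain
// indices, gains, LTP scale index and typeOffsetPrev so the main encoding path
// is unaffected.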
  5740  
  5741  /*******************************************/
  5742  /* Encode parameters to create the payload */
  5743  /*******************************************/
  5744  func SKP_Silk_encode_parameters(tls *libc.TLS, psEncC uintptr, psEncCtrlC uintptr, psRC uintptr, q uintptr) { /* SKP_Silk_encode_parameters.c:33:6: */
  5745  	var i int32
  5746  	var k int32
  5747  	var typeOffset int32
  5748  	var psNLSF_CB uintptr
  5749  
  5750  	/************************/
  5751  	/* Encode sampling rate */
  5752  	/************************/
  5753  	/* only done for first frame in packet */
  5754  	if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnFramesInPayloadBuf == 0 {
  5755  		/* get sampling rate index */
  5756  		for i = 0; i < 3; i++ {
  5757  			if SKP_Silk_SamplingRates_table[i] == (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz {
  5758  				break
  5759  			}
  5760  		}
  5761  		SKP_Silk_range_encoder(tls, psRC, i, uintptr(unsafe.Pointer(&SKP_Silk_SamplingRates_CDF)))
  5762  	}
  5763  
  5764  	/*******************************************/
  5765  	/* Encode signal type and quantizer offset */
  5766  	/*******************************************/
  5767  	typeOffset = ((2 * (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype) + (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FQuantOffsetType)
  5768  	if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnFramesInPayloadBuf == 0 {
  5769  		/* first frame in packet: independent coding */
  5770  		SKP_Silk_range_encoder(tls, psRC, typeOffset, uintptr(unsafe.Pointer(&SKP_Silk_type_offset_CDF)))
  5771  	} else {
  5772  		/* conditional coding */
  5773  		SKP_Silk_range_encoder(tls, psRC, typeOffset, (uintptr(unsafe.Pointer(&SKP_Silk_type_offset_joint_CDF)) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FtypeOffsetPrev)*10))
  5774  	}
  5775  	(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FtypeOffsetPrev = typeOffset
  5776  
  5777  	/****************/
  5778  	/* Encode gains */
  5779  	/****************/
  5780  	/* first subframe */
  5781  	if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnFramesInPayloadBuf == 0 {
  5782  		/* first frame in packet: independent coding */
  5783  		SKP_Silk_range_encoder(tls, psRC, *(*int32)(unsafe.Pointer((psEncCtrlC + 72 /* &.GainsIndices */))), (uintptr(unsafe.Pointer(&SKP_Silk_gain_CDF)) + uintptr((*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype)*130))
  5784  	} else {
  5785  		/* conditional coding */
  5786  		SKP_Silk_range_encoder(tls, psRC, *(*int32)(unsafe.Pointer((psEncCtrlC + 72 /* &.GainsIndices */))), uintptr(unsafe.Pointer(&SKP_Silk_delta_gain_CDF)))
  5787  	}
  5788  
  5789  	/* remaining subframes */
  5790  	for i = 1; i < 4; i++ {
  5791  		SKP_Silk_range_encoder(tls, psRC, *(*int32)(unsafe.Pointer((psEncCtrlC + 72 /* &.GainsIndices */) + uintptr(i)*4)), uintptr(unsafe.Pointer(&SKP_Silk_delta_gain_CDF)))
  5792  	}
  5793  
  5794  	/****************/
  5795  	/* Encode NLSFs */
  5796  	/****************/
  5797  	/* Range encoding of the NLSF path */
  5798  	psNLSF_CB = *(*uintptr)(unsafe.Pointer((psEncC + 16248 /* &.psNLSF_CB */) + uintptr((*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype)*8))
  5799  	SKP_Silk_range_encoder_multi(tls, psRC, psEncCtrlC+28 /* &.NLSFIndices */, (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FStartPtr, (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages)
  5800  
  5801  	/* Encode NLSF interpolation factor */
  5802  
  5803  	SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FNLSFInterpCoef_Q2, uintptr(unsafe.Pointer(&SKP_Silk_NLSF_interpolation_factor_CDF)))
  5804  
  5805  	if (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype == 0 {
  5806  		/*********************/
  5807  		/* Encode pitch lags */
  5808  		/*********************/
  5809  
  5810  		/* lag index */
  5811  		if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 8 {
  5812  			SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FlagIndex, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_NB_CDF)))
  5813  		} else if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 12 {
  5814  			SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FlagIndex, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_MB_CDF)))
  5815  		} else if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 16 {
  5816  			SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FlagIndex, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_WB_CDF)))
  5817  		} else {
  5818  			SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FlagIndex, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_SWB_CDF)))
  5819  		}
  5820  
  5821  		/* contour index */
  5822  		if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 8 {
  5823  			/* Fewer codevectors are used in 8 kHz mode */
  5824  			SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FcontourIndex, uintptr(unsafe.Pointer(&SKP_Silk_pitch_contour_NB_CDF)))
  5825  		} else {
  5826  			/* Joint for 12, 16, 24 kHz */
  5827  			SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FcontourIndex, uintptr(unsafe.Pointer(&SKP_Silk_pitch_contour_CDF)))
  5828  		}
  5829  
  5830  		/********************/
  5831  		/* Encode LTP gains */
  5832  		/********************/
  5833  
  5834  		/* PERIndex value */
  5835  		SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FPERIndex, uintptr(unsafe.Pointer(&SKP_Silk_LTP_per_index_CDF)))
  5836  
  5837  		/* Codebook Indices */
  5838  		for k = 0; k < 4; k++ {
  5839  			SKP_Silk_range_encoder(tls, psRC, *(*int32)(unsafe.Pointer((psEncCtrlC + 12 /* &.LTPIndex */) + uintptr(k)*4)), SKP_Silk_LTP_gain_CDF_ptrs[(*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FPERIndex])
  5840  		}
  5841  
  5842  		/**********************/
  5843  		/* Encode LTP scaling */
  5844  		/**********************/
  5845  		SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FLTP_scaleIndex, uintptr(unsafe.Pointer(&SKP_Silk_LTPscale_CDF)))
  5846  	}
  5847  
  5848  	/***************/
  5849  	/* Encode seed */
  5850  	/***************/
  5851  	SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FSeed, uintptr(unsafe.Pointer(&SKP_Silk_Seed_CDF)))
  5852  
  5853  	/*********************************************/
  5854  	/* Encode quantization indices of excitation */
  5855  	/*********************************************/
  5856  	SKP_Silk_encode_pulses(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FQuantOffsetType, q, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length)
  5857  
  5858  	/*********************************************/
  5859  	/* Encode VAD flag                           */
  5860  	/*********************************************/
  5861  	SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FvadFlag, uintptr(unsafe.Pointer(&SKP_Silk_vadflag_CDF)))
  5862  }
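// Editorial summary, not generated code: SKP_Silk_encode_parameters writes, in
// order, the sampling-rate index (first frame of the packet only), the joint
// signal-type / quantizer-offset symbol, four gain indices, the NLSF codebook
// path and interpolation factor, then for voiced frames (sigtype == 0) the
// pitch lag, pitch contour, LTP PERIndex, four LTP codebook indices and the
// LTP scale index, and finally the seed, the excitation pulses and the VAD flag.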
  5863  
  5864  /*********************************************/
  5865  /* Encode quantization indices of excitation */
  5866  /*********************************************/
  5867  
  5868  func combine_and_check(tls *libc.TLS, pulses_comb uintptr, pulses_in uintptr, max_pulses int32, len int32) int32 { /* SKP_Silk_encode_pulses.c:34:20: */
  5869  	var k int32
  5870  	var sum int32
  5871  
  5872  	for k = 0; k < len; k++ {
  5873  		sum = (*(*int32)(unsafe.Pointer(pulses_in + uintptr((2*k))*4)) + *(*int32)(unsafe.Pointer(pulses_in + uintptr(((2*k)+1))*4)))
  5874  		if sum > max_pulses {
  5875  			return 1
  5876  		}
  5877  		*(*int32)(unsafe.Pointer(pulses_comb + uintptr(k)*4)) = sum
  5878  	}
  5879  
  5880  	return 0
  5881  }
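// Editorial note, not generated code: combine_and_check sums adjacent pairs of
// pulse magnitudes (len output values) and returns 1 as soon as a sum exceeds
// max_pulses. SKP_Silk_encode_pulses applies it three times per 16-sample shell
// block (16 -> 8 -> 4 -> 2 values) and checks the final 2 -> 1 sum inline; if
// any level overflows SKP_Silk_max_pulses_table, the block's magnitudes are
// halved, nRshifts is incremented, and the checks are repeated.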
  5882  
  5883  /* Encode quantization indices of excitation */
  5884  func SKP_Silk_encode_pulses(tls *libc.TLS, psRC uintptr, sigtype int32, QuantOffsetType int32, q uintptr, frame_length int32) { /* SKP_Silk_encode_pulses.c:55:6: */
  5885  	bp := tls.Alloc(2192)
  5886  	defer tls.Free(2192)
  5887  
  5888  	var i int32
  5889  	var k int32
  5890  	var j int32
  5891  	var iter int32
  5892  	var bit int32
  5893  	var nLS int32
  5894  	var scale_down int32
  5895  	var RateLevelIndex int32 = 0
  5896  	var abs_q int32
  5897  	var minSumBits_Q6 int32
  5898  	var sumBits_Q6 int32
  5899  	// var abs_pulses [480]int32 at bp+32, 1920
  5900  
  5901  	// var sum_pulses [30]int32 at bp+2072, 120
  5902  
  5903  	// var nRshifts [30]int32 at bp+1952, 120
  5904  
  5905  	// var pulses_comb [8]int32 at bp, 32
  5906  
  5907  	var abs_pulses_ptr uintptr
  5908  	var pulses_ptr uintptr
  5909  	var cdf_ptr uintptr
  5910  	var nBits_ptr uintptr
  5911  
  5912  	libc.Xmemset(tls, bp /* &pulses_comb[0] */, 0, (uint64(8) * uint64(unsafe.Sizeof(int32(0))))) // Fixing Valgrind reported problem
  5913  
  5914  	/****************************/
  5915  	/* Prepare for shell coding */
  5916  	/****************************/
  5917  	/* Calculate number of shell blocks */
  5918  	iter = (frame_length / 16)
  5919  
  5920  	/* Take the absolute value of the pulses */
  5921  	for i = 0; i < frame_length; i = i + (4) {
  5922  		*(*int32)(unsafe.Pointer(bp + 32 /* &abs_pulses[0] */ + uintptr((i+0))*4)) = func() int32 {
  5923  			if (int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 0)))))) > 0 {
  5924  				return int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 0)))))
  5925  			}
  5926  			return -int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 0)))))
  5927  		}()
  5928  		*(*int32)(unsafe.Pointer(bp + 32 /* &abs_pulses[0] */ + uintptr((i+1))*4)) = func() int32 {
  5929  			if (int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 1)))))) > 0 {
  5930  				return int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 1)))))
  5931  			}
  5932  			return -int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 1)))))
  5933  		}()
  5934  		*(*int32)(unsafe.Pointer(bp + 32 /* &abs_pulses[0] */ + uintptr((i+2))*4)) = func() int32 {
  5935  			if (int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 2)))))) > 0 {
  5936  				return int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 2)))))
  5937  			}
  5938  			return -int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 2)))))
  5939  		}()
  5940  		*(*int32)(unsafe.Pointer(bp + 32 /* &abs_pulses[0] */ + uintptr((i+3))*4)) = func() int32 {
  5941  			if (int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 3)))))) > 0 {
  5942  				return int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 3)))))
  5943  			}
  5944  			return -int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 3)))))
  5945  		}()
  5946  	}
  5947  
  5948  	/* Calc sum pulses per shell code frame */
  5949  	abs_pulses_ptr = bp + 32 /* &abs_pulses[0] */
  5950  	for i = 0; i < iter; i++ {
  5951  		*(*int32)(unsafe.Pointer(bp + 1952 /* &nRshifts[0] */ + uintptr(i)*4)) = 0
  5952  
  5953  		for 1 != 0 {
  5954  			/* 1+1 -> 2 */
  5955  			scale_down = combine_and_check(tls, bp /* &pulses_comb[0] */, abs_pulses_ptr, SKP_Silk_max_pulses_table[0], 8)
  5956  
  5957  			/* 2+2 -> 4 */
  5958  			scale_down = scale_down + (combine_and_check(tls, bp /* &pulses_comb[0] */, bp /* &pulses_comb[0] */, SKP_Silk_max_pulses_table[1], 4))
  5959  
  5960  			/* 4+4 -> 8 */
  5961  			scale_down = scale_down + (combine_and_check(tls, bp /* &pulses_comb[0] */, bp /* &pulses_comb[0] */, SKP_Silk_max_pulses_table[2], 2))
  5962  
  5963  			/* 8+8 -> 16 */
  5964  			*(*int32)(unsafe.Pointer(bp + 2072 /* &sum_pulses[0] */ + uintptr(i)*4)) = (*(*int32)(unsafe.Pointer(bp /* &pulses_comb[0] */)) + *(*int32)(unsafe.Pointer(bp /* &pulses_comb[0] */ + 1*4)))
  5965  			if *(*int32)(unsafe.Pointer(bp + 2072 /* &sum_pulses[0] */ + uintptr(i)*4)) > SKP_Silk_max_pulses_table[3] {
  5966  				scale_down++
  5967  			}
  5968  
  5969  			if scale_down != 0 {
  5970  				/* We need to down scale the quantization signal */
  5971  				*(*int32)(unsafe.Pointer(bp + 1952 /* &nRshifts[0] */ + uintptr(i)*4))++
  5972  				for k = 0; k < 16; k++ {
  5973  					*(*int32)(unsafe.Pointer(abs_pulses_ptr + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(abs_pulses_ptr + uintptr(k)*4))) >> (1))
  5974  				}
  5975  			} else {
  5976  				/* Jump out of while(1) loop and go to next shell coding frame */
  5977  				break
  5978  			}
  5979  		}
  5980  		abs_pulses_ptr += 4 * (uintptr(16))
  5981  	}
  5982  
  5983  	/**************/
  5984  	/* Rate level */
  5985  	/**************/
  5986  	/* find rate level that leads to fewest bits for coding of pulses per block info */
  5987  	minSumBits_Q6 = 0x7FFFFFFF
  5988  	for k = 0; k < (10 - 1); k++ {
  5989  		nBits_ptr = (uintptr(unsafe.Pointer(&SKP_Silk_pulses_per_block_BITS_Q6)) + uintptr(k)*40)
  5990  		sumBits_Q6 = int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_rate_levels_BITS_Q6)) + uintptr(sigtype)*18) + uintptr(k)*2)))
  5991  		for i = 0; i < iter; i++ {
  5992  			if *(*int32)(unsafe.Pointer(bp + 1952 /* &nRshifts[0] */ + uintptr(i)*4)) > 0 {
  5993  				sumBits_Q6 = sumBits_Q6 + (int32(*(*int16)(unsafe.Pointer(nBits_ptr + 19*2))))
  5994  			} else {
  5995  				sumBits_Q6 = sumBits_Q6 + (int32(*(*int16)(unsafe.Pointer(nBits_ptr + uintptr(*(*int32)(unsafe.Pointer(bp + 2072 /* &sum_pulses[0] */ + uintptr(i)*4)))*2))))
  5996  			}
  5997  		}
  5998  		if sumBits_Q6 < minSumBits_Q6 {
  5999  			minSumBits_Q6 = sumBits_Q6
  6000  			RateLevelIndex = k
  6001  		}
  6002  	}
  6003  	SKP_Silk_range_encoder(tls, psRC, RateLevelIndex, (uintptr(unsafe.Pointer(&SKP_Silk_rate_levels_CDF)) + uintptr(sigtype)*20))
  6004  
  6005  	/***************************************************/
  6006  	/* Sum-Weighted-Pulses Encoding                    */
  6007  	/***************************************************/
  6008  	cdf_ptr = (uintptr(unsafe.Pointer(&SKP_Silk_pulses_per_block_CDF)) + uintptr(RateLevelIndex)*42)
  6009  	for i = 0; i < iter; i++ {
  6010  		if *(*int32)(unsafe.Pointer(bp + 1952 /* &nRshifts[0] */ + uintptr(i)*4)) == 0 {
  6011  			SKP_Silk_range_encoder(tls, psRC, *(*int32)(unsafe.Pointer(bp + 2072 /* &sum_pulses[0] */ + uintptr(i)*4)), cdf_ptr)
  6012  		} else {
  6013  			SKP_Silk_range_encoder(tls, psRC, (18 + 1), cdf_ptr)
  6014  			for k = 0; k < (*(*int32)(unsafe.Pointer(bp + 1952 /* &nRshifts[0] */ + uintptr(i)*4)) - 1); k++ {
  6015  				SKP_Silk_range_encoder(tls, psRC, (18 + 1), (uintptr(unsafe.Pointer(&SKP_Silk_pulses_per_block_CDF)) + 9*42))
  6016  			}
  6017  			SKP_Silk_range_encoder(tls, psRC, *(*int32)(unsafe.Pointer(bp + 2072 /* &sum_pulses[0] */ + uintptr(i)*4)), (uintptr(unsafe.Pointer(&SKP_Silk_pulses_per_block_CDF)) + 9*42))
  6018  		}
  6019  	}
  6020  
  6021  	/******************/
  6022  	/* Shell Encoding */
  6023  	/******************/
  6024  	for i = 0; i < iter; i++ {
  6025  		if *(*int32)(unsafe.Pointer(bp + 2072 /* &sum_pulses[0] */ + uintptr(i)*4)) > 0 {
  6026  			SKP_Silk_shell_encoder(tls, psRC, (bp + 32 /* &abs_pulses */ + uintptr((i*16))*4))
  6027  		}
  6028  	}
  6029  
  6030  	/****************/
  6031  	/* LSB Encoding */
  6032  	/****************/
  6033  	for i = 0; i < iter; i++ {
  6034  		if *(*int32)(unsafe.Pointer(bp + 1952 /* &nRshifts[0] */ + uintptr(i)*4)) > 0 {
  6035  			pulses_ptr = (q + uintptr((i * 16)))
  6036  			nLS = (*(*int32)(unsafe.Pointer(bp + 1952 /* &nRshifts[0] */ + uintptr(i)*4)) - 1)
  6037  			for k = 0; k < 16; k++ {
  6038  				abs_q = int32(func() int8 {
  6039  					if (int32(*(*int8)(unsafe.Pointer(pulses_ptr + uintptr(k))))) > 0 {
  6040  						return *(*int8)(unsafe.Pointer(pulses_ptr + uintptr(k)))
  6041  					}
  6042  					return int8(-int32(*(*int8)(unsafe.Pointer(pulses_ptr + uintptr(k)))))
  6043  				}())
  6044  				for j = nLS; j > 0; j-- {
  6045  					bit = (((abs_q) >> (j)) & 1)
  6046  					SKP_Silk_range_encoder(tls, psRC, bit, uintptr(unsafe.Pointer(&SKP_Silk_lsb_CDF)))
  6047  				}
  6048  				bit = (abs_q & 1)
  6049  				SKP_Silk_range_encoder(tls, psRC, bit, uintptr(unsafe.Pointer(&SKP_Silk_lsb_CDF)))
  6050  			}
  6051  		}
  6052  	}
  6053  
  6054  	/****************/
  6055  	/* Encode signs */
  6056  	/****************/
  6057  	SKP_Silk_encode_signs(tls, psRC, q, frame_length, sigtype, QuantOffsetType, RateLevelIndex)
  6058  }
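
// Illustrative sketch (hypothetical helper, not part of the ccgo translation):
// per the shift loop and LSB loop above, the shell coder sees |q[k]| reduced by
// nRshifts right-shifts (abs_pulses), while the LSB loop then range-codes the
// nRshifts least-significant bits of the original |q[k]|, most-significant of
// those bits first. exampleLSBBits lists the bits for one sample in encoding
// order, assuming nRshifts > 0 as in the guard above.
func exampleLSBBits(q int8, nRshifts int32) []int32 {
	absQ := int32(q)
	if absQ < 0 {
		absQ = -absQ
	}
	nLS := nRshifts - 1
	bits := make([]int32, 0, nRshifts)
	for j := nLS; j > 0; j-- { // bits nLS .. 1, as in the inner loop above
		bits = append(bits, (absQ>>j)&1)
	}
	return append(bits, absQ&1) // finally bit 0
}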
  6059  
  6060  /***********************************************************************
  6061  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  6062  Redistribution and use in source and binary forms, with or without
  6063  modification, (subject to the limitations in the disclaimer below)
  6064  are permitted provided that the following conditions are met:
  6065  - Redistributions of source code must retain the above copyright notice,
  6066  this list of conditions and the following disclaimer.
  6067  - Redistributions in binary form must reproduce the above copyright
  6068  notice, this list of conditions and the following disclaimer in the
  6069  documentation and/or other materials provided with the distribution.
  6070  - Neither the name of Skype Limited, nor the names of specific
  6071  contributors, may be used to endorse or promote products derived from
  6072  this software without specific prior written permission.
  6073  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  6074  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  6075  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  6076  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  6077  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  6078  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  6079  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  6080  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  6081  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  6082  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  6083  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  6084  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  6085  ***********************************************************************/
  6140  
  6141  /****************************************/
  6142  /* Encoder functions                    */
  6143  /****************************************/
  6144  
  6145  func SKP_Silk_SDK_Get_Encoder_Size(tls *libc.TLS, encSizeBytes uintptr) int32 { /* SKP_Silk_enc_API.c:41:9: */
  6146  	var ret int32 = 0
  6147  
  6148  	*(*int32)(unsafe.Pointer(encSizeBytes)) = int32(unsafe.Sizeof(SKP_Silk_encoder_state_FIX{}))
  6149  
  6150  	return ret
  6151  }
  6152  
  6153  /***************************************/
  6154  /* Read control structure from encoder */
  6155  /***************************************/
  6156  func SKP_Silk_SDK_QueryEncoder(tls *libc.TLS, encState uintptr, encStatus uintptr) int32 { /* SKP_Silk_enc_API.c:54:9: */
  6157  	var psEnc uintptr
  6158  	var ret int32 = 0
  6159  
  6160  	psEnc = encState
  6161  
  6162  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).FAPI_sampleRate = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz
  6163  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).FmaxInternalSampleRate = ((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FmaxInternal_fs_kHz))) * (int32(int16(1000))))
  6164  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).FpacketSize = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz * (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketSize_ms) / (1000)) /* convert ms -> samples */
  6165  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).FbitRate = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FTargetRate_bps
  6166  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).FpacketLossPercentage = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketLoss_perc
  6167  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).Fcomplexity = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FComplexity
  6168  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).FuseInBandFEC = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseInBandFEC
  6169  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).FuseDTX = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseDTX
  6170  	return ret
  6171  }
  6172  
  6173  /*************************/
  6174  /* Init or Reset encoder */
  6175  /*************************/
  6176  func SKP_Silk_SDK_InitEncoder(tls *libc.TLS, encState uintptr, encStatus uintptr) int32 { /* SKP_Silk_enc_API.c:78:9: */
  6177  	var psEnc uintptr
  6178  	var ret int32 = 0
  6179  
  6180  	psEnc = encState
  6181  
  6182  	/* Reset Encoder */
  6183  	if libc.AssignAddInt32(&ret, SKP_Silk_init_encoder_FIX(tls, psEnc)) != 0 {
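		// Empty branch: in the upstream SILK C sources this body is likely an
		// SKP_assert that is compiled out of this build; the same applies to
		// the similar empty branches further below.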
  6184  
  6185  	}
  6186  
  6187  	/* Read control structure */
  6188  	if libc.AssignAddInt32(&ret, SKP_Silk_SDK_QueryEncoder(tls, encState, encStatus)) != 0 {
  6189  
  6190  	}
  6191  
  6192  	return ret
  6193  }
  6194  
  6195  /**************************/
  6196  /* Encode frame with Silk */
  6197  /**************************/
  6198  func SKP_Silk_SDK_Encode(tls *libc.TLS, encState uintptr, encControl uintptr, samplesIn uintptr, nSamplesIn int32, outData uintptr, nBytesOut uintptr) int32 { /* SKP_Silk_enc_API.c:106:9: */
  6199  	bp := tls.Alloc(2)
  6200  	defer tls.Free(2)
  6201  
  6202  	var max_internal_fs_kHz int32
  6203  	var PacketSize_ms int32
  6204  	var PacketLoss_perc int32
  6205  	var UseInBandFEC int32
  6206  	var UseDTX int32
  6207  	var ret int32 = 0
  6208  	var nSamplesToBuffer int32
  6209  	var Complexity int32
  6210  	var input_10ms int32
  6211  	var nSamplesFromInput int32 = 0
  6212  	var TargetRate_bps int32
  6213  	var API_fs_Hz int32
  6214  	// var MaxBytesOut int16 at bp, 2
  6215  
  6216  	var psEnc uintptr = encState
  6217  
  6218  	/* Check sampling frequency first, to avoid divide by zero later */
  6219  	if ((((((((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate != 8000) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate != 12000)) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate != 16000)) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate != 24000)) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate != 32000)) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate != 44100)) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate != 48000)) || (((((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FmaxInternalSampleRate != 8000) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FmaxInternalSampleRate != 12000)) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FmaxInternalSampleRate != 16000)) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FmaxInternalSampleRate != 24000)) {
  6220  		ret = -2
  6221  
  6222  		return ret
  6223  	}
  6224  
  6225  	/* Set encoder parameters from control structure */
  6226  	API_fs_Hz = (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate
  6227  	max_internal_fs_kHz = (((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FmaxInternalSampleRate >> 10) + 1) /* convert Hz -> kHz */
  6228  	PacketSize_ms = ((1000 * (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FpacketSize) / (API_fs_Hz))
  6229  	TargetRate_bps = (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FbitRate
  6230  	PacketLoss_perc = (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FpacketLossPercentage
  6231  	UseInBandFEC = (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FuseInBandFEC
  6232  	Complexity = (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).Fcomplexity
  6233  	UseDTX = (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FuseDTX
  6234  
  6235  	/* Save values in state */
  6236  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz = API_fs_Hz
  6237  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FmaxInternal_fs_kHz = max_internal_fs_kHz
  6238  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseInBandFEC = UseInBandFEC
  6239  
  6240  	/* Only accept input lengths that are a multiple of 10 ms */
  6241  	input_10ms = ((100 * nSamplesIn) / (API_fs_Hz))
  6242  	if ((input_10ms * API_fs_Hz) != (100 * nSamplesIn)) || (nSamplesIn < 0) {
  6243  		ret = -1
  6244  
  6245  		return ret
  6246  	}
  6247  
  6248  	TargetRate_bps = func() int32 {
  6249  		if (5000) > (100000) {
  6250  			return func() int32 {
  6251  				if (TargetRate_bps) > (5000) {
  6252  					return 5000
  6253  				}
  6254  				return func() int32 {
  6255  					if (TargetRate_bps) < (100000) {
  6256  						return 100000
  6257  					}
  6258  					return TargetRate_bps
  6259  				}()
  6260  			}()
  6261  		}
  6262  		return func() int32 {
  6263  			if (TargetRate_bps) > (100000) {
  6264  				return 100000
  6265  			}
  6266  			return func() int32 {
  6267  				if (TargetRate_bps) < (5000) {
  6268  					return 5000
  6269  				}
  6270  				return TargetRate_bps
  6271  			}()
  6272  		}()
  6273  	}()
  6274  	if (libc.AssignInt32(&ret, SKP_Silk_control_encoder_FIX(tls, psEnc, PacketSize_ms, TargetRate_bps,
  6275  		PacketLoss_perc, UseDTX, Complexity))) != 0 {
  6276  
  6277  		return ret
  6278  	}
  6279  
  6280  	/* Make sure no more than one packet can be produced */
  6281  	if (1000 * nSamplesIn) > ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketSize_ms * API_fs_Hz) {
  6282  		ret = -1
  6283  
  6284  		return ret
  6285  	}
  6286  
  6287  	/* Detect energy above 8 kHz */
  6288  	if (((func() int32 {
  6289  		if (API_fs_Hz) < (1000 * max_internal_fs_kHz) {
  6290  			return API_fs_Hz
  6291  		}
  6292  		return (1000 * max_internal_fs_kHz)
  6293  	}()) == 24000) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsSWBdetect.FSWB_detected == 0)) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsSWBdetect.FWB_detected == 0) {
  6294  		SKP_Silk_detect_SWB_input(tls, (psEnc /* &.sCmn */ + 18568 /* &.sSWBdetect */), samplesIn, nSamplesIn)
  6295  	}
  6296  
  6297  	/* Input buffering/resampling and encoding */
  6298  	*(*int16)(unsafe.Pointer(bp /* MaxBytesOut */)) = int16(0) /* return 0 output bytes if no encoder called */
  6299  	for 1 != 0 {
  6300  		nSamplesToBuffer = ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinputBufIx)
  6301  		if API_fs_Hz == ((int32(int16(1000))) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz)))) {
  6302  			nSamplesToBuffer = SKP_min_int(tls, nSamplesToBuffer, nSamplesIn)
  6303  			nSamplesFromInput = nSamplesToBuffer
  6304  			/* Copy to buffer */
  6305  			libc.Xmemcpy(tls, ((psEnc /* &.sCmn */ + 15272 /* &.inputBuf */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinputBufIx)*2), samplesIn, (uint64(nSamplesFromInput) * uint64(unsafe.Sizeof(int16(0)))))
  6306  		} else {
  6307  			nSamplesToBuffer = func() int32 {
  6308  				if (nSamplesToBuffer) < ((10 * input_10ms) * (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz) {
  6309  					return nSamplesToBuffer
  6310  				}
  6311  				return ((10 * input_10ms) * (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz)
  6312  			}()
  6313  			nSamplesFromInput = ((nSamplesToBuffer * API_fs_Hz) / ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz * 1000))
  6314  			/* Resample and write to buffer */
  6315  			ret = ret + (SKP_Silk_resampler(tls, (psEnc /* &.sCmn */ + 18360 /* &.resampler_state */),
  6316  				((psEnc /* &.sCmn */ + 15272 /* &.inputBuf */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinputBufIx)*2), samplesIn, nSamplesFromInput))
  6317  		}
  6318  		samplesIn += 2 * (uintptr(nSamplesFromInput))
  6319  		nSamplesIn = nSamplesIn - (nSamplesFromInput)
  6320  		*(*int32)(unsafe.Pointer(psEnc /* &.sCmn */ + 16232 /* &.inputBufIx */)) += (nSamplesToBuffer)
  6321  
  6322  		/* Silk encoder */
  6323  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinputBufIx >= (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length {
  6324  
  6325  			/* Enough data in input buffer, so encode */
  6326  			if int32(*(*int16)(unsafe.Pointer(bp /* MaxBytesOut */))) == 0 {
  6327  				/* No payload obtained so far */
  6328  				*(*int16)(unsafe.Pointer(bp /* MaxBytesOut */)) = *(*int16)(unsafe.Pointer(nBytesOut))
  6329  				if (libc.AssignInt32(&ret, SKP_Silk_encode_frame_FIX(tls, psEnc, outData, bp /* &MaxBytesOut */, psEnc /* &.sCmn */ +15272 /* &.inputBuf */))) != 0 {
  6330  
  6331  				}
  6332  			} else {
  6333  				/* outData already contains a payload */
  6334  				if (libc.AssignInt32(&ret, SKP_Silk_encode_frame_FIX(tls, psEnc, outData, nBytesOut, psEnc /* &.sCmn */ +15272 /* &.inputBuf */))) != 0 {
  6335  
  6336  				}
  6337  				/* Check that no second payload was created */
  6338  
  6339  			}
  6340  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinputBufIx = 0
  6341  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fcontrolled_since_last_payload = 0
  6342  
  6343  			if nSamplesIn == 0 {
  6344  				break
  6345  			}
  6346  		} else {
  6347  			break
  6348  		}
  6349  	}
  6350  
  6351  	*(*int16)(unsafe.Pointer(nBytesOut)) = *(*int16)(unsafe.Pointer(bp /* MaxBytesOut */))
  6352  	if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseDTX != 0) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinDTX != 0) {
  6353  		/* DTX simulation */
  6354  		*(*int16)(unsafe.Pointer(nBytesOut)) = int16(0)
  6355  	}
  6356  
  6357  	return ret
  6358  }
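
// Illustrative usage sketch (not part of the ccgo translation above): a
// minimal wiring of SKP_Silk_SDK_InitEncoder and SKP_Silk_SDK_Encode for a
// single call. The helper name exampleEncodeOnce and the 24 kHz / 20 ms /
// 24 kbps settings are assumptions chosen for the example. Per the code above,
// FpacketSize is given in samples, nBytesOut carries the output buffer
// capacity on input and the payload length on output, and pcm/payload are
// assumed to point at libc-side memory (for example obtained via tls.Alloc).
func exampleEncodeOnce(tls *libc.TLS, pcm uintptr, nSamples int32, payload uintptr, payloadCap int16) (int16, int32) {
	encSize := int(unsafe.Sizeof(SKP_Silk_encoder_state_FIX{}))
	ctrlSize := int(unsafe.Sizeof(SKP_SILK_SDK_EncControlStruct{}))
	bp := tls.Alloc(encSize + ctrlSize + 2)
	defer tls.Free(encSize + ctrlSize + 2)
	enc := bp
	ctrl := bp + uintptr(encSize)
	nBytesOut := ctrl + uintptr(ctrlSize)

	// Reset the encoder; this also writes the default settings into ctrl.
	if ret := SKP_Silk_SDK_InitEncoder(tls, enc, ctrl); ret != 0 {
		return 0, ret
	}

	// Overwrite the defaults with the desired (example) settings.
	c := (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(ctrl))
	c.FAPI_sampleRate = 24000
	c.FmaxInternalSampleRate = 24000
	c.FpacketSize = 20 * 24000 / 1000 // 20 ms expressed in samples at 24 kHz
	c.FbitRate = 24000
	c.FpacketLossPercentage = 0
	c.Fcomplexity = 2
	c.FuseInBandFEC = 0
	c.FuseDTX = 0

	// nSamples must cover a multiple of 10 ms and at most one packet.
	*(*int16)(unsafe.Pointer(nBytesOut)) = payloadCap
	ret := SKP_Silk_SDK_Encode(tls, enc, ctrl, pcm, nSamples, payload, nBytesOut)
	return *(*int16)(unsafe.Pointer(nBytesOut)), ret
}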
  6359  
  6360  /***********************************************************************
  6361  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  6362  Redistribution and use in source and binary forms, with or without
  6363  modification, (subject to the limitations in the disclaimer below)
  6364  are permitted provided that the following conditions are met:
  6365  - Redistributions of source code must retain the above copyright notice,
  6366  this list of conditions and the following disclaimer.
  6367  - Redistributions in binary form must reproduce the above copyright
  6368  notice, this list of conditions and the following disclaimer in the
  6369  documentation and/or other materials provided with the distribution.
  6370  - Neither the name of Skype Limited, nor the names of specific
  6371  contributors, may be used to endorse or promote products derived from
  6372  this software without specific prior written permission.
  6373  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  6374  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  6375  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  6376  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  6377  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  6378  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  6379  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  6380  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  6381  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  6382  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  6383  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  6384  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  6385  ***********************************************************************/
  6386  
  6387  /******************/
  6388  /* Error messages */
  6389  /******************/
  6390  
  6391  /**************************/
  6392  /* Encoder error messages */
  6393  /**************************/
  6394  
  6395  /* Input length is not a multiple of 10 ms, or length is longer than the packet length */
  6396  
  6397  /* Sampling frequency not 8000, 12000, 16000 or 24000 Hertz */
  6398  
  6399  /* Packet size not 20, 40, 60, 80 or 100 ms */
  6400  
  6401  /* Allocated payload buffer too short */
  6402  
  6403  /* Loss rate not between 0 and 100 percent */
  6404  
  6405  /* Complexity setting not valid, use 0, 1 or 2 */
  6406  
  6407  /* Inband FEC setting not valid, use 0 or 1 */
  6408  
  6409  /* DTX setting not valid, use 0 or 1 */
  6410  
  6411  /* Internal encoder error */
  6412  
  6413  /**************************/
  6414  /* Decoder error messages */
  6415  /**************************/
  6416  
  6417  /* Output sampling frequency lower than internal decoded sampling frequency */
  6418  
  6419  /* Payload size exceeded the maximum allowed 1024 bytes */
  6420  
  6421  /* Payload has bit errors */
  6422  
  6423  /***********************************************************************
  6424  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  6425  Redistribution and use in source and binary forms, with or without
  6426  modification, (subject to the limitations in the disclaimer below)
  6427  are permitted provided that the following conditions are met:
  6428  - Redistributions of source code must retain the above copyright notice,
  6429  this list of conditions and the following disclaimer.
  6430  - Redistributions in binary form must reproduce the above copyright
  6431  notice, this list of conditions and the following disclaimer in the
  6432  documentation and/or other materials provided with the distribution.
  6433  - Neither the name of Skype Limited, nor the names of specific
  6434  contributors, may be used to endorse or promote products derived from
  6435  this software without specific prior written permission.
  6436  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  6437  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  6438  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  6439  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  6440  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  6441  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  6442  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  6443  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  6444  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  6445  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  6446  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  6447  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  6448  ***********************************************************************/
  6449  
  6450  /*******************/
  6451  /* Pitch estimator */
  6452  /*******************/
  6453  
  6454  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
  6455  
  6456  /* Bandwidth expansion for whitening filter in pitch analysis */
  6457  
  6458  /* Threshold used by pitch estimator for early escape */
  6459  
  6460  /*********************/
  6461  /* Linear prediction */
  6462  /*********************/
  6463  
  6464  /* LPC analysis defines: regularization and bandwidth expansion */
  6465  
  6466  /* LTP analysis defines */
  6467  
  6468  /* LTP quantization settings */
  6469  
  6470  /***********************/
  6471  /* High pass filtering */
  6472  /***********************/
  6473  
  6474  /* Smoothing parameters for low end of pitch frequency range estimation */
  6475  
  6476  /* Min and max values for low end of pitch frequency range estimation */
  6477  
  6478  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
  6479  
  6480  /***********/
  6481  /* Various */
  6482  /***********/
  6483  
  6484  /* Required speech activity for counting frame as active */
  6485  
  6486  /* Speech Activity LBRR enable threshold (needs tuning) */
  6487  
  6488  /*************************/
  6489  /* Perceptual parameters */
  6490  /*************************/
  6491  
  6492  /* reduction in coding SNR during low speech activity */
  6493  
  6494  /* factor for reducing quantization noise during voiced speech */
  6495  
  6496  /* factor for reducing quantization noise for unvoiced sparse signals */
  6497  
  6498  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
  6499  
  6500  /* warping control */
  6501  
  6502  /* fraction added to first autocorrelation value */
  6503  
  6504  /* noise shaping filter chirp factor */
  6505  
  6506  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
  6507  
  6508  /* gain reduction for fricatives */
  6509  
  6510  /* extra harmonic boosting (signal shaping) at low bitrates */
  6511  
  6512  /* extra harmonic boosting (signal shaping) for noisy input signals */
  6513  
  6514  /* harmonic noise shaping */
  6515  
  6516  /* extra harmonic noise shaping for high bitrates or noisy input */
  6517  
  6518  /* parameter for shaping noise towards higher frequencies */
  6519  
  6520  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
  6521  
  6522  /* parameter for applying a high-pass tilt to the input signal */
  6523  
  6524  /* parameter for extra high-pass tilt to the input signal at high rates */
  6525  
  6526  /* parameter for reducing noise at the very low frequencies */
  6527  
  6528  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
  6529  
  6530  /* noise floor to put a lower limit on the quantization step size */
  6531  
  6532  /* noise floor relative to active speech gain level */
  6533  
  6534  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
  6535  
  6536  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
  6537  
  6538  /* parameters defining the R/D tradeoff in the residual quantizer */
  6539  
  6540  /* Finds LPC vector from correlations, and converts to NLSF */
  6541  func SKP_Silk_find_LPC_FIX(tls *libc.TLS, NLSF_Q15 uintptr, interpIndex uintptr, prev_NLSFq_Q15 uintptr, useInterpolatedNLSFs int32, LPC_order int32, x uintptr, subfr_length int32) { /* SKP_Silk_find_LPC_FIX.c:32:6: */
  6542  	bp := tls.Alloc(832)
  6543  	defer tls.Free(832)
  6544  
  6545  	var k int32
  6546  	// var a_Q16 [16]int32 at bp+8, 64
  6547  
  6548  	var isInterpLower int32
  6549  	var shift int32
  6550  	// var S [16]int16 at bp+240, 32
  6551  
  6552  	// var res_nrg0 int32 at bp+816, 4
  6553  
  6554  	// var res_nrg1 int32 at bp+824, 4
  6555  
  6556  	// var rshift0 int32 at bp+820, 4
  6557  
  6558  	// var rshift1 int32 at bp+828, 4
  6559  
  6560  	/* Used only for LSF interpolation */
  6561  	// var a_tmp_Q16 [16]int32 at bp+80, 64
  6562  
  6563  	var res_nrg_interp int32
  6564  	// var res_nrg int32 at bp, 4
  6565  
  6566  	// var res_tmp_nrg int32 at bp+72, 4
  6567  
  6568  	var res_nrg_interp_Q int32
  6569  	// var res_nrg_Q int32 at bp+4, 4
  6570  
  6571  	// var res_tmp_nrg_Q int32 at bp+76, 4
  6572  
  6573  	// var a_tmp_Q12 [16]int16 at bp+208, 32
  6574  
  6575  	// var NLSF0_Q15 [16]int32 at bp+144, 64
  6576  
  6577  	// var LPC_res [272]int16 at bp+272, 544
  6578  
  6579  	/* Default: no interpolation */
  6580  	*(*int32)(unsafe.Pointer(interpIndex)) = 4
  6581  
  6582  	/* Burg AR analysis for the full frame */
  6583  	SKP_Silk_burg_modified(tls, bp /* &res_nrg */, bp+4 /* &res_nrg_Q */, bp+8 /* &a_Q16[0] */, x, subfr_length, 4, SKP_FIX_CONST(tls, 2.5e-5, 32), LPC_order)
  6584  
  6585  	SKP_Silk_bwexpander_32(tls, bp+8 /* &a_Q16[0] */, LPC_order, SKP_FIX_CONST(tls, 0.99995, 16))
  6586  
  6587  	if useInterpolatedNLSFs == 1 {
  6588  
  6589  		/* Optimal solution for last 10 ms */
  6590  		SKP_Silk_burg_modified(tls, bp+72 /* &res_tmp_nrg */, bp+76 /* &res_tmp_nrg_Q */, bp+80 /* &a_tmp_Q16[0] */, (x + uintptr(((int32(4)>>1)*subfr_length))*2),
  6591  			subfr_length, (int32(4) >> 1), SKP_FIX_CONST(tls, 2.5e-5, 32), LPC_order)
  6592  
  6593  		SKP_Silk_bwexpander_32(tls, bp+80 /* &a_tmp_Q16[0] */, LPC_order, SKP_FIX_CONST(tls, 0.99995, 16))
  6594  
  6595  		/* subtract residual energy here, as that's easier than adding it to the    */
  6596  		/* residual energy of the first 10 ms in each iteration of the search below */
  6597  		shift = (*(*int32)(unsafe.Pointer(bp + 76 /* res_tmp_nrg_Q */)) - *(*int32)(unsafe.Pointer(bp + 4 /* res_nrg_Q */)))
  6598  		if shift >= 0 {
  6599  			if shift < 32 {
  6600  				*(*int32)(unsafe.Pointer(bp /* res_nrg */)) = (*(*int32)(unsafe.Pointer(bp /* res_nrg */)) - ((*(*int32)(unsafe.Pointer(bp + 72 /* res_tmp_nrg */))) >> (shift)))
  6601  			}
  6602  		} else {
  6603  
  6604  			*(*int32)(unsafe.Pointer(bp /* res_nrg */)) = (((*(*int32)(unsafe.Pointer(bp /* res_nrg */))) >> (-shift)) - *(*int32)(unsafe.Pointer(bp + 72 /* res_tmp_nrg */)))
  6605  			*(*int32)(unsafe.Pointer(bp + 4 /* res_nrg_Q */)) = *(*int32)(unsafe.Pointer(bp + 76 /* res_tmp_nrg_Q */))
  6606  		}
  6607  
  6608  		/* Convert to NLSFs */
  6609  		SKP_Silk_A2NLSF(tls, NLSF_Q15, bp+80 /* &a_tmp_Q16[0] */, LPC_order)
  6610  
  6611  		/* Search over interpolation indices to find the one with lowest residual energy */
  6612  		for k = 3; k >= 0; k-- {
  6613  			/* Interpolate NLSFs for first half */
  6614  			SKP_Silk_interpolate(tls, bp+144 /* &NLSF0_Q15[0] */, prev_NLSFq_Q15, NLSF_Q15, k, LPC_order)
  6615  
  6616  			/* Convert to LPC for residual energy evaluation */
  6617  			SKP_Silk_NLSF2A_stable(tls, bp+208 /* &a_tmp_Q12[0] */, bp+144 /* &NLSF0_Q15[0] */, LPC_order)
  6618  
  6619  			/* Calculate residual energy with NLSF interpolation */
  6620  			libc.Xmemset(tls, bp+240 /* &S[0] */, 0, (uint64(LPC_order) * uint64(unsafe.Sizeof(int16(0)))))
  6621  			SKP_Silk_LPC_analysis_filter(tls, x, bp+208 /* &a_tmp_Q12[0] */, bp+240 /* &S[0] */, bp+272 /* &LPC_res[0] */, (2 * subfr_length), LPC_order)
  6622  
  6623  			SKP_Silk_sum_sqr_shift(tls, bp+816 /* &res_nrg0 */, bp+820 /* &rshift0 */, (bp + 272 /* &LPC_res[0] */ + uintptr(LPC_order)*2), (subfr_length - LPC_order))
  6624  			SKP_Silk_sum_sqr_shift(tls, bp+824 /* &res_nrg1 */, bp+828 /* &rshift1 */, ((bp + 272 /* &LPC_res[0] */ + uintptr(LPC_order)*2) + uintptr(subfr_length)*2), (subfr_length - LPC_order))
  6625  
  6626  			/* Add subframe energies from first half frame */
  6627  			shift = (*(*int32)(unsafe.Pointer(bp + 820 /* rshift0 */)) - *(*int32)(unsafe.Pointer(bp + 828 /* rshift1 */)))
  6628  			if shift >= 0 {
  6629  				*(*int32)(unsafe.Pointer(bp + 824 /* res_nrg1 */)) = ((*(*int32)(unsafe.Pointer(bp + 824 /* res_nrg1 */))) >> (shift))
  6630  				res_nrg_interp_Q = -*(*int32)(unsafe.Pointer(bp + 820 /* rshift0 */))
  6631  			} else {
  6632  				*(*int32)(unsafe.Pointer(bp + 816 /* res_nrg0 */)) = ((*(*int32)(unsafe.Pointer(bp + 816 /* res_nrg0 */))) >> (-shift))
  6633  				res_nrg_interp_Q = -*(*int32)(unsafe.Pointer(bp + 828 /* rshift1 */))
  6634  			}
  6635  			res_nrg_interp = ((*(*int32)(unsafe.Pointer(bp + 816 /* res_nrg0 */))) + (*(*int32)(unsafe.Pointer(bp + 824 /* res_nrg1 */))))
  6636  
  6637  			/* Compare with first half energy without NLSF interpolation, or best interpolated value so far */
  6638  			shift = (res_nrg_interp_Q - *(*int32)(unsafe.Pointer(bp + 4 /* res_nrg_Q */)))
  6639  			if shift >= 0 {
  6640  				if ((res_nrg_interp) >> (shift)) < *(*int32)(unsafe.Pointer(bp /* res_nrg */)) {
  6641  					isInterpLower = 1
  6642  				} else {
  6643  					isInterpLower = 0
  6644  				}
  6645  			} else {
  6646  				if -shift < 32 {
  6647  					if res_nrg_interp < ((*(*int32)(unsafe.Pointer(bp /* res_nrg */))) >> (-shift)) {
  6648  						isInterpLower = 1
  6649  					} else {
  6650  						isInterpLower = 0
  6651  					}
  6652  				} else {
  6653  					isInterpLower = 0
  6654  				}
  6655  			}
  6656  
  6657  			/* Determine whether current interpolated NLSFs are best so far */
  6658  			if isInterpLower == 1 {
  6659  				/* Interpolation has lower residual energy */
  6660  				*(*int32)(unsafe.Pointer(bp /* res_nrg */)) = res_nrg_interp
  6661  				*(*int32)(unsafe.Pointer(bp + 4 /* res_nrg_Q */)) = res_nrg_interp_Q
  6662  				*(*int32)(unsafe.Pointer(interpIndex)) = k
  6663  			}
  6664  		}
  6665  	}
  6666  
  6667  	if *(*int32)(unsafe.Pointer(interpIndex)) == 4 {
  6668  		/* NLSF interpolation is currently inactive, calculate NLSFs from full frame AR coefficients */
  6669  		SKP_Silk_A2NLSF(tls, NLSF_Q15, bp+8 /* &a_Q16[0] */, LPC_order)
  6670  	}
  6671  }
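
// Illustrative sketch (hypothetical helper, not part of the ccgo translation):
// the interpolation search above compares residual energies that live in
// different Q-domains by shifting the value with the larger Q down into the
// other value's domain, mirroring the shift/isInterpLower logic. exampleLessInQ
// reports whether energy a (in Q aQ) is smaller than energy b (in Q bQ) under
// that convention, including the >= 32-bit-shift guard used above.
func exampleLessInQ(a, aQ, b, bQ int32) bool {
	shift := aQ - bQ
	if shift >= 0 {
		return (a >> shift) < b // bring a down to b's Q-domain
	}
	if -shift < 32 {
		return a < (b >> (-shift)) // bring b down to a's Q-domain
	}
	return false // shift too large to compare meaningfully; treated as "not smaller"
}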
  6672  
  6673  func SKP_Silk_find_LTP_FIX(tls *libc.TLS, b_Q14 uintptr, WLTP uintptr, LTPredCodGain_Q7 uintptr, r_first uintptr, r_last uintptr, lag uintptr, Wght_Q15 uintptr, subfr_length int32, mem_offset int32, corr_rshifts uintptr) { /* SKP_Silk_find_LTP_FIX.c:39:6: */
  6674  	bp := tls.Alloc(128)
  6675  	defer tls.Free(128)
  6676  
  6677  	var i int32
  6678  	var k int32
  6679  	var lshift int32
  6680  	var r_ptr uintptr
  6681  	var lag_ptr uintptr
  6682  	var b_Q14_ptr uintptr
  6683  	var regu int32
  6684  	var WLTP_ptr uintptr
  6685  	// var b_Q16 [5]int32 at bp+40, 20
  6686  
  6687  	// var delta_b_Q14 [5]int32 at bp+108, 20
  6688  
  6689  	// var d_Q14 [4]int32 at bp+92, 16
  6690  
  6691  	// var nrg [4]int32 at bp+60, 16
  6692  
  6693  	var g_Q26 int32
  6694  	// var w [4]int32 at bp+76, 16
  6695  
  6696  	var WLTP_max int32
  6697  	var max_abs_d_Q14 int32
  6698  	var max_w_bits int32
  6699  	var temp32 int32
  6700  	var denom32 int32
  6701  	var extra_shifts int32
  6702  	// var rr_shifts int32 at bp+16, 4
  6703  
  6704  	var maxRshifts int32
  6705  	var maxRshifts_wxtra int32
  6706  	var LZs int32
  6707  	var LPC_res_nrg int32
  6708  	var LPC_LTP_res_nrg int32
  6709  	var div_Q16 int32
  6710  	// var Rr [5]int32 at bp+20, 20
  6711  
  6712  	// var rr [4]int32 at bp, 16
  6713  
  6714  	var wd int32
  6715  	var m_Q12 int32
  6716  
  6717  	b_Q14_ptr = b_Q14
  6718  	WLTP_ptr = WLTP
  6719  	r_ptr = (r_first + uintptr(mem_offset)*2)
  6720  	for k = 0; k < 4; k++ {
  6721  		if k == (int32(4) >> 1) { /* shift residual for last 10 ms */
  6722  			r_ptr = (r_last + uintptr(mem_offset)*2)
  6723  		}
  6724  		lag_ptr = (r_ptr - uintptr((*(*int32)(unsafe.Pointer(lag + uintptr(k)*4))+(5/2)))*2)
  6725  
  6726  		SKP_Silk_sum_sqr_shift(tls, (bp /* &rr */ + uintptr(k)*4), bp+16 /* &rr_shifts */, r_ptr, subfr_length) /* rr[ k ] in Q( -rr_shifts ) */
  6727  
  6728  		/* Assure headroom */
  6729  		LZs = SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4)))
  6730  		if LZs < 2 {
  6731  			*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4)) = func() int32 {
  6732  				if (2 - LZs) == 1 {
  6733  					return (((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) & 1))
  6734  				}
  6735  				return ((((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) >> ((2 - LZs) - 1)) + 1) >> 1)
  6736  			}()
  6737  			*(*int32)(unsafe.Pointer(bp + 16 /* rr_shifts */)) += (2 - LZs)
  6738  		}
  6739  		*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) = *(*int32)(unsafe.Pointer(bp + 16 /* rr_shifts */))
  6740  		SKP_Silk_corrMatrix_FIX(tls, lag_ptr, subfr_length, 5, 2, WLTP_ptr, (corr_rshifts + uintptr(k)*4)) /* WLTP_fix_ptr in Q( -corr_rshifts[ k ] ) */
  6741  
  6742  		/* The correlation vector always has lower max abs value than rr and/or RR so headroom is assured */
  6743  		SKP_Silk_corrVector_FIX(tls, lag_ptr, r_ptr, subfr_length, 5, bp+20 /* &Rr[0] */, *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4))) /* Rr_fix_ptr   in Q( -corr_rshifts[ k ] ) */
  6744  		if *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) > *(*int32)(unsafe.Pointer(bp + 16 /* rr_shifts */)) {
  6745  			*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(bp + 16 /* rr_shifts */)))) /* rr[ k ] in Q( -corr_rshifts[ k ] ) */
  6746  		}
  6747  
  6748  		regu = 1
  6749  		regu = ((regu) + ((((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, (float64(float32(0.01) / float32(3))), 16))))) + ((((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, (float64(float32(0.01) / float32(3))), 16))))) >> 16)))
  6750  		regu = ((regu) + ((((*(*int32)(unsafe.Pointer((WLTP_ptr + uintptr((((0)*(5))+(0)))*4)))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, (float64(float32(0.01) / float32(3))), 16))))) + ((((*(*int32)(unsafe.Pointer((WLTP_ptr + uintptr((((0)*(5))+(0)))*4)))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, (float64(float32(0.01) / float32(3))), 16))))) >> 16)))
  6751  		regu = ((regu) + ((((*(*int32)(unsafe.Pointer((WLTP_ptr + uintptr((((5-1)*(5))+(5-1)))*4)))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, (float64(float32(0.01) / float32(3))), 16))))) + ((((*(*int32)(unsafe.Pointer((WLTP_ptr + uintptr((((5-1)*(5))+(5-1)))*4)))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, (float64(float32(0.01) / float32(3))), 16))))) >> 16)))
  6752  		SKP_Silk_regularize_correlations_FIX(tls, WLTP_ptr, (bp /* &rr */ + uintptr(k)*4), regu, 5)
  6753  
  6754  		SKP_Silk_solve_LDL_FIX(tls, WLTP_ptr, 5, bp+20 /* &Rr[0] */, bp+40 /* &b_Q16[0] */) /* WLTP_fix_ptr and Rr_fix_ptr both in Q(-corr_rshifts[k]) */
  6755  
  6756  		/* Limit and store in Q14 */
  6757  		SKP_Silk_fit_LTP(tls, bp+40 /* &b_Q16[0] */, b_Q14_ptr)
  6758  
  6759  		/* Calculate residual energy */
  6760  		*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4)) = SKP_Silk_residual_energy16_covar_FIX(tls, b_Q14_ptr, WLTP_ptr, bp+20 /* &Rr[0] */, *(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4)), 5, 14) /* nrg_fix in Q( -corr_rshifts[ k ] ) */
  6761  
  6762  		/* temp = Wght[ k ] / ( nrg[ k ] * Wght[ k ] + 0.01f * subfr_length ); */
  6763  		extra_shifts = SKP_min_int(tls, *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)), 2)
  6764  		denom32 = (((func() int32 {
  6765  			if (int32((libc.Int32FromUint32(0x80000000))) >> (1 + extra_shifts)) > (int32((0x7FFFFFFF)) >> (1 + extra_shifts)) {
  6766  				return func() int32 {
  6767  					if ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16)) > (int32((libc.Int32FromUint32(0x80000000))) >> (1 + extra_shifts)) {
  6768  						return (int32((libc.Int32FromUint32(0x80000000))) >> (1 + extra_shifts))
  6769  					}
  6770  					return func() int32 {
  6771  						if ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16)) < (int32((0x7FFFFFFF)) >> (1 + extra_shifts)) {
  6772  							return (int32((0x7FFFFFFF)) >> (1 + extra_shifts))
  6773  						}
  6774  						return ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16))
  6775  					}()
  6776  				}()
  6777  			}
  6778  			return func() int32 {
  6779  				if ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16)) > (int32((0x7FFFFFFF)) >> (1 + extra_shifts)) {
  6780  					return (int32((0x7FFFFFFF)) >> (1 + extra_shifts))
  6781  				}
  6782  				return func() int32 {
  6783  					if ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16)) < (int32((libc.Int32FromUint32(0x80000000))) >> (1 + extra_shifts)) {
  6784  						return (int32((libc.Int32FromUint32(0x80000000))) >> (1 + extra_shifts))
  6785  					}
  6786  					return ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16))
  6787  				}()
  6788  			}()
  6789  		}()) << (1 + extra_shifts)) + (((((subfr_length) >> 16) * (int32(int16(655)))) + ((((subfr_length) & 0x0000FFFF) * (int32(int16(655)))) >> 16)) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - extra_shifts))) /* Q( -corr_rshifts[ k ] + extra_shifts ) */
  6790  		denom32 = func() int32 {
  6791  			if (denom32) > (1) {
  6792  				return denom32
  6793  			}
  6794  			return 1
  6795  		}()
  6796  		/* Wght always < 0.5 in Q0 */
  6797  		temp32 = (((*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4))) << (16)) / (denom32))                        /* Q( 15 + 16 + corr_rshifts[k] - extra_shifts ) */
  6798  		temp32 = ((temp32) >> (((31 + *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4))) - extra_shifts) - 26)) /* Q26 */
  6799  
  6800  		/* Limit temp such that the below scaling never wraps around */
  6801  		WLTP_max = 0
  6802  		for i = 0; i < (5 * 5); i++ {
  6803  			WLTP_max = func() int32 {
  6804  				if (*(*int32)(unsafe.Pointer(WLTP_ptr + uintptr(i)*4))) > (WLTP_max) {
  6805  					return *(*int32)(unsafe.Pointer(WLTP_ptr + uintptr(i)*4))
  6806  				}
  6807  				return WLTP_max
  6808  			}()
  6809  		}
  6810  		lshift = ((SKP_Silk_CLZ32(tls, WLTP_max) - 1) - 3) /* keep 3 bits free for vq_nearest_neighbor_fix */
  6811  
  6812  		if ((26 - 18) + lshift) < 31 {
  6813  			temp32 = SKP_min_32(tls, temp32, (int32((1)) << ((26 - 18) + lshift)))
  6814  		}
  6815  
  6816  		SKP_Silk_scale_vector32_Q26_lshift_18(tls, WLTP_ptr, temp32, (5 * 5)) /* WLTP_ptr in Q( 18 - corr_rshifts[ k ] ) */
  6817  
  6818  		*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4)) = *(*int32)(unsafe.Pointer((WLTP_ptr + uintptr((((int32(5)>>1)*(5))+(int32(5)>>1)))*4))) /* w in Q( 18 - corr_rshifts[ k ] ) */
  6819  
  6820  		r_ptr += 2 * (uintptr(subfr_length))
  6821  		b_Q14_ptr += 2 * (uintptr(5))
  6822  		WLTP_ptr += 4 * (uintptr(5 * 5))
  6823  	}
  6824  
  6825  	maxRshifts = 0
  6826  	for k = 0; k < 4; k++ {
  6827  		maxRshifts = SKP_max_int(tls, *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)), maxRshifts)
  6828  	}
  6829  
  6830  	/* Compute LTP coding gain */
  6831  	if LTPredCodGain_Q7 != (uintptr(0)) {
  6832  		LPC_LTP_res_nrg = 0
  6833  		LPC_res_nrg = 0
  6834  		/* Check that no overflow will happen when adding */
  6835  		for k = 0; k < 4; k++ {
  6836  			LPC_res_nrg = ((LPC_res_nrg) + ((((((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16)) + (1)) >> (1 + (maxRshifts - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4))))))                     /*  Q( -maxRshifts ) */
  6837  			LPC_LTP_res_nrg = ((LPC_LTP_res_nrg) + ((((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16)) + (1)) >> (1 + (maxRshifts - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)))))) /*  Q( -maxRshifts ) */
  6838  		}
  6839  		LPC_LTP_res_nrg = func() int32 {
  6840  			if (LPC_LTP_res_nrg) > (1) {
  6841  				return LPC_LTP_res_nrg
  6842  			}
  6843  			return 1
  6844  		}() /* avoid division by zero */
  6845  
  6846  		div_Q16 = SKP_DIV32_varQ(tls, LPC_res_nrg, LPC_LTP_res_nrg, 16)
  6847  		*(*int32)(unsafe.Pointer(LTPredCodGain_Q7)) = ((int32(int16(3))) * (int32((int16(SKP_Silk_lin2log(tls, div_Q16) - (int32(16) << 7))))))
  6848  
  6849  	}
  6850  
  6851  	/* smoothing */
  6852  	/* d = sum( B, 1 ); */
  6853  	b_Q14_ptr = b_Q14
  6854  	for k = 0; k < 4; k++ {
  6855  		*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4)) = 0
  6856  		for i = 0; i < 5; i++ {
  6857  			*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14 */ + uintptr(k)*4)) += (int32(*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2))))
  6858  		}
  6859  		b_Q14_ptr += 2 * (uintptr(5))
  6860  	}
  6861  
  6862  	/* m = ( w * d' ) / ( sum( w ) + 1e-3 ); */
  6863  
  6864  	/* Find maximum absolute value of d_Q14 and the bits used by w in Q0 */
  6865  	max_abs_d_Q14 = 0
  6866  	max_w_bits = 0
  6867  	for k = 0; k < 4; k++ {
  6868  		max_abs_d_Q14 = SKP_max_32(tls, max_abs_d_Q14, func() int32 {
  6869  			if (*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) > 0 {
  6870  				return *(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))
  6871  			}
  6872  			return -*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))
  6873  		}())
  6874  		/* w[ k ] is in Q( 18 - corr_rshifts[ k ] ) */
  6875  		/* Find bits needed in Q( 18 - maxRshifts ) */
  6876  		max_w_bits = SKP_max_32(tls, max_w_bits, (((32 - SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4)))) + *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4))) - maxRshifts))
  6877  	}
  6878  
  6879  	/* max_abs_d_Q14 = (5 << 15); worst case, i.e. LTP_ORDER * -SKP_int16_MIN */
  6880  
  6881  	/* How many bits are needed for w*d' in Q( 18 - maxRshifts ) in the worst case of all d_Q14's being equal to max_abs_d_Q14 */
  6882  	extra_shifts = (((max_w_bits + 32) - SKP_Silk_CLZ32(tls, max_abs_d_Q14)) - 14)
  6883  
  6884  	/* Subtract what we have available; bits in the output variable plus maxRshifts */
  6885  	extra_shifts = extra_shifts - (((32 - 1) - 2) + maxRshifts) /* Keep sign bit free as well as 2 bits for accumulation */
  6886  	extra_shifts = SKP_max_int(tls, extra_shifts, 0)
  6887  
  6888  	maxRshifts_wxtra = (maxRshifts + extra_shifts)
  6889  
  6890  	temp32 = ((int32((262)) >> (maxRshifts + extra_shifts)) + 1) /* 1e-3f in Q( 18 - (maxRshifts + extra_shifts) ) */
  6891  	wd = 0
  6892  	for k = 0; k < 4; k++ {
  6893  		/* w has at least 2 bits of headroom so no overflow should happen */
  6894  		temp32 = ((temp32) + ((*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) >> (maxRshifts_wxtra - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4))))) /* Q( 18 - maxRshifts_wxtra ) */
  6895  		wd = ((wd) + (((((((*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) >> (maxRshifts_wxtra - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4)))))) + (((((*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) >> (maxRshifts_wxtra - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4)))))) >> 16)) + (((*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) >> (maxRshifts_wxtra - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)))) * (func() int32 {
  6896  			if (16) == 1 {
  6897  				return (((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) & 1))
  6898  			}
  6899  			return ((((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> ((16) - 1)) + 1) >> 1)
  6900  		}()))) << (2))) /* Q( 18 - maxRshifts_wxtra ) */
  6901  	}
  6902  	m_Q12 = SKP_DIV32_varQ(tls, wd, temp32, 12)
  6903  
  6904  	b_Q14_ptr = b_Q14
  6905  	for k = 0; k < 4; k++ {
  6906  		/* w_fix[ k ] from Q( 18 - corr_rshifts[ k ] ) to Q( 16 ) */
  6907  		if (2 - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4))) > 0 {
  6908  			temp32 = ((*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) >> (2 - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4))))
  6909  		} else {
  6910  			temp32 = ((func() int32 {
  6911  				if (int32((libc.Int32FromUint32(0x80000000))) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2)) > (int32((0x7FFFFFFF)) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2)) {
  6912  					return func() int32 {
  6913  						if (*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) > (int32((libc.Int32FromUint32(0x80000000))) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2)) {
  6914  							return (int32((libc.Int32FromUint32(0x80000000))) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2))
  6915  						}
  6916  						return func() int32 {
  6917  							if (*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) < (int32((0x7FFFFFFF)) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2)) {
  6918  								return (int32((0x7FFFFFFF)) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2))
  6919  							}
  6920  							return *(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))
  6921  						}()
  6922  					}()
  6923  				}
  6924  				return func() int32 {
  6925  					if (*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) > (int32((0x7FFFFFFF)) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2)) {
  6926  						return (int32((0x7FFFFFFF)) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2))
  6927  					}
  6928  					return func() int32 {
  6929  						if (*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) < (int32((libc.Int32FromUint32(0x80000000))) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2)) {
  6930  							return (int32((libc.Int32FromUint32(0x80000000))) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2))
  6931  						}
  6932  						return *(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))
  6933  					}()
  6934  				}()
  6935  			}()) << (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2))
  6936  		}
  6937  
  6938  		g_Q26 = (((SKP_FIX_CONST(tls, 0.1, 26)) / (((SKP_FIX_CONST(tls, 0.1, 26)) >> (10)) + temp32)) * ((func() int32 {
  6939  			if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  6940  				return func() int32 {
  6941  					if (func() int32 {
  6942  						if ((uint32((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) == uint32(0) {
  6943  							return func() int32 {
  6944  								if (((uint32(m_Q12)) & ((uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2))) ^ 0x80000000)) & 0x80000000) != 0 {
  6945  									return libc.Int32FromUint32(0x80000000)
  6946  								}
  6947  								return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  6948  							}()
  6949  						}
  6950  						return func() int32 {
  6951  							if ((((uint32(m_Q12)) ^ 0x80000000) & (uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) != 0 {
  6952  								return 0x7FFFFFFF
  6953  							}
  6954  							return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  6955  						}()
  6956  					}()) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  6957  						return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  6958  					}
  6959  					return func() int32 {
  6960  						if (func() int32 {
  6961  							if ((uint32((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) == uint32(0) {
  6962  								return func() int32 {
  6963  									if (((uint32(m_Q12)) & ((uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2))) ^ 0x80000000)) & 0x80000000) != 0 {
  6964  										return libc.Int32FromUint32(0x80000000)
  6965  									}
  6966  									return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  6967  								}()
  6968  							}
  6969  							return func() int32 {
  6970  								if ((((uint32(m_Q12)) ^ 0x80000000) & (uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) != 0 {
  6971  									return 0x7FFFFFFF
  6972  								}
  6973  								return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  6974  							}()
  6975  						}()) < (int32((0x7FFFFFFF)) >> (4)) {
  6976  							return (int32((0x7FFFFFFF)) >> (4))
  6977  						}
  6978  						return func() int32 {
  6979  							if ((uint32((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) == uint32(0) {
  6980  								return func() int32 {
  6981  									if (((uint32(m_Q12)) & ((uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2))) ^ 0x80000000)) & 0x80000000) != 0 {
  6982  										return libc.Int32FromUint32(0x80000000)
  6983  									}
  6984  									return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  6985  								}()
  6986  							}
  6987  							return func() int32 {
  6988  								if ((((uint32(m_Q12)) ^ 0x80000000) & (uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) != 0 {
  6989  									return 0x7FFFFFFF
  6990  								}
  6991  								return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  6992  							}()
  6993  						}()
  6994  					}()
  6995  				}()
  6996  			}
  6997  			return func() int32 {
  6998  				if (func() int32 {
  6999  					if ((uint32((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) == uint32(0) {
  7000  						return func() int32 {
  7001  							if (((uint32(m_Q12)) & ((uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2))) ^ 0x80000000)) & 0x80000000) != 0 {
  7002  								return libc.Int32FromUint32(0x80000000)
  7003  							}
  7004  							return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  7005  						}()
  7006  					}
  7007  					return func() int32 {
  7008  						if ((((uint32(m_Q12)) ^ 0x80000000) & (uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) != 0 {
  7009  							return 0x7FFFFFFF
  7010  						}
  7011  						return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  7012  					}()
  7013  				}()) > (int32((0x7FFFFFFF)) >> (4)) {
  7014  					return (int32((0x7FFFFFFF)) >> (4))
  7015  				}
  7016  				return func() int32 {
  7017  					if (func() int32 {
  7018  						if ((uint32((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) == uint32(0) {
  7019  							return func() int32 {
  7020  								if (((uint32(m_Q12)) & ((uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2))) ^ 0x80000000)) & 0x80000000) != 0 {
  7021  									return libc.Int32FromUint32(0x80000000)
  7022  								}
  7023  								return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  7024  							}()
  7025  						}
  7026  						return func() int32 {
  7027  							if ((((uint32(m_Q12)) ^ 0x80000000) & (uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) != 0 {
  7028  								return 0x7FFFFFFF
  7029  							}
  7030  							return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  7031  						}()
  7032  					}()) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7033  						return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7034  					}
  7035  					return func() int32 {
  7036  						if ((uint32((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) == uint32(0) {
  7037  							return func() int32 {
  7038  								if (((uint32(m_Q12)) & ((uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2))) ^ 0x80000000)) & 0x80000000) != 0 {
  7039  									return libc.Int32FromUint32(0x80000000)
  7040  								}
  7041  								return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  7042  							}()
  7043  						}
  7044  						return func() int32 {
  7045  							if ((((uint32(m_Q12)) ^ 0x80000000) & (uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) != 0 {
  7046  								return 0x7FFFFFFF
  7047  							}
  7048  							return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  7049  						}()
  7050  					}()
  7051  				}()
  7052  			}()
  7053  		}()) << (4))) /* Q16 */
  7054  
  7055  		temp32 = 0
  7056  		for i = 0; i < 5; i++ {
  7057  			*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)) = int32(SKP_max_16(tls, *(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2)), int16(1638))) /* 1638_Q14 = 0.1_Q0 */
  7058  			temp32 = temp32 + (*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))                                                                        /* Q14 */
  7059  		}
  7060  		temp32 = ((g_Q26) / (temp32)) /* Q14->Q12 */
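		// The quantity g_Q26 is then spread over the five LTP taps in proportion
		// to delta_b_Q14 (each tap floored at 1638, i.e. 0.1 in Q14 per the
		// comment above), and each updated coefficient is clamped to
		// [-16000, 28000] in Q14 by the loop below.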
  7061  		for i = 0; i < 5; i++ {
  7062  			*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2)) = func() int16 {
  7063  				if (-16000) > (28000) {
  7064  					return func() int16 {
  7065  						if (int32(*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2))) + (((((func() int32 {
  7066  							if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7067  								return func() int32 {
  7068  									if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7069  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7070  									}
  7071  									return func() int32 {
  7072  										if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7073  											return (int32((0x7FFFFFFF)) >> (4))
  7074  										}
  7075  										return temp32
  7076  									}()
  7077  								}()
  7078  							}
  7079  							return func() int32 {
  7080  								if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7081  									return (int32((0x7FFFFFFF)) >> (4))
  7082  								}
  7083  								return func() int32 {
  7084  									if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7085  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7086  									}
  7087  									return temp32
  7088  								}()
  7089  							}()
  7090  						}()) << (4)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) + (((((func() int32 {
  7091  							if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7092  								return func() int32 {
  7093  									if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7094  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7095  									}
  7096  									return func() int32 {
  7097  										if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7098  											return (int32((0x7FFFFFFF)) >> (4))
  7099  										}
  7100  										return temp32
  7101  									}()
  7102  								}()
  7103  							}
  7104  							return func() int32 {
  7105  								if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7106  									return (int32((0x7FFFFFFF)) >> (4))
  7107  								}
  7108  								return func() int32 {
  7109  									if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7110  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7111  									}
  7112  									return temp32
  7113  								}()
  7114  							}()
  7115  						}()) << (4)) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) >> 16))) > (-16000) {
  7116  							return int16(-16000)
  7117  						}
  7118  						return func() int16 {
  7119  							if (int32(*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2))) + (((((func() int32 {
  7120  								if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7121  									return func() int32 {
  7122  										if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7123  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7124  										}
  7125  										return func() int32 {
  7126  											if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7127  												return (int32((0x7FFFFFFF)) >> (4))
  7128  											}
  7129  											return temp32
  7130  										}()
  7131  									}()
  7132  								}
  7133  								return func() int32 {
  7134  									if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7135  										return (int32((0x7FFFFFFF)) >> (4))
  7136  									}
  7137  									return func() int32 {
  7138  										if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7139  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7140  										}
  7141  										return temp32
  7142  									}()
  7143  								}()
  7144  							}()) << (4)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) + (((((func() int32 {
  7145  								if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7146  									return func() int32 {
  7147  										if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7148  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7149  										}
  7150  										return func() int32 {
  7151  											if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7152  												return (int32((0x7FFFFFFF)) >> (4))
  7153  											}
  7154  											return temp32
  7155  										}()
  7156  									}()
  7157  								}
  7158  								return func() int32 {
  7159  									if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7160  										return (int32((0x7FFFFFFF)) >> (4))
  7161  									}
  7162  									return func() int32 {
  7163  										if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7164  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7165  										}
  7166  										return temp32
  7167  									}()
  7168  								}()
  7169  							}()) << (4)) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) >> 16))) < (28000) {
  7170  								return int16(28000)
  7171  							}
  7172  							return (int16(int32(*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2))) + (((((func() int32 {
  7173  								if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7174  									return func() int32 {
  7175  										if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7176  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7177  										}
  7178  										return func() int32 {
  7179  											if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7180  												return (int32((0x7FFFFFFF)) >> (4))
  7181  											}
  7182  											return temp32
  7183  										}()
  7184  									}()
  7185  								}
  7186  								return func() int32 {
  7187  									if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7188  										return (int32((0x7FFFFFFF)) >> (4))
  7189  									}
  7190  									return func() int32 {
  7191  										if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7192  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7193  										}
  7194  										return temp32
  7195  									}()
  7196  								}()
  7197  							}()) << (4)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) + (((((func() int32 {
  7198  								if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7199  									return func() int32 {
  7200  										if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7201  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7202  										}
  7203  										return func() int32 {
  7204  											if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7205  												return (int32((0x7FFFFFFF)) >> (4))
  7206  											}
  7207  											return temp32
  7208  										}()
  7209  									}()
  7210  								}
  7211  								return func() int32 {
  7212  									if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7213  										return (int32((0x7FFFFFFF)) >> (4))
  7214  									}
  7215  									return func() int32 {
  7216  										if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7217  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7218  										}
  7219  										return temp32
  7220  									}()
  7221  								}()
  7222  							}()) << (4)) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) >> 16))))
  7223  						}()
  7224  					}()
  7225  				}
  7226  				return func() int16 {
  7227  					if (int32(*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2))) + (((((func() int32 {
  7228  						if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7229  							return func() int32 {
  7230  								if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7231  									return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7232  								}
  7233  								return func() int32 {
  7234  									if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7235  										return (int32((0x7FFFFFFF)) >> (4))
  7236  									}
  7237  									return temp32
  7238  								}()
  7239  							}()
  7240  						}
  7241  						return func() int32 {
  7242  							if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7243  								return (int32((0x7FFFFFFF)) >> (4))
  7244  							}
  7245  							return func() int32 {
  7246  								if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7247  									return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7248  								}
  7249  								return temp32
  7250  							}()
  7251  						}()
  7252  					}()) << (4)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) + (((((func() int32 {
  7253  						if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7254  							return func() int32 {
  7255  								if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7256  									return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7257  								}
  7258  								return func() int32 {
  7259  									if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7260  										return (int32((0x7FFFFFFF)) >> (4))
  7261  									}
  7262  									return temp32
  7263  								}()
  7264  							}()
  7265  						}
  7266  						return func() int32 {
  7267  							if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7268  								return (int32((0x7FFFFFFF)) >> (4))
  7269  							}
  7270  							return func() int32 {
  7271  								if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7272  									return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7273  								}
  7274  								return temp32
  7275  							}()
  7276  						}()
  7277  					}()) << (4)) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) >> 16))) > (28000) {
  7278  						return int16(28000)
  7279  					}
  7280  					return func() int16 {
  7281  						if (int32(*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2))) + (((((func() int32 {
  7282  							if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7283  								return func() int32 {
  7284  									if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7285  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7286  									}
  7287  									return func() int32 {
  7288  										if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7289  											return (int32((0x7FFFFFFF)) >> (4))
  7290  										}
  7291  										return temp32
  7292  									}()
  7293  								}()
  7294  							}
  7295  							return func() int32 {
  7296  								if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7297  									return (int32((0x7FFFFFFF)) >> (4))
  7298  								}
  7299  								return func() int32 {
  7300  									if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7301  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7302  									}
  7303  									return temp32
  7304  								}()
  7305  							}()
  7306  						}()) << (4)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) + (((((func() int32 {
  7307  							if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7308  								return func() int32 {
  7309  									if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7310  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7311  									}
  7312  									return func() int32 {
  7313  										if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7314  											return (int32((0x7FFFFFFF)) >> (4))
  7315  										}
  7316  										return temp32
  7317  									}()
  7318  								}()
  7319  							}
  7320  							return func() int32 {
  7321  								if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7322  									return (int32((0x7FFFFFFF)) >> (4))
  7323  								}
  7324  								return func() int32 {
  7325  									if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7326  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7327  									}
  7328  									return temp32
  7329  								}()
  7330  							}()
  7331  						}()) << (4)) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) >> 16))) < (-16000) {
  7332  							return int16(-16000)
  7333  						}
  7334  						return (int16(int32(*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2))) + (((((func() int32 {
  7335  							if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7336  								return func() int32 {
  7337  									if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7338  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7339  									}
  7340  									return func() int32 {
  7341  										if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7342  											return (int32((0x7FFFFFFF)) >> (4))
  7343  										}
  7344  										return temp32
  7345  									}()
  7346  								}()
  7347  							}
  7348  							return func() int32 {
  7349  								if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7350  									return (int32((0x7FFFFFFF)) >> (4))
  7351  								}
  7352  								return func() int32 {
  7353  									if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7354  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7355  									}
  7356  									return temp32
  7357  								}()
  7358  							}()
  7359  						}()) << (4)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) + (((((func() int32 {
  7360  							if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7361  								return func() int32 {
  7362  									if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7363  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7364  									}
  7365  									return func() int32 {
  7366  										if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7367  											return (int32((0x7FFFFFFF)) >> (4))
  7368  										}
  7369  										return temp32
  7370  									}()
  7371  								}()
  7372  							}
  7373  							return func() int32 {
  7374  								if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7375  									return (int32((0x7FFFFFFF)) >> (4))
  7376  								}
  7377  								return func() int32 {
  7378  									if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7379  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7380  									}
  7381  									return temp32
  7382  								}()
  7383  							}()
  7384  						}()) << (4)) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) >> 16))))
  7385  					}()
  7386  				}()
  7387  			}()
  7388  		}
  7389  		b_Q14_ptr += 2 * (uintptr(5))
  7390  	}
  7391  }
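
// Illustrative sketch (not part of the generated translation): the deeply
// nested func() literals above are ccgo's inline expansion of SILK's
// saturating fixed-point macros. The recurring pattern is a 32-bit saturating
// subtraction followed by a clamp-and-shift; written directly in Go it would
// look roughly like the helpers below (names are ours, for clarity only):

func subSat32Sketch(a, b int32) int32 {
	r := int64(a) - int64(b)
	if r > 0x7FFFFFFF {
		return 0x7FFFFFFF
	}
	if r < -0x80000000 {
		return -0x80000000
	}
	return int32(r)
}

func lshiftSat32Sketch(a int32, shift uint) int32 {
	hi := int32(0x7FFFFFFF) >> shift
	lo := int32(-0x80000000) >> shift
	if a > hi {
		a = hi
	} else if a < lo {
		a = lo
	}
	return a << shift // clamp first, then shift, as in the expanded macro
}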
  7392  
  7393  func SKP_Silk_fit_LTP(tls *libc.TLS, LTP_coefs_Q16 uintptr, LTP_coefs_Q14 uintptr) { /* SKP_Silk_find_LTP_FIX.c:233:6: */
  7394  	var i int32
  7395  
  7396  	for i = 0; i < 5; i++ {
  7397  		*(*int16)(unsafe.Pointer(LTP_coefs_Q14 + uintptr(i)*2)) = func() int16 {
  7398  			if (func() int32 {
  7399  				if (2) == 1 {
  7400  					return (((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) & 1))
  7401  				}
  7402  				return ((((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) >> ((2) - 1)) + 1) >> 1)
  7403  			}()) > 0x7FFF {
  7404  				return int16(0x7FFF)
  7405  			}
  7406  			return func() int16 {
  7407  				if (func() int32 {
  7408  					if (2) == 1 {
  7409  						return (((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) & 1))
  7410  					}
  7411  					return ((((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) >> ((2) - 1)) + 1) >> 1)
  7412  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
  7413  					return libc.Int16FromInt32(0x8000)
  7414  				}
  7415  				return func() int16 {
  7416  					if (2) == 1 {
  7417  						return (int16(((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) & 1)))
  7418  					}
  7419  					return (int16((((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) >> ((2) - 1)) + 1) >> 1))
  7420  				}()
  7421  			}()
  7422  		}()
  7423  	}
  7424  }
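
// Illustrative sketch (not part of the generated translation): SKP_Silk_fit_LTP
// above converts each 32-bit Q16 LTP coefficient to a 16-bit Q14 value by
// shifting right by two with rounding and then saturating to the int16 range,
// equivalent to:

func fitLTPSketch(coefsQ16 []int32, coefsQ14 []int16) {
	for i, c := range coefsQ16 {
		r := ((c >> 1) + 1) >> 1 // round(c / 4): Q16 -> Q14
		if r > 0x7FFF {
			r = 0x7FFF
		} else if r < -0x8000 {
			r = -0x8000
		}
		coefsQ14[i] = int16(r)
	}
}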
  7425  
  7426  /***********************************************************************
  7427  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  7428  Redistribution and use in source and binary forms, with or without
  7429  modification, (subject to the limitations in the disclaimer below)
  7430  are permitted provided that the following conditions are met:
  7431  - Redistributions of source code must retain the above copyright notice,
  7432  this list of conditions and the following disclaimer.
  7433  - Redistributions in binary form must reproduce the above copyright
  7434  notice, this list of conditions and the following disclaimer in the
  7435  documentation and/or other materials provided with the distribution.
  7436  - Neither the name of Skype Limited, nor the names of specific
  7437  contributors, may be used to endorse or promote products derived from
  7438  this software without specific prior written permission.
  7439  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  7440  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  7441  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  7442  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  7443  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  7444  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  7445  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  7446  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  7447  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  7448  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  7449  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  7450  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  7451  ***********************************************************************/
  7452  
  7453  /*******************/
  7454  /* Pitch estimator */
  7455  /*******************/
  7456  
  7457  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
  7458  
  7459  /* Bandwidth expansion for whitening filter in pitch analysis */
  7460  
  7461  /* Threshold used by pitch estimator for early escape */
  7462  
  7463  /*********************/
  7464  /* Linear prediction */
  7465  /*********************/
  7466  
  7467  /* LPC analysis defines: regularization and bandwidth expansion */
  7468  
  7469  /* LTP analysis defines */
  7470  
  7471  /* LTP quantization settings */
  7472  
  7473  /***********************/
  7474  /* High pass filtering */
  7475  /***********************/
  7476  
  7477  /* Smoothing parameters for low end of pitch frequency range estimation */
  7478  
  7479  /* Min and max values for low end of pitch frequency range estimation */
  7480  
  7481  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
  7482  
  7483  /***********/
  7484  /* Various */
  7485  /***********/
  7486  
  7487  /* Required speech activity for counting frame as active */
  7488  
  7489  /* Speech Activity LBRR enable threshold (needs tuning) */
  7490  
  7491  /*************************/
  7492  /* Perceptual parameters */
  7493  /*************************/
  7494  
  7495  /* reduction in coding SNR during low speech activity */
  7496  
  7497  /* factor for reducing quantization noise during voiced speech */
  7498  
  7499  /* factor for reducing quantization noise for unvoiced sparse signals */
  7500  
  7501  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
  7502  
  7503  /* warping control */
  7504  
  7505  /* fraction added to first autocorrelation value */
  7506  
  7507  /* noise shaping filter chirp factor */
  7508  
  7509  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
  7510  
  7511  /* gain reduction for fricatives */
  7512  
  7513  /* extra harmonic boosting (signal shaping) at low bitrates */
  7514  
  7515  /* extra harmonic boosting (signal shaping) for noisy input signals */
  7516  
  7517  /* harmonic noise shaping */
  7518  
  7519  /* extra harmonic noise shaping for high bitrates or noisy input */
  7520  
  7521  /* parameter for shaping noise towards higher frequencies */
  7522  
  7523  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
  7524  
  7525  /* parameter for applying a high-pass tilt to the input signal */
  7526  
  7527  /* parameter for extra high-pass tilt to the input signal at high rates */
  7528  
  7529  /* parameter for reducing noise at the very low frequencies */
  7530  
  7531  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
  7532  
  7533  /* noise floor to put a lower limit on the quantization step size */
  7534  
  7535  /* noise floor relative to active speech gain level */
  7536  
  7537  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
  7538  
  7539  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
  7540  
  7541  /* parameters defining the R/D tradeoff in the residual quantizer */
  7542  
  7543  /* Find pitch lags */
  7544  func SKP_Silk_find_pitch_lags_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr, res uintptr, x uintptr) { /* SKP_Silk_find_pitch_lags_FIX.c:32:6: */
  7545  	bp := tls.Alloc(1416)
  7546  	defer tls.Free(1416)
  7547  
  7548  	var psPredSt uintptr = (psEnc + 20708 /* &.sPred */)
  7549  	var buf_len int32
  7550  	var i int32
  7551  	// var scale int32 at bp+1220, 4
  7552  
  7553  	var thrhld_Q15 int32
  7554  	var res_nrg int32
  7555  	var x_buf uintptr
  7556  	var x_buf_ptr uintptr
  7557  	// var Wsig [576]int16 at bp, 1152
  7558  
  7559  	var Wsig_ptr uintptr
  7560  	// var auto_corr [17]int32 at bp+1152, 68
  7561  
  7562  	// var rc_Q15 [16]int16 at bp+1224, 32
  7563  
  7564  	// var A_Q24 [16]int32 at bp+1256, 64
  7565  
  7566  	// var FiltState [16]int32 at bp+1352, 64
  7567  
  7568  	// var A_Q12 [16]int16 at bp+1320, 32
  7569  
  7570  	/******************************************/
  7571  	/* Setup buffer lengths etc based on Fs   */
  7572  	/******************************************/
  7573  	buf_len = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch) + (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length) << (1)))
  7574  
  7575  	/* Safety check */
  7576  
  7577  	x_buf = (x - uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)*2)
  7578  
  7579  	/*************************************/
  7580  	/* Estimate LPC AR coefficients      */
  7581  	/*************************************/
  7582  
  7583  	/* Calculate windowed signal */
  7584  
  7585  	/* First LA_LTP samples */
  7586  	x_buf_ptr = ((x_buf + uintptr(buf_len)*2) - uintptr((*SKP_Silk_predict_state_FIX)(unsafe.Pointer(psPredSt)).Fpitch_LPC_win_length)*2)
  7587  	Wsig_ptr = bp /* &Wsig[0] */
  7588  	SKP_Silk_apply_sine_window(tls, Wsig_ptr, x_buf_ptr, 1, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch)
  7589  
  7590  	/* Middle un-windowed samples */
  7591  	Wsig_ptr += 2 * (uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch))
  7592  	x_buf_ptr += 2 * (uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch))
  7593  	libc.Xmemcpy(tls, Wsig_ptr, x_buf_ptr, ((uint64((*SKP_Silk_predict_state_FIX)(unsafe.Pointer(psPredSt)).Fpitch_LPC_win_length - (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch) << (1)))) * uint64(unsafe.Sizeof(int16(0)))))
  7594  
  7595  	/* Last LA_LTP samples */
  7596  	Wsig_ptr += 2 * (uintptr((*SKP_Silk_predict_state_FIX)(unsafe.Pointer(psPredSt)).Fpitch_LPC_win_length - (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch) << (1))))
  7597  	x_buf_ptr += 2 * (uintptr((*SKP_Silk_predict_state_FIX)(unsafe.Pointer(psPredSt)).Fpitch_LPC_win_length - (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch) << (1))))
  7598  	SKP_Silk_apply_sine_window(tls, Wsig_ptr, x_buf_ptr, 2, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch)
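	// Taken together, the three steps above build the pitch analysis window:
	// the first and last la_pitch samples are tapered with the two halves of a
	// sine window (the 1 and 2 passed to SKP_Silk_apply_sine_window), while the
	// middle of the buffer is copied through un-windowed.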
  7599  
  7600  	/* Calculate autocorrelation sequence */
  7601  	SKP_Silk_autocorr(tls, bp+1152 /* &auto_corr[0] */, bp+1220 /* &scale */, bp /* &Wsig[0] */, (*SKP_Silk_predict_state_FIX)(unsafe.Pointer(psPredSt)).Fpitch_LPC_win_length, ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder + 1))
  7602  
  7603  	/* Add white noise, as fraction of energy */
  7604  	*(*int32)(unsafe.Pointer(bp + 1152 /* &auto_corr[0] */)) = ((*(*int32)(unsafe.Pointer(bp + 1152 /* &auto_corr[0] */))) + ((((*(*int32)(unsafe.Pointer(bp + 1152 /* &auto_corr[0] */))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 16))))) + ((((*(*int32)(unsafe.Pointer(bp + 1152 /* &auto_corr[0] */))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 16))))) >> 16)))
  7605  
  7606  	/* Calculate the reflection coefficients using schur */
  7607  	res_nrg = SKP_Silk_schur(tls, bp+1224 /* &rc_Q15[0] */, bp+1152 /* &auto_corr[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder)
  7608  
  7609  	/* Prediction gain */
  7610  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FpredGain_Q16 = SKP_DIV32_varQ(tls, *(*int32)(unsafe.Pointer(bp + 1152 /* &auto_corr[0] */)), SKP_max_int(tls, res_nrg, 1), 16)
  7611  
  7612  	/* Convert reflection coefficients to prediction coefficients */
  7613  	SKP_Silk_k2a(tls, bp+1256 /* &A_Q24[0] */, bp+1224 /* &rc_Q15[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder)
  7614  
  7615  	/* Convert from 32-bit Q24 to 16-bit Q12 coefs */
  7616  	for i = 0; i < (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder; i++ {
  7617  		*(*int16)(unsafe.Pointer(bp + 1320 /* &A_Q12[0] */ + uintptr(i)*2)) = func() int16 {
  7618  			if ((*(*int32)(unsafe.Pointer(bp + 1256 /* &A_Q24[0] */ + uintptr(i)*4))) >> (12)) > 0x7FFF {
  7619  				return int16(0x7FFF)
  7620  			}
  7621  			return func() int16 {
  7622  				if ((*(*int32)(unsafe.Pointer(bp + 1256 /* &A_Q24[0] */ + uintptr(i)*4))) >> (12)) < (int32(libc.Int16FromInt32(0x8000))) {
  7623  					return libc.Int16FromInt32(0x8000)
  7624  				}
  7625  				return (int16((*(*int32)(unsafe.Pointer(bp + 1256 /* &A_Q24[0] */ + uintptr(i)*4))) >> (12)))
  7626  			}()
  7627  		}()
  7628  	}
  7629  
  7630  	/* Do BWE */
  7631  	SKP_Silk_bwexpander(tls, bp+1320 /* &A_Q12[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder, SKP_FIX_CONST(tls, 0.99, 16))
  7632  
  7633  	/*****************************************/
  7634  	/* LPC analysis filtering                */
  7635  	/*****************************************/
  7636  	libc.Xmemset(tls, bp+1352 /* &FiltState[0] */, 0, (uint64((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder) * uint64(unsafe.Sizeof(int32(0))))) /* Not really necessary, but Valgrind will complain otherwise */
  7637  	SKP_Silk_MA_Prediction(tls, x_buf, bp+1320 /* &A_Q12[0] */, bp+1352 /* &FiltState[0] */, res, buf_len, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder)
  7638  	libc.Xmemset(tls, res, 0, (uint64((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder) * uint64(unsafe.Sizeof(int16(0)))))
  7639  
  7640  	/* Threshold for pitch estimator */
  7641  	thrhld_Q15 = SKP_FIX_CONST(tls, 0.45, 15)
  7642  	thrhld_Q15 = ((thrhld_Q15) + ((int32(int16(SKP_FIX_CONST(tls, -0.004, 15)))) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder)))))
  7643  	thrhld_Q15 = ((thrhld_Q15) + ((int32(int16(SKP_FIX_CONST(tls, -0.1, 7)))) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))))
  7644  	thrhld_Q15 = ((thrhld_Q15) + ((int32(int16(SKP_FIX_CONST(tls, 0.15, 15)))) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fprev_sigtype)))))
  7645  	thrhld_Q15 = ((thrhld_Q15) + ((((SKP_FIX_CONST(tls, -0.1, 16)) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15)))) + ((((SKP_FIX_CONST(tls, -0.1, 16)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15)))) >> 16)))
  7646  	thrhld_Q15 = func() int32 {
  7647  		if (thrhld_Q15) > 0x7FFF {
  7648  			return 0x7FFF
  7649  		}
  7650  		return func() int32 {
  7651  			if (thrhld_Q15) < (int32(libc.Int16FromInt32(0x8000))) {
  7652  				return int32(libc.Int16FromInt32(0x8000))
  7653  			}
  7654  			return thrhld_Q15
  7655  		}()
  7656  	}()
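	// In floating-point terms the clamped threshold above is approximately
	//   thrhld = 0.45 - 0.004*pitchEstimationLPCOrder - 0.1*speech_activity
	//            + 0.15*prev_sigtype - 0.1*input_tilt
	// with speech_activity_Q8 and input_tilt_Q15 read as fractional values,
	// kept in Q15 and saturated to the int16 range.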
  7657  
  7658  	/*****************************************/
  7659  	/* Call pitch estimator                  */
  7660  	/*****************************************/
  7661  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype = SKP_Silk_pitch_analysis_core(tls, res, psEncCtrl /* &.sCmn */ +108 /* &.pitchL */, (psEncCtrl /* &.sCmn */ /* &.lagIndex */),
  7662  		(psEncCtrl /* &.sCmn */ + 4 /* &.contourIndex */), (psEnc + 22944 /* &.LTPCorr_Q15 */), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FprevLag, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationThreshold_Q16,
  7663  		int32(int16(thrhld_Q15)), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationComplexity, 0)
  7664  }
  7665  
  7666  func SKP_Silk_find_pred_coefs_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr, res_pitch uintptr) { /* SKP_Silk_find_pred_coefs_FIX.c:31:6: */
  7667  	bp := tls.Alloc(1616)
  7668  	defer tls.Free(1616)
  7669  
  7670  	var i int32
  7671  	// var WLTP [100]int32 at bp+48, 400
  7672  
  7673  	// var invGains_Q16 [4]int32 at bp, 16
  7674  
  7675  	// var local_gains [4]int32 at bp+32, 16
  7676  
  7677  	// var Wght_Q15 [4]int32 at bp+16, 16
  7678  
  7679  	// var NLSF_Q15 [16]int32 at bp+1552, 64
  7680  
  7681  	var x_ptr uintptr
  7682  	var x_pre_ptr uintptr
  7683  	// var LPC_in_pre [544]int16 at bp+464, 1088
  7684  
  7685  	var tmp int32
  7686  	var min_gain_Q16 int32
  7687  	// var LTP_corrs_rshift [4]int32 at bp+448, 16
  7688  
  7689  	/* weighting for weighted least squares */
  7690  	min_gain_Q16 = (int32(0x7FFFFFFF) >> 6)
  7691  	for i = 0; i < 4; i++ {
  7692  		min_gain_Q16 = func() int32 {
  7693  			if (min_gain_Q16) < (*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(i)*4))) {
  7694  				return min_gain_Q16
  7695  			}
  7696  			return *(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(i)*4))
  7697  		}()
  7698  	}
  7699  	for i = 0; i < 4; i++ {
  7700  		/* Divide to Q16 */
  7701  
  7702  		/* Invert and normalize gains, and ensure that maximum invGains_Q16 is within range of a 16 bit int */
  7703  		*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4)) = SKP_DIV32_varQ(tls, min_gain_Q16, *(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(i)*4)), (16 - 2))
  7704  
  7705  		/* Ensure Wght_Q15 has a minimum value of 1 */
  7706  		*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4)) = func() int32 {
  7707  			if (*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4))) > (363) {
  7708  				return *(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4))
  7709  			}
  7710  			return 363
  7711  		}()
  7712  
  7713  		/* Square the inverted gains */
  7714  
  7715  		tmp = ((((*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4)))))) >> 16))
  7716  		*(*int32)(unsafe.Pointer(bp + 16 /* &Wght_Q15[0] */ + uintptr(i)*4)) = ((tmp) >> (1))
  7717  
  7718  		/* Invert the inverted and normalized gains */
  7719  		*(*int32)(unsafe.Pointer(bp + 32 /* &local_gains[0] */ + uintptr(i)*4)) = ((int32(1) << 16) / (*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4))))
  7720  	}
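	// The loop above prepares per-subframe scaling for the prediction analysis:
	// invGains_Q16[i] is min_gain_Q16 divided by Gains_Q16[i] (with two bits of
	// headroom and a floor of 363), Wght_Q15[i] is half of invGains squared
	// (a SMULWB-style multiply), and local_gains[i] is (1 << 16) / invGains_Q16[i].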
  7721  
  7722  	if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
  7723  		/**********/
  7724  		/* VOICED */
  7725  		/**********/
  7726  
  7727  		/* LTP analysis */
  7728  		SKP_Silk_find_LTP_FIX(tls, psEncCtrl+208 /* &.LTPCoef_Q14 */, bp+48 /* &WLTP[0] */, (psEncCtrl + 616 /* &.LTPredCodGain_Q7 */), res_pitch,
  7729  			(res_pitch + uintptr((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)>>(1)))*2), psEncCtrl /* &.sCmn */ +108 /* &.pitchL */, bp+16, /* &Wght_Q15[0] */
  7730  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length, bp+448 /* &LTP_corrs_rshift[0] */)
  7731  
  7732  		/* Quantize LTP gain parameters */
  7733  		SKP_Silk_quant_LTP_gains_FIX(tls, psEncCtrl+208 /* &.LTPCoef_Q14 */, psEncCtrl /* &.sCmn */ +12 /* &.LTPIndex */, (psEncCtrl /* &.sCmn */ + 8 /* &.PERIndex */),
  7734  			bp+48 /* &WLTP[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fmu_LTP_Q8, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLTPQuantLowComplexity)
  7735  
  7736  		/* Control LTP scaling */
  7737  		SKP_Silk_LTP_scale_ctrl_FIX(tls, psEnc, psEncCtrl)
  7738  
  7739  		/* Create LTP residual */
  7740  		SKP_Silk_LTP_analysis_filter_FIX(tls, bp+464 /* &LPC_in_pre[0] */, (((psEnc + 20784 /* &.x_buf */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)*2) - uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)*2),
  7741  			psEncCtrl+208 /* &.LTPCoef_Q14 */, psEncCtrl /* &.sCmn */ +108 /* &.pitchL */, bp /* &invGains_Q16[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
  7742  
  7743  	} else {
  7744  		/************/
  7745  		/* UNVOICED */
  7746  		/************/
  7747  		/* Create signal with prepended subframes, scaled by inverse gains */
  7748  		x_ptr = (((psEnc + 20784 /* &.x_buf */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)*2) - uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)*2)
  7749  		x_pre_ptr = bp + 464 /* &LPC_in_pre[0] */
  7750  		for i = 0; i < 4; i++ {
  7751  			SKP_Silk_scale_copy_vector16(tls, x_pre_ptr, x_ptr, *(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4)),
  7752  				((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length + (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder))
  7753  			x_pre_ptr += 2 * (uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length + (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder))
  7754  			x_ptr += 2 * (uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length))
  7755  		}
  7756  
  7757  		libc.Xmemset(tls, psEncCtrl+208 /* &.LTPCoef_Q14 */, 0, ((uint64(4 * 5)) * uint64(unsafe.Sizeof(int16(0)))))
  7758  		(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7 = 0
  7759  	}
  7760  
  7761  	/* LPC_in_pre contains the LTP-filtered input for voiced, and the unfiltered input for unvoiced */
  7762  
  7763  	SKP_Silk_find_LPC_FIX(tls, bp+1552 /* &NLSF_Q15[0] */, (psEncCtrl /* &.sCmn */ + 68 /* &.NLSFInterpCoef_Q2 */), psEnc+20708 /* &.sPred */ +12, /* &.prev_NLSFq_Q15 */
  7764  		((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseInterpolatedNLSFs * (1 - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffirst_frame_after_reset)), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder,
  7765  		bp+464 /* &LPC_in_pre[0] */, ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length + (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder))
  7766  
  7767  	/* Quantize LSFs */
  7768  
  7769  	SKP_Silk_process_NLSFs_FIX(tls, psEnc, psEncCtrl, bp+1552 /* &NLSF_Q15[0] */)
  7770  
  7771  	/* Calculate residual energy using quantized LPC coefficients */
  7772  	SKP_Silk_residual_energy_FIX(tls, psEncCtrl+640 /* &.ResNrg */, psEncCtrl+656 /* &.ResNrgQ */, bp+464 /* &LPC_in_pre[0] */, psEncCtrl+144 /* &.PredCoef_Q12 */, bp+32, /* &local_gains[0] */
  7773  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
  7774  
  7775  	/* Copy to prediction struct for use in next frame for fluctuation reduction */
  7776  	libc.Xmemcpy(tls, psEnc+20708 /* &.sPred */ +12 /* &.prev_NLSFq_Q15 */, bp+1552 /* &NLSF_Q15[0] */, (uint64((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder) * uint64(unsafe.Sizeof(int32(0)))))
  7777  
  7778  }
  7779  
  7780  /* Gain scalar quantization with hysteresis, uniform on log scale */
  7781  func SKP_Silk_gains_quant(tls *libc.TLS, ind uintptr, gain_Q16 uintptr, prev_ind uintptr, conditional int32) { /* SKP_Silk_gain_quant.c:35:6: */
  7782  	var k int32
  7783  
  7784  	for k = 0; k < 4; k++ {
  7785  		/* Add half of previous quantization error, convert to log scale, scale, floor() */
  7786  		*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) = (((int32(((65536 * (64 - 1)) / (((86 - 6) * 128) / 6))) >> 16) * (int32((int16(SKP_Silk_lin2log(tls, *(*int32)(unsafe.Pointer(gain_Q16 + uintptr(k)*4))) - (((6 * 128) / 6) + (16 * 128))))))) + (((((65536 * (64 - 1)) / (((86 - 6) * 128) / 6)) & 0x0000FFFF) * (int32((int16(SKP_Silk_lin2log(tls, *(*int32)(unsafe.Pointer(gain_Q16 + uintptr(k)*4))) - (((6 * 128) / 6) + (16 * 128))))))) >> 16))
  7787  
  7788  		/* Round towards previous quantized gain (hysteresis) */
  7789  		if *(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) < *(*int32)(unsafe.Pointer(prev_ind)) {
  7790  			*(*int32)(unsafe.Pointer(ind + uintptr(k)*4))++
  7791  		}
  7792  
  7793  		/* Compute delta indices and limit */
  7794  		if (k == 0) && (conditional == 0) {
  7795  			/* Full index */
  7796  			*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) = func() int32 {
  7797  				if (0) > (64 - 1) {
  7798  					return func() int32 {
  7799  						if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4))) > (0) {
  7800  							return 0
  7801  						}
  7802  						return func() int32 {
  7803  							if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4))) < (64 - 1) {
  7804  								return (64 - 1)
  7805  							}
  7806  							return *(*int32)(unsafe.Pointer(ind + uintptr(k)*4))
  7807  						}()
  7808  					}()
  7809  				}
  7810  				return func() int32 {
  7811  					if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4))) > (64 - 1) {
  7812  						return (64 - 1)
  7813  					}
  7814  					return func() int32 {
  7815  						if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4))) < (0) {
  7816  							return 0
  7817  						}
  7818  						return *(*int32)(unsafe.Pointer(ind + uintptr(k)*4))
  7819  					}()
  7820  				}()
  7821  			}()
  7822  			*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) = SKP_max_int(tls, *(*int32)(unsafe.Pointer(ind + uintptr(k)*4)), (*(*int32)(unsafe.Pointer(prev_ind)) + -4))
  7823  			*(*int32)(unsafe.Pointer(prev_ind)) = *(*int32)(unsafe.Pointer(ind + uintptr(k)*4))
  7824  		} else {
  7825  			/* Delta index */
  7826  			*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) = func() int32 {
  7827  				if (-4) > (40) {
  7828  					return func() int32 {
  7829  						if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(prev_ind))) > (-4) {
  7830  							return -4
  7831  						}
  7832  						return func() int32 {
  7833  							if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(prev_ind))) < (40) {
  7834  								return 40
  7835  							}
  7836  							return (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(prev_ind)))
  7837  						}()
  7838  					}()
  7839  				}
  7840  				return func() int32 {
  7841  					if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(prev_ind))) > (40) {
  7842  						return 40
  7843  					}
  7844  					return func() int32 {
  7845  						if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(prev_ind))) < (-4) {
  7846  							return -4
  7847  						}
  7848  						return (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(prev_ind)))
  7849  					}()
  7850  				}()
  7851  			}()
  7852  			/* Accumulate deltas */
  7853  			*(*int32)(unsafe.Pointer(prev_ind)) += (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)))
  7854  			/* Shift to make non-negative */
  7855  			*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) -= (-4)
  7856  		}
  7857  
  7858  		/* Convert to linear scale and scale */
  7859  		*(*int32)(unsafe.Pointer(gain_Q16 + uintptr(k)*4)) = SKP_Silk_log2lin(tls, SKP_min_32(tls, ((((int32(((65536*(((86-6)*128)/6))/(64-1)))>>16)*(int32(int16(*(*int32)(unsafe.Pointer(prev_ind))))))+(((((65536*(((86-6)*128)/6))/(64-1))&0x0000FFFF)*(int32(int16(*(*int32)(unsafe.Pointer(prev_ind))))))>>16))+(((6*128)/6)+(16*128))), 3967)) /* 3968 = 31 in Q7 */
  7860  	}
  7861  }
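
// Illustrative sketch (not part of the generated translation): ignoring the
// hysteresis, delta coding and final clamping above, the forward mapping of a
// single gain onto the 64-level uniform log2 grid is roughly the following,
// where lin2log stands in for SKP_Silk_lin2log and the constants mirror the
// ones visible in the expression above:

func gainIndexSketch(gainQ16 int32, lin2log func(int32) int32) int32 {
	const offsetQ7 = (6*128)/6 + 16*128                          // log2 of the smallest quantized gain, Q7
	const scaleQ16 = (65536 * (64 - 1)) / (((86 - 6) * 128) / 6) // (levels-1) per Q7 of log2 range, Q16
	logGainQ7 := lin2log(gainQ16) - offsetQ7                     // distance above the minimum on the log2 scale
	return (scaleQ16 * logGainQ7) >> 16                          // uniform index, nominally in [0, 63]
}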
  7862  
  7863  /* Gains scalar dequantization, uniform on log scale */
  7864  func SKP_Silk_gains_dequant(tls *libc.TLS, gain_Q16 uintptr, ind uintptr, prev_ind uintptr, conditional int32) { /* SKP_Silk_gain_quant.c:74:6: */
  7865  	var k int32
  7866  
  7867  	for k = 0; k < 4; k++ {
  7868  		if (k == 0) && (conditional == 0) {
  7869  			*(*int32)(unsafe.Pointer(prev_ind)) = *(*int32)(unsafe.Pointer(ind + uintptr(k)*4))
  7870  		} else {
  7871  			/* Delta index */
  7872  			*(*int32)(unsafe.Pointer(prev_ind)) += (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) + -4)
  7873  		}
  7874  
  7875  		/* Convert to linear scale and scale */
  7876  		*(*int32)(unsafe.Pointer(gain_Q16 + uintptr(k)*4)) = SKP_Silk_log2lin(tls, SKP_min_32(tls, ((((int32(((65536*(((86-6)*128)/6))/(64-1)))>>16)*(int32(int16(*(*int32)(unsafe.Pointer(prev_ind))))))+(((((65536*(((86-6)*128)/6))/(64-1))&0x0000FFFF)*(int32(int16(*(*int32)(unsafe.Pointer(prev_ind))))))>>16))+(((6*128)/6)+(16*128))), 3967)) /* 3968 = 31 in Q7 */
  7877  	}
  7878  }
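
// Illustrative note (not part of the generated translation):
// SKP_Silk_gains_dequant inverts the mapping used by SKP_Silk_gains_quant
// above: the accumulated index prev_ind is scaled back to the Q7 log2 domain,
// offset by the log2 of the smallest quantized gain, clamped to 3967 (just
// below 31.0 in Q7), and converted back to a linear Q16 gain with
// SKP_Silk_log2lin.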
  7879  
  7880  /***********************************************************************
  7881  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  7882  Redistribution and use in source and binary forms, with or without
  7883  modification, (subject to the limitations in the disclaimer below)
  7884  are permitted provided that the following conditions are met:
  7885  - Redistributions of source code must retain the above copyright notice,
  7886  this list of conditions and the following disclaimer.
  7887  - Redistributions in binary form must reproduce the above copyright
  7888  notice, this list of conditions and the following disclaimer in the
  7889  documentation and/or other materials provided with the distribution.
  7890  - Neither the name of Skype Limited, nor the names of specific
  7891  contributors, may be used to endorse or promote products derived from
  7892  this software without specific prior written permission.
  7893  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  7894  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  7895  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  7896  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  7897  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  7898  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  7899  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  7900  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  7901  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  7902  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  7903  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  7904  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  7905  ***********************************************************************/
  7906  
  7907  /*******************/
  7908  /* Pitch estimator */
  7909  /*******************/
  7910  
  7911  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
  7912  
  7913  /* Bandwidth expansion for whitening filter in pitch analysis */
  7914  
  7915  /* Threshold used by pitch estimator for early escape */
  7916  
  7917  /*********************/
  7918  /* Linear prediction */
  7919  /*********************/
  7920  
  7921  /* LPC analysis defines: regularization and bandwidth expansion */
  7922  
  7923  /* LTP analysis defines */
  7924  
  7925  /* LTP quantization settings */
  7926  
  7927  /***********************/
  7928  /* High pass filtering */
  7929  /***********************/
  7930  
  7931  /* Smoothing parameters for low end of pitch frequency range estimation */
  7932  
  7933  /* Min and max values for low end of pitch frequency range estimation */
  7934  
  7935  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
  7936  
  7937  /***********/
  7938  /* Various */
  7939  /***********/
  7940  
  7941  /* Required speech activity for counting frame as active */
  7942  
  7943  /* Speech Activity LBRR enable threshold (needs tuning) */
  7944  
  7945  /*************************/
  7946  /* Perceptual parameters */
  7947  /*************************/
  7948  
  7949  /* reduction in coding SNR during low speech activity */
  7950  
  7951  /* factor for reducing quantization noise during voiced speech */
  7952  
  7953  /* factor for reducing quantization noise for unvoiced sparse signals */
  7954  
  7955  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
  7956  
  7957  /* warping control */
  7958  
  7959  /* fraction added to first autocorrelation value */
  7960  
  7961  /* noise shaping filter chirp factor */
  7962  
  7963  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
  7964  
  7965  /* gain reduction for fricatives */
  7966  
  7967  /* extra harmonic boosting (signal shaping) at low bitrates */
  7968  
  7969  /* extra harmonic boosting (signal shaping) for noisy input signals */
  7970  
  7971  /* harmonic noise shaping */
  7972  
  7973  /* extra harmonic noise shaping for high bitrates or noisy input */
  7974  
  7975  /* parameter for shaping noise towards higher frequencies */
  7976  
  7977  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
  7978  
  7979  /* parameter for applying a high-pass tilt to the input signal */
  7980  
  7981  /* parameter for extra high-pass tilt to the input signal at high rates */
  7982  
  7983  /* parameter for reducing noise at the very low frequencies */
  7984  
  7985  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
  7986  
  7987  /* noise floor to put a lower limit on the quantization step size */
  7988  
  7989  /* noise floor relative to active speech gain level */
  7990  
  7991  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
  7992  
  7993  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
  7994  
  7995  /* parameters defining the R/D tradeoff in the residual quantizer */
  7996  
  7997  /* High-pass filter with cutoff frequency adaptation based on pitch lag statistics */
  7998  func SKP_Silk_HP_variable_cutoff_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr, out uintptr, in uintptr) { /* SKP_Silk_HP_variable_cutoff_FIX.c:37:6: */
  7999  	bp := tls.Alloc(20)
  8000  	defer tls.Free(20)
  8001  
  8002  	var quality_Q15 int32
  8003  	// var B_Q28 [3]int32 at bp, 12
  8004  
  8005  	// var A_Q28 [2]int32 at bp+12, 8
  8006  
  8007  	var Fc_Q19 int32
  8008  	var r_Q28 int32
  8009  	var r_Q22 int32
  8010  	var pitch_freq_Hz_Q16 int32
  8011  	var pitch_freq_log_Q7 int32
  8012  	var delta_freq_Q7 int32
  8013  
  8014  	/*********************************************/
  8015  	/* Estimate Low End of Pitch Frequency Range */
  8016  	/*********************************************/
  8017  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fprev_sigtype == 0 {
  8018  		/* difference, in log domain */
  8019  		pitch_freq_Hz_Q16 = (((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz) * (1000)) << (16)) / ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FprevLag))
  8020  		pitch_freq_log_Q7 = (SKP_Silk_lin2log(tls, pitch_freq_Hz_Q16) - (int32(16) << 7)) //0x70
  8021  
  8022  		/* adjustment based on quality */
  8023  		quality_Q15 = *(*int32)(unsafe.Pointer((psEncCtrl + 620 /* &.input_quality_bands_Q15 */)))
  8024  		pitch_freq_log_Q7 = ((pitch_freq_log_Q7) - ((((((((quality_Q15) << (2)) >> 16) * (int32(int16(quality_Q15)))) + (((((quality_Q15) << (2)) & 0x0000FFFF) * (int32(int16(quality_Q15)))) >> 16)) >> 16) * (int32((int16(pitch_freq_log_Q7 - 809))))) + ((((((((quality_Q15) << (2)) >> 16) * (int32(int16(quality_Q15)))) + (((((quality_Q15) << (2)) & 0x0000FFFF) * (int32(int16(quality_Q15)))) >> 16)) & 0x0000FFFF) * (int32((int16(pitch_freq_log_Q7 - 809))))) >> 16)))
  8025  		pitch_freq_log_Q7 = ((pitch_freq_log_Q7) + ((SKP_FIX_CONST(tls, 0.6, 15) - quality_Q15) >> (9)))
  8026  
  8027  		//delta_freq = pitch_freq_log - psEnc->variable_HP_smth1;
  8028  		delta_freq_Q7 = (pitch_freq_log_Q7 - (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth1_Q15) >> (8)))
  8029  		if delta_freq_Q7 < 0 {
  8030  			/* less smoothing for decreasing pitch frequency, to track something close to the minimum */
  8031  			delta_freq_Q7 = ((delta_freq_Q7) * (3))
  8032  		}
  8033  
  8034  		/* limit delta, to reduce impact of outliers */
  8035  		delta_freq_Q7 = func() int32 {
  8036  			if (-SKP_FIX_CONST(tls, 0.4, 7)) > (SKP_FIX_CONST(tls, 0.4, 7)) {
  8037  				return func() int32 {
  8038  					if (delta_freq_Q7) > (-SKP_FIX_CONST(tls, 0.4, 7)) {
  8039  						return -SKP_FIX_CONST(tls, 0.4, 7)
  8040  					}
  8041  					return func() int32 {
  8042  						if (delta_freq_Q7) < (SKP_FIX_CONST(tls, 0.4, 7)) {
  8043  							return SKP_FIX_CONST(tls, 0.4, 7)
  8044  						}
  8045  						return delta_freq_Q7
  8046  					}()
  8047  				}()
  8048  			}
  8049  			return func() int32 {
  8050  				if (delta_freq_Q7) > (SKP_FIX_CONST(tls, 0.4, 7)) {
  8051  					return SKP_FIX_CONST(tls, 0.4, 7)
  8052  				}
  8053  				return func() int32 {
  8054  					if (delta_freq_Q7) < (-SKP_FIX_CONST(tls, 0.4, 7)) {
  8055  						return -SKP_FIX_CONST(tls, 0.4, 7)
  8056  					}
  8057  					return delta_freq_Q7
  8058  				}()
  8059  			}()
  8060  		}()
  8061  
  8062  		/* update smoother */
  8063  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth1_Q15 = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth1_Q15) + (((((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8) << (1)) * (delta_freq_Q7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) + (((((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8) << (1)) * (delta_freq_Q7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) >> 16)))
  8064  	}
  8065  	/* second smoother */
  8066  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth2_Q15 = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth2_Q15) + (((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth1_Q15 - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth2_Q15) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.015, 16))))) + (((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth1_Q15 - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth2_Q15) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.015, 16))))) >> 16)))
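	// variable_HP_smth1_Q15 (updated in the voiced branch above with a
	// coefficient of 0.1 scaled by the speech activity) and
	// variable_HP_smth2_Q15 (coefficient 0.015) form a cascade of first-order
	// smoothers tracking the log2 of the low end of the pitch frequency range.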
  8067  
  8068  	/* convert from log scale to Hertz */
  8069  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz = SKP_Silk_log2lin(tls, (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth2_Q15) >> (8)))
  8070  
  8071  	/* limit frequency range */
  8072  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz = func() int32 {
  8073  		if (SKP_FIX_CONST(tls, 80.0, 0)) > (SKP_FIX_CONST(tls, 150.0, 0)) {
  8074  			return func() int32 {
  8075  				if ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz) > (SKP_FIX_CONST(tls, 80.0, 0)) {
  8076  					return SKP_FIX_CONST(tls, 80.0, 0)
  8077  				}
  8078  				return func() int32 {
  8079  					if ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz) < (SKP_FIX_CONST(tls, 150.0, 0)) {
  8080  						return SKP_FIX_CONST(tls, 150.0, 0)
  8081  					}
  8082  					return (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz
  8083  				}()
  8084  			}()
  8085  		}
  8086  		return func() int32 {
  8087  			if ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz) > (SKP_FIX_CONST(tls, 150.0, 0)) {
  8088  				return SKP_FIX_CONST(tls, 150.0, 0)
  8089  			}
  8090  			return func() int32 {
  8091  				if ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz) < (SKP_FIX_CONST(tls, 80.0, 0)) {
  8092  					return SKP_FIX_CONST(tls, 80.0, 0)
  8093  				}
  8094  				return (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz
  8095  			}()
  8096  		}()
  8097  	}()
  8098  
  8099  	/********************************/
  8100  	/* Compute Filter Coefficients  */
  8101  	/********************************/
  8102  	/* compute cut-off frequency, in radians */
  8103  	//Fc_num   = (SKP_float)( 0.45f * 2.0f * 3.14159265359 * psEncCtrl->pitch_freq_low_Hz );
  8104  	//Fc_denom = (SKP_float)( 1e3f * psEnc->sCmn.fs_kHz );
  8105  
  8106  	Fc_Q19 = (((int32(int16(1482))) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz)))) / ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz)) // range: 3704 - 27787, 11-15 bits
  8107  
  8108  	r_Q28 = (SKP_FIX_CONST(tls, 1.0, 28) - ((SKP_FIX_CONST(tls, 0.92, 9)) * (Fc_Q19)))
  8109  
  8110  	/* b = r * [ 1; -2; 1 ]; */
  8111  	/* a = [ 1; -2 * r * ( 1 - 0.5 * Fc^2 ); r^2 ]; */
  8112  	*(*int32)(unsafe.Pointer(bp /* &B_Q28[0] */)) = r_Q28
  8113  	*(*int32)(unsafe.Pointer(bp /* &B_Q28[0] */ + 1*4)) = ((-r_Q28) << (1))
  8114  	*(*int32)(unsafe.Pointer(bp /* &B_Q28[0] */ + 2*4)) = r_Q28
  8115  
  8116  	// -r * ( 2 - Fc * Fc );
  8117  	r_Q22 = ((r_Q28) >> (6))
  8118  	*(*int32)(unsafe.Pointer(bp + 12 /* &A_Q28[0] */)) = (((((r_Q22) >> 16) * (int32((int16((((((Fc_Q19) >> 16) * (int32(int16(Fc_Q19)))) + ((((Fc_Q19) & 0x0000FFFF) * (int32(int16(Fc_Q19)))) >> 16)) + ((Fc_Q19) * (func() int32 {
  8119  		if (16) == 1 {
  8120  			return (((Fc_Q19) >> 1) + ((Fc_Q19) & 1))
  8121  		}
  8122  		return ((((Fc_Q19) >> ((16) - 1)) + 1) >> 1)
  8123  	}()))) - SKP_FIX_CONST(tls, 2.0, 22)))))) + ((((r_Q22) & 0x0000FFFF) * (int32((int16((((((Fc_Q19) >> 16) * (int32(int16(Fc_Q19)))) + ((((Fc_Q19) & 0x0000FFFF) * (int32(int16(Fc_Q19)))) >> 16)) + ((Fc_Q19) * (func() int32 {
  8124  		if (16) == 1 {
  8125  			return (((Fc_Q19) >> 1) + ((Fc_Q19) & 1))
  8126  		}
  8127  		return ((((Fc_Q19) >> ((16) - 1)) + 1) >> 1)
  8128  	}()))) - SKP_FIX_CONST(tls, 2.0, 22)))))) >> 16)) + ((r_Q22) * (func() int32 {
  8129  		if (16) == 1 {
  8130  			return ((((((((Fc_Q19) >> 16) * (int32(int16(Fc_Q19)))) + ((((Fc_Q19) & 0x0000FFFF) * (int32(int16(Fc_Q19)))) >> 16)) + ((Fc_Q19) * (func() int32 {
  8131  				if (16) == 1 {
  8132  					return (((Fc_Q19) >> 1) + ((Fc_Q19) & 1))
  8133  				}
  8134  				return ((((Fc_Q19) >> ((16) - 1)) + 1) >> 1)
  8135  			}()))) - SKP_FIX_CONST(tls, 2.0, 22)) >> 1) + (((((((Fc_Q19) >> 16) * (int32(int16(Fc_Q19)))) + ((((Fc_Q19) & 0x0000FFFF) * (int32(int16(Fc_Q19)))) >> 16)) + ((Fc_Q19) * (func() int32 {
  8136  				if (16) == 1 {
  8137  					return (((Fc_Q19) >> 1) + ((Fc_Q19) & 1))
  8138  				}
  8139  				return ((((Fc_Q19) >> ((16) - 1)) + 1) >> 1)
  8140  			}()))) - SKP_FIX_CONST(tls, 2.0, 22)) & 1))
  8141  		}
  8142  		return (((((((((Fc_Q19) >> 16) * (int32(int16(Fc_Q19)))) + ((((Fc_Q19) & 0x0000FFFF) * (int32(int16(Fc_Q19)))) >> 16)) + ((Fc_Q19) * (func() int32 {
  8143  			if (16) == 1 {
  8144  				return (((Fc_Q19) >> 1) + ((Fc_Q19) & 1))
  8145  			}
  8146  			return ((((Fc_Q19) >> ((16) - 1)) + 1) >> 1)
  8147  		}()))) - SKP_FIX_CONST(tls, 2.0, 22)) >> ((16) - 1)) + 1) >> 1)
  8148  	}())))
  8149  	*(*int32)(unsafe.Pointer(bp + 12 /* &A_Q28[0] */ + 1*4)) = (((((r_Q22) >> 16) * (int32(int16(r_Q22)))) + ((((r_Q22) & 0x0000FFFF) * (int32(int16(r_Q22)))) >> 16)) + ((r_Q22) * (func() int32 {
  8150  		if (16) == 1 {
  8151  			return (((r_Q22) >> 1) + ((r_Q22) & 1))
  8152  		}
  8153  		return ((((r_Q22) >> ((16) - 1)) + 1) >> 1)
  8154  	}())))
  8155  
  8156  	/********************************/
  8157  	/* High-Pass Filter             */
  8158  	/********************************/
  8159  	SKP_Silk_biquad_alt(tls, in, bp /* &B_Q28[0] */, bp+12 /* &A_Q28[0] */, psEnc /* &.sCmn */ +15008 /* &.In_HP_State */, out, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)
  8160  }
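
// Editor's sketch (not part of the generated translation unit): the Q19/Q28
// arithmetic above implements the coefficient computation hinted at by the
// commented-out float code. A hypothetical floating-point reference, with
// fcHz standing for psEncCtrl.pitch_freq_low_Hz and fsKHz for
// psEnc.sCmn.fs_kHz; the leading a[0] = 1 is implicit in the generated
// two-element A_Q28 representation.
func exampleVariableHPCoefs(fcHz, fsKHz float64) (b [3]float64, a [3]float64) {
	// cut-off frequency in radians per sample
	fc := 0.45 * 2.0 * 3.14159265359 * fcHz / (1e3 * fsKHz)
	r := 1.0 - 0.92*fc
	// b = r * [ 1; -2; 1 ]
	b = [3]float64{r, -2 * r, r}
	// a = [ 1; -2*r*(1 - 0.5*fc*fc); r*r ]
	a = [3]float64{1, -2 * r * (1 - 0.5*fc*fc), r * r}
	return b, a
}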
  8161  
  8162  /*********************************/
  8163  /* Initialize Silk Encoder state */
  8164  /*********************************/
  8165  func SKP_Silk_init_encoder_FIX(tls *libc.TLS, psEnc uintptr) int32 { /* SKP_Silk_init_encoder_FIX.c:33:9: */
  8166  	var ret int32 = 0
  8167  	/* Clear the entire encoder state */
  8168  	libc.Xmemset(tls, psEnc, 0, uint64(unsafe.Sizeof(SKP_Silk_encoder_state_FIX{})))
  8169  
  8170  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth1_Q15 = 200844 /* = log2(70) in Q15 */
  8171  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth2_Q15 = 200844 /* = log2(70) in Q15 */
  8172  
  8173  	/* Used to deactivate e.g. LSF interpolation and fluctuation reduction */
  8174  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffirst_frame_after_reset = 1
  8175  
  8176  	/* Initialize Silk VAD */
  8177  	ret = ret + (SKP_Silk_VAD_Init(tls, (psEnc /* &.sCmn */ + 15032 /* &.sVAD */)))
  8178  
  8179  	/* Initialize NSQ */
  8180  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsNSQ.Fprev_inv_gain_Q16 = 65536
  8181  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsNSQ_LBRR.Fprev_inv_gain_Q16 = 65536
  8182  
  8183  	return ret
  8184  }
  8185  
  8186  /* sum= for(i=0;i<len;i++)inVec1[i]*inVec2[i];      ---        inner product    */
  8187  /* Note for ARM asm:                                                            */
  8188  /*        * inVec1 and inVec2 should be at least 2 byte aligned.    (Or defined as short/int16) */
  8189  /*        * len should be positive 16bit integer.                               */
  8190  /*        * only when len>6, memory access can be reduced by half.              */
  8191  
  8192  func SKP_Silk_inner_prod_aligned(tls *libc.TLS, inVec1 uintptr, inVec2 uintptr, len int32) int32 { /* SKP_Silk_inner_prod_aligned.c:43:11: */
  8193  	var i int32
  8194  	var sum int32 = 0
  8195  	for i = 0; i < len; i++ {
  8196  		sum = ((sum) + ((int32(*(*int16)(unsafe.Pointer(inVec1 + uintptr(i)*2)))) * (int32(*(*int16)(unsafe.Pointer(inVec2 + uintptr(i)*2))))))
  8197  	}
  8198  	return sum
  8199  }
  8200  
  8201  func SKP_Silk_inner_prod16_aligned_64(tls *libc.TLS, inVec1 uintptr, inVec2 uintptr, len int32) int64_t { /* SKP_Silk_inner_prod_aligned.c:57:11: */
  8202  	var i int32
  8203  	var sum int64_t = int64(0)
  8204  	for i = 0; i < len; i++ {
  8205  		sum = ((sum) + (int64_t((int32(*(*int16)(unsafe.Pointer(inVec1 + uintptr(i)*2)))) * (int32(*(*int16)(unsafe.Pointer(inVec2 + uintptr(i)*2)))))))
  8206  	}
  8207  	return sum
  8208  }
  8209  
  8210  /* Interpolate two vectors */
  8211  func SKP_Silk_interpolate(tls *libc.TLS, xi uintptr, x0 uintptr, x1 uintptr, ifact_Q2 int32, d int32) { /* SKP_Silk_interpolate.c:31:6: */
  8212  	var i int32
  8213  
  8214  	for i = 0; i < d; i++ {
  8215  		*(*int32)(unsafe.Pointer(xi + uintptr(i)*4)) = (*(*int32)(unsafe.Pointer(x0 + uintptr(i)*4)) + (((*(*int32)(unsafe.Pointer(x1 + uintptr(i)*4)) - *(*int32)(unsafe.Pointer(x0 + uintptr(i)*4))) * (ifact_Q2)) >> (2)))
  8216  	}
  8217  }
  8218  
  8219  /* Step up function, converts reflection coefficients to prediction coefficients */
  8220  func SKP_Silk_k2a(tls *libc.TLS, A_Q24 uintptr, rc_Q15 uintptr, order int32) { /* SKP_Silk_k2a.c:40:6: */
  8221  	bp := tls.Alloc(64)
  8222  	defer tls.Free(64)
  8223  
  8224  	var k int32
  8225  	var n int32
  8226  	// var Atmp [16]int32 at bp, 64
  8227  
  8228  	for k = 0; k < order; k++ {
  8229  		for n = 0; n < k; n++ {
  8230  			*(*int32)(unsafe.Pointer(bp /* &Atmp[0] */ + uintptr(n)*4)) = *(*int32)(unsafe.Pointer(A_Q24 + uintptr(n)*4))
  8231  		}
  8232  		for n = 0; n < k; n++ {
  8233  			*(*int32)(unsafe.Pointer(A_Q24 + uintptr(n)*4)) = ((*(*int32)(unsafe.Pointer(A_Q24 + uintptr(n)*4))) + (((((*(*int32)(unsafe.Pointer(bp /* &Atmp[0] */ + uintptr(((k-n)-1))*4))) << (1)) >> 16) * (int32(*(*int16)(unsafe.Pointer(rc_Q15 + uintptr(k)*2))))) + (((((*(*int32)(unsafe.Pointer(bp /* &Atmp[0] */ + uintptr(((k-n)-1))*4))) << (1)) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(rc_Q15 + uintptr(k)*2))))) >> 16)))
  8234  		}
  8235  		*(*int32)(unsafe.Pointer(A_Q24 + uintptr(k)*4)) = -((int32(*(*int16)(unsafe.Pointer(rc_Q15 + uintptr(k)*2)))) << (9))
  8236  	}
  8237  }
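
// Editor's sketch (illustrative only): SKP_Silk_k2a above is the standard
// step-up recursion from reflection coefficients to prediction coefficients,
// obscured by the Q15/Q24 fixed-point macros. A hypothetical floating-point
// equivalent:
func exampleK2A(rc []float64) []float64 {
	a := make([]float64, len(rc))
	for k := range rc {
		prev := append([]float64(nil), a[:k]...)
		for n := 0; n < k; n++ {
			a[n] = prev[n] + prev[k-n-1]*rc[k]
		}
		a[k] = -rc[k]
	}
	return a
}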
  8238  
  8239  /* Step up function, converts reflection coefficients to prediction coefficients */
  8240  func SKP_Silk_k2a_Q16(tls *libc.TLS, A_Q24 uintptr, rc_Q16 uintptr, order int32) { /* SKP_Silk_k2a_Q16.c:40:6: */
  8241  	bp := tls.Alloc(64)
  8242  	defer tls.Free(64)
  8243  
  8244  	var k int32
  8245  	var n int32
  8246  	// var Atmp [16]int32 at bp, 64
  8247  
  8248  	for k = 0; k < order; k++ {
  8249  		for n = 0; n < k; n++ {
  8250  			*(*int32)(unsafe.Pointer(bp /* &Atmp[0] */ + uintptr(n)*4)) = *(*int32)(unsafe.Pointer(A_Q24 + uintptr(n)*4))
  8251  		}
  8252  		for n = 0; n < k; n++ {
  8253  			*(*int32)(unsafe.Pointer(A_Q24 + uintptr(n)*4)) = (((*(*int32)(unsafe.Pointer(A_Q24 + uintptr(n)*4))) + ((((*(*int32)(unsafe.Pointer(bp /* &Atmp[0] */ + uintptr(((k-n)-1))*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(rc_Q16 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp /* &Atmp[0] */ + uintptr(((k-n)-1))*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(rc_Q16 + uintptr(k)*4)))))) >> 16))) + ((*(*int32)(unsafe.Pointer(bp /* &Atmp[0] */ + uintptr(((k-n)-1))*4))) * (func() int32 {
  8254  				if (16) == 1 {
  8255  					return (((*(*int32)(unsafe.Pointer(rc_Q16 + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(rc_Q16 + uintptr(k)*4))) & 1))
  8256  				}
  8257  				return ((((*(*int32)(unsafe.Pointer(rc_Q16 + uintptr(k)*4))) >> ((16) - 1)) + 1) >> 1)
  8258  			}())))
  8259  		}
  8260  		*(*int32)(unsafe.Pointer(A_Q24 + uintptr(k)*4)) = -((*(*int32)(unsafe.Pointer(rc_Q16 + uintptr(k)*4))) << (8))
  8261  	}
  8262  }
  8263  
  8264  /* Resets LBRR buffer, used if packet size changes */
  8265  func SKP_Silk_LBRR_reset(tls *libc.TLS, psEncC uintptr) { /* SKP_Silk_LBRR_reset.c:31:6: */
  8266  	var i int32
  8267  
  8268  	for i = 0; i < 2; i++ {
  8269  		(*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEncC + 16264 /* &.LBRR_buffer */) + uintptr(i)*1032)).Fusage = 0
  8270  	}
  8271  }
  8272  
  8273  /* Approximation of 128 * log2() (very close inverse of approx 2^() below) */
  8274  /* Convert input to a log scale    */
  8275  func SKP_Silk_lin2log(tls *libc.TLS, inLin int32) int32 { /* SKP_Silk_lin2log.c:40:11: */
  8276  	bp := tls.Alloc(8)
  8277  	defer tls.Free(8)
  8278  
  8279  	// var lz int32 at bp, 4
  8280  
  8281  	// var frac_Q7 int32 at bp+4, 4
  8282  
  8283  	SKP_Silk_CLZ_FRAC(tls, inLin, bp /* &lz */, bp+4 /* &frac_Q7 */)
  8284  
  8285  	/* Piece-wise parabolic approximation */
  8286  	return (((31 - *(*int32)(unsafe.Pointer(bp /* lz */))) << (7)) + ((*(*int32)(unsafe.Pointer(bp + 4 /* frac_Q7 */))) + (((((*(*int32)(unsafe.Pointer(bp + 4 /* frac_Q7 */))) * (128 - *(*int32)(unsafe.Pointer(bp + 4 /* frac_Q7 */)))) >> 16) * (int32(int16(179)))) + (((((*(*int32)(unsafe.Pointer(bp + 4 /* frac_Q7 */))) * (128 - *(*int32)(unsafe.Pointer(bp + 4 /* frac_Q7 */)))) & 0x0000FFFF) * (int32(int16(179)))) >> 16))))
  8287  }
  8288  
  8289  /* Approximation of 2^() (very close inverse of SKP_Silk_lin2log()) */
  8290  /* Convert input to a linear scale    */
  8291  func SKP_Silk_log2lin(tls *libc.TLS, inLog_Q7 int32) int32 { /* SKP_Silk_log2lin.c:40:11: */
  8292  	var out int32
  8293  	var frac_Q7 int32
  8294  
  8295  	if inLog_Q7 < 0 {
  8296  		return 0
  8297  	} else if inLog_Q7 >= (int32(31) << 7) {
  8298  		/* Saturate, and prevent wrap-around */
  8299  		return 0x7FFFFFFF
  8300  	}
  8301  
  8302  	out = (int32((1)) << ((inLog_Q7) >> (7)))
  8303  	frac_Q7 = (inLog_Q7 & 0x7F)
  8304  	if inLog_Q7 < 2048 {
  8305  		/* Piece-wise parabolic approximation */
  8306  		out = ((out) + (((out) * ((frac_Q7) + (((((frac_Q7) * (128 - frac_Q7)) >> 16) * (int32(int16(-174)))) + (((((frac_Q7) * (128 - frac_Q7)) & 0x0000FFFF) * (int32(int16(-174)))) >> 16)))) >> (7)))
  8307  	} else {
  8308  		/* Piece-wise parabolic approximation */
  8309  		out = ((out) + (((out) >> (7)) * ((frac_Q7) + (((((frac_Q7) * (128 - frac_Q7)) >> 16) * (int32(int16(-174)))) + (((((frac_Q7) * (128 - frac_Q7)) & 0x0000FFFF) * (int32(int16(-174)))) >> 16)))))
  8310  	}
  8311  	return out
  8312  }
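
// Editor's sketch (illustrative only): SKP_Silk_lin2log and SKP_Silk_log2lin
// above approximate 128*log2(x) and 2^(y/128). A hypothetical simplified
// version of the log2lin structure, valid for inLogQ7 in [0, 2048) so that
// the intermediate products stay well within int32 range:
func exampleLog2LinSketch(inLogQ7 int32) int32 {
	out := int32(1) << uint(inLogQ7>>7) // 2^(integer part of the exponent)
	frac := inLogQ7 & 0x7F              // fractional part of the exponent, Q7
	// same linear-plus-parabolic refinement as the generated code
	corr := frac + (frac*(128-frac)*(-174))>>16
	return out + (out*corr)>>7
}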
  8313  
  8314  /* Compute inverse of LPC prediction gain, and                          */
  8315  /* test if LPC coefficients are stable (all poles within unit circle)   */
  8316  func LPC_inverse_pred_gain_QA(tls *libc.TLS, invGain_Q30 uintptr, A_QA uintptr, order int32) int32 { /* SKP_Silk_LPC_inv_pred_gain.c:43:16: */
  8317  	var k int32
  8318  	var n int32
  8319  	var headrm int32
  8320  	var rc_Q31 int32
  8321  	var rc_mult1_Q30 int32
  8322  	var rc_mult2_Q16 int32
  8323  	var tmp_QA int32
  8324  	var Aold_QA uintptr
  8325  	var Anew_QA uintptr
  8326  
  8327  	Anew_QA = A_QA + uintptr((order&1))*64
  8328  
  8329  	*(*int32)(unsafe.Pointer(invGain_Q30)) = (int32(1) << 30)
  8330  	for k = (order - 1); k > 0; k-- {
  8331  		/* Check for stability */
  8332  		if (*(*int32)(unsafe.Pointer(Anew_QA + uintptr(k)*4)) > SKP_FIX_CONST(tls, 0.99975, 16)) || (*(*int32)(unsafe.Pointer(Anew_QA + uintptr(k)*4)) < -SKP_FIX_CONST(tls, 0.99975, 16)) {
  8333  			return 1
  8334  		}
  8335  
  8336  		/* Set RC equal to negated AR coef */
  8337  		rc_Q31 = -((*(*int32)(unsafe.Pointer(Anew_QA + uintptr(k)*4))) << (31 - 16))
  8338  
  8339  		/* rc_mult1_Q30 range: [ 1 : 2^30-1 ] */
  8340  		rc_mult1_Q30 = ((int32(0x7FFFFFFF) >> 1) - (int32(((int64_t(rc_Q31)) * (int64_t(rc_Q31))) >> (32))))
  8341  		/* reduce A_LIMIT if fails */
  8342  
  8343  		/* rc_mult2_Q16 range: [ 2^16 : SKP_int32_MAX ] */
  8344  		rc_mult2_Q16 = SKP_INVERSE32_varQ(tls, rc_mult1_Q30, 46) /* 16 = 46 - 30 */
  8345  
  8346  		/* Update inverse gain */
  8347  		/* invGain_Q30 range: [ 0 : 2^30 ] */
  8348  		*(*int32)(unsafe.Pointer(invGain_Q30)) = ((int32(((int64_t(*(*int32)(unsafe.Pointer(invGain_Q30)))) * (int64_t(rc_mult1_Q30))) >> (32))) << (2))
  8349  
  8350  		/* Swap pointers */
  8351  		Aold_QA = Anew_QA
  8352  		Anew_QA = A_QA + uintptr((k&1))*64
  8353  
  8354  		/* Update AR coefficient */
  8355  		headrm = (SKP_Silk_CLZ32(tls, rc_mult2_Q16) - 1)
  8356  		rc_mult2_Q16 = ((rc_mult2_Q16) << (headrm)) /* Q: 16 + headrm */
  8357  		for n = 0; n < k; n++ {
  8358  			tmp_QA = (*(*int32)(unsafe.Pointer(Aold_QA + uintptr(n)*4)) - ((int32(((int64_t(*(*int32)(unsafe.Pointer(Aold_QA + uintptr(((k-n)-1))*4)))) * (int64_t(rc_Q31))) >> (32))) << (1)))
  8359  			*(*int32)(unsafe.Pointer(Anew_QA + uintptr(n)*4)) = ((int32(((int64_t(tmp_QA)) * (int64_t(rc_mult2_Q16))) >> (32))) << (16 - headrm))
  8360  		}
  8361  	}
  8362  
  8363  	/* Check for stability */
  8364  	if (*(*int32)(unsafe.Pointer(Anew_QA)) > SKP_FIX_CONST(tls, 0.99975, 16)) || (*(*int32)(unsafe.Pointer(Anew_QA)) < -SKP_FIX_CONST(tls, 0.99975, 16)) {
  8365  		return 1
  8366  	}
  8367  
  8368  	/* Set RC equal to negated AR coef */
  8369  	rc_Q31 = -((*(*int32)(unsafe.Pointer(Anew_QA))) << (31 - 16))
  8370  
  8371  	/* Range: [ 1 : 2^30 ] */
  8372  	rc_mult1_Q30 = ((int32(0x7FFFFFFF) >> 1) - (int32(((int64_t(rc_Q31)) * (int64_t(rc_Q31))) >> (32))))
  8373  
  8374  	/* Update inverse gain */
  8375  	/* Range: [ 0 : 2^30 ] */
  8376  	*(*int32)(unsafe.Pointer(invGain_Q30)) = ((int32(((int64_t(*(*int32)(unsafe.Pointer(invGain_Q30)))) * (int64_t(rc_mult1_Q30))) >> (32))) << (2))
  8377  
  8378  	return 0
  8379  }
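
// Editor's sketch (illustrative only): the routine above is a Levinson
// step-down from AR coefficients to reflection coefficients; the product of
// the (1 - rc_k^2) terms is the inverse prediction gain, and any coefficient
// with magnitude near 1 marks an unstable filter. A hypothetical
// floating-point equivalent:
func exampleInvPredGain(a []float64) (invGain float64, unstable bool) {
	invGain = 1.0
	cur := append([]float64(nil), a...)
	for k := len(cur) - 1; k >= 0; k-- {
		if cur[k] > 0.99975 || cur[k] < -0.99975 {
			return 0, true
		}
		rc := -cur[k]       // reflection coefficient = negated AR coefficient
		mult := 1.0 - rc*rc // in (0, 1]
		invGain *= mult
		next := make([]float64, k)
		for n := 0; n < k; n++ {
			next[n] = (cur[n] - cur[k-n-1]*rc) / mult
		}
		cur = next
	}
	return invGain, false
}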
  8380  
  8381  /* For input in Q12 domain */
  8382  func SKP_Silk_LPC_inverse_pred_gain(tls *libc.TLS, invGain_Q30 uintptr, A_Q12 uintptr, order int32) int32 { /* SKP_Silk_LPC_inv_pred_gain.c:113:9: */
  8383  	bp := tls.Alloc(128)
  8384  	defer tls.Free(128)
  8385  
  8386  	var k int32
  8387  	// var Atmp_QA [2][16]int32 at bp, 128
  8388  
  8389  	var Anew_QA uintptr
  8390  
  8391  	Anew_QA = (bp /* &Atmp_QA[0] */ + uintptr((order&1))*64)
  8392  
  8393  	/* Increase Q domain of the AR coefficients */
  8394  	for k = 0; k < order; k++ {
  8395  		*(*int32)(unsafe.Pointer(Anew_QA + uintptr(k)*4)) = ((int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr(k)*2)))) << (16 - 12))
  8396  	}
  8397  
  8398  	return LPC_inverse_pred_gain_QA(tls, invGain_Q30, bp /* &Atmp_QA[0] */, order)
  8399  }
  8400  
  8401  /* For input in Q24 domain */
  8402  func SKP_Silk_LPC_inverse_pred_gain_Q24(tls *libc.TLS, invGain_Q30 uintptr, A_Q24 uintptr, order int32) int32 { /* SKP_Silk_LPC_inv_pred_gain.c:134:9: */
  8403  	bp := tls.Alloc(128)
  8404  	defer tls.Free(128)
  8405  
  8406  	var k int32
  8407  	// var Atmp_QA [2][16]int32 at bp, 128
  8408  
  8409  	var Anew_QA uintptr
  8410  
  8411  	Anew_QA = (bp /* &Atmp_QA[0] */ + uintptr((order&1))*64)
  8412  
  8413  	/* Increase Q domain of the AR coefficients */
  8414  	for k = 0; k < order; k++ {
  8415  		*(*int32)(unsafe.Pointer(Anew_QA + uintptr(k)*4)) = func() int32 {
  8416  			if (24 - 16) == 1 {
  8417  				return (((*(*int32)(unsafe.Pointer(A_Q24 + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(A_Q24 + uintptr(k)*4))) & 1))
  8418  			}
  8419  			return ((((*(*int32)(unsafe.Pointer(A_Q24 + uintptr(k)*4))) >> ((24 - 16) - 1)) + 1) >> 1)
  8420  		}()
  8421  	}
  8422  
  8423  	return LPC_inverse_pred_gain_QA(tls, invGain_Q30, bp /* &Atmp_QA[0] */, order)
  8424  }
  8425  
  8426  /* even order AR filter */
  8427  func SKP_Silk_LPC_synthesis_filter(tls *libc.TLS, in uintptr, A_Q12 uintptr, Gain_Q26 int32, S uintptr, out uintptr, len int32, Order int32) { /* SKP_Silk_LPC_synthesis_filter.c:37:6: */
  8428  	var k int32
  8429  	var j int32
  8430  	var idx int32
  8431  	var Order_half int32 = ((Order) >> (1))
  8432  	var SA int32
  8433  	var SB int32
  8434  	var out32_Q10 int32
  8435  	var out32 int32
  8436  
  8437  	/* Order must be even */
  8438  
  8439  	/* S[] values are in Q14 */
  8440  	for k = 0; k < len; k++ {
  8441  		SA = *(*int32)(unsafe.Pointer(S + uintptr((Order-1))*4))
  8442  		out32_Q10 = 0
  8443  		for j = 0; j < (Order_half - 1); j++ {
  8444  			idx = (((int32(int16(2))) * (int32(int16(j)))) + 1)
  8445  			SB = *(*int32)(unsafe.Pointer(S + uintptr(((Order-1)-idx))*4))
  8446  			*(*int32)(unsafe.Pointer(S + uintptr(((Order-1)-idx))*4)) = SA
  8447  			out32_Q10 = ((out32_Q10) + ((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr((j<<1))*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr((j<<1))*2))))) >> 16)))
  8448  			out32_Q10 = ((out32_Q10) + ((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr(((j<<1)+1))*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr(((j<<1)+1))*2))))) >> 16)))
  8449  			SA = *(*int32)(unsafe.Pointer(S + uintptr(((Order-2)-idx))*4))
  8450  			*(*int32)(unsafe.Pointer(S + uintptr(((Order-2)-idx))*4)) = SB
  8451  		}
  8452  
  8453  		/* unrolled loop: epilog */
  8454  		SB = *(*int32)(unsafe.Pointer(S))
  8455  		*(*int32)(unsafe.Pointer(S)) = SA
  8456  		out32_Q10 = ((out32_Q10) + ((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr((Order-2))*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr((Order-2))*2))))) >> 16)))
  8457  		out32_Q10 = ((out32_Q10) + ((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr((Order-1))*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr((Order-1))*2))))) >> 16)))
  8458  		/* apply gain to excitation signal and add to prediction */
  8459  		out32_Q10 = func() int32 {
  8460  			if ((uint32((out32_Q10) + ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))) & 0x80000000) == uint32(0) {
  8461  				return func() int32 {
  8462  					if ((uint32((out32_Q10) & ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))) & 0x80000000) != uint32(0) {
  8463  						return libc.Int32FromUint32(0x80000000)
  8464  					}
  8465  					return ((out32_Q10) + ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))
  8466  				}()
  8467  			}
  8468  			return func() int32 {
  8469  				if ((uint32((out32_Q10) | ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))) & 0x80000000) == uint32(0) {
  8470  					return 0x7FFFFFFF
  8471  				}
  8472  				return ((out32_Q10) + ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))
  8473  			}()
  8474  		}()
  8475  
  8476  		/* scale to Q0 */
  8477  		out32 = func() int32 {
  8478  			if (10) == 1 {
  8479  				return (((out32_Q10) >> 1) + ((out32_Q10) & 1))
  8480  			}
  8481  			return ((((out32_Q10) >> ((10) - 1)) + 1) >> 1)
  8482  		}()
  8483  
  8484  		/* saturate output */
  8485  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
  8486  			if (out32) > 0x7FFF {
  8487  				return int16(0x7FFF)
  8488  			}
  8489  			return func() int16 {
  8490  				if (out32) < (int32(libc.Int16FromInt32(0x8000))) {
  8491  					return libc.Int16FromInt32(0x8000)
  8492  				}
  8493  				return int16(out32)
  8494  			}()
  8495  		}()
  8496  
  8497  		/* move result into delay line */
  8498  		*(*int32)(unsafe.Pointer(S + uintptr((Order-1))*4)) = ((func() int32 {
  8499  			if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  8500  				return func() int32 {
  8501  					if (out32_Q10) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  8502  						return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  8503  					}
  8504  					return func() int32 {
  8505  						if (out32_Q10) < (int32((0x7FFFFFFF)) >> (4)) {
  8506  							return (int32((0x7FFFFFFF)) >> (4))
  8507  						}
  8508  						return out32_Q10
  8509  					}()
  8510  				}()
  8511  			}
  8512  			return func() int32 {
  8513  				if (out32_Q10) > (int32((0x7FFFFFFF)) >> (4)) {
  8514  					return (int32((0x7FFFFFFF)) >> (4))
  8515  				}
  8516  				return func() int32 {
  8517  					if (out32_Q10) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  8518  						return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  8519  					}
  8520  					return out32_Q10
  8521  				}()
  8522  			}()
  8523  		}()) << (4))
  8524  	}
  8525  }
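
// Editor's sketch (illustrative only): stripped of the Q10/Q14 bookkeeping
// and saturation, the synthesis filter above is a plain AR filter. Each
// output is the gain-scaled excitation plus a prediction from the most
// recent samples held in the delay line s (s[len(s)-1] is newest); assumes
// len(s) equals len(a).
func exampleLPCSynthesis(in, a []float64, gain float64, s []float64) []float64 {
	out := make([]float64, len(in))
	for k := range in {
		pred := 0.0
		for j := range a {
			pred += a[j] * s[len(s)-1-j]
		}
		y := gain*in[k] + pred
		copy(s, s[1:]) // drop the oldest state sample
		s[len(s)-1] = y
		out[k] = y
	}
	return out
}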
  8526  
  8527  /* 16th order AR filter */
  8528  func SKP_Silk_LPC_synthesis_order16(tls *libc.TLS, in uintptr, A_Q12 uintptr, Gain_Q26 int32, S uintptr, out uintptr, len int32) { /* SKP_Silk_LPC_synthesis_order16.c:37:6: */
  8529  	var k int32
  8530  	var SA int32
  8531  	var SB int32
  8532  	var out32_Q10 int32
  8533  	var out32 int32
  8534  	for k = 0; k < len; k++ {
  8535  		/* unrolled loop: prolog */
  8536  		/* multiply-add two prediction coefficients per iteration */
  8537  		SA = *(*int32)(unsafe.Pointer(S + 15*4))
  8538  		SB = *(*int32)(unsafe.Pointer(S + 14*4))
  8539  		*(*int32)(unsafe.Pointer(S + 14*4)) = SA
  8540  		out32_Q10 = ((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12))))) >> 16))
  8541  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 1*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 1*2))))) >> 16)))))
  8542  		SA = *(*int32)(unsafe.Pointer(S + 13*4))
  8543  		*(*int32)(unsafe.Pointer(S + 13*4)) = SB
  8544  
  8545  		/* unrolled loop: main loop */
  8546  		SB = *(*int32)(unsafe.Pointer(S + 12*4))
  8547  		*(*int32)(unsafe.Pointer(S + 12*4)) = SA
  8548  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 2*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 2*2))))) >> 16)))))
  8549  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 3*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 3*2))))) >> 16)))))
  8550  		SA = *(*int32)(unsafe.Pointer(S + 11*4))
  8551  		*(*int32)(unsafe.Pointer(S + 11*4)) = SB
  8552  
  8553  		SB = *(*int32)(unsafe.Pointer(S + 10*4))
  8554  		*(*int32)(unsafe.Pointer(S + 10*4)) = SA
  8555  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 4*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 4*2))))) >> 16)))))
  8556  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 5*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 5*2))))) >> 16)))))
  8557  		SA = *(*int32)(unsafe.Pointer(S + 9*4))
  8558  		*(*int32)(unsafe.Pointer(S + 9*4)) = SB
  8559  
  8560  		SB = *(*int32)(unsafe.Pointer(S + 8*4))
  8561  		*(*int32)(unsafe.Pointer(S + 8*4)) = SA
  8562  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 6*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 6*2))))) >> 16)))))
  8563  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 7*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 7*2))))) >> 16)))))
  8564  		SA = *(*int32)(unsafe.Pointer(S + 7*4))
  8565  		*(*int32)(unsafe.Pointer(S + 7*4)) = SB
  8566  
  8567  		SB = *(*int32)(unsafe.Pointer(S + 6*4))
  8568  		*(*int32)(unsafe.Pointer(S + 6*4)) = SA
  8569  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 8*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 8*2))))) >> 16)))))
  8570  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 9*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 9*2))))) >> 16)))))
  8571  		SA = *(*int32)(unsafe.Pointer(S + 5*4))
  8572  		*(*int32)(unsafe.Pointer(S + 5*4)) = SB
  8573  
  8574  		SB = *(*int32)(unsafe.Pointer(S + 4*4))
  8575  		*(*int32)(unsafe.Pointer(S + 4*4)) = SA
  8576  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 10*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 10*2))))) >> 16)))))
  8577  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 11*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 11*2))))) >> 16)))))
  8578  		SA = *(*int32)(unsafe.Pointer(S + 3*4))
  8579  		*(*int32)(unsafe.Pointer(S + 3*4)) = SB
  8580  
  8581  		SB = *(*int32)(unsafe.Pointer(S + 2*4))
  8582  		*(*int32)(unsafe.Pointer(S + 2*4)) = SA
  8583  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 12*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 12*2))))) >> 16)))))
  8584  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 13*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 13*2))))) >> 16)))))
  8585  		SA = *(*int32)(unsafe.Pointer(S + 1*4))
  8586  		*(*int32)(unsafe.Pointer(S + 1*4)) = SB
  8587  
  8588  		/* unrolled loop: epilog */
  8589  		SB = *(*int32)(unsafe.Pointer(S))
  8590  		*(*int32)(unsafe.Pointer(S)) = SA
  8591  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 14*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 14*2))))) >> 16)))))
  8592  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 15*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 15*2))))) >> 16)))))
  8593  
  8594  		/* unrolled loop: end */
  8595  		/* apply gain to excitation signal and add to prediction */
  8596  		out32_Q10 = func() int32 {
  8597  			if ((uint32((out32_Q10) + ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))) & 0x80000000) == uint32(0) {
  8598  				return func() int32 {
  8599  					if ((uint32((out32_Q10) & ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))) & 0x80000000) != uint32(0) {
  8600  						return libc.Int32FromUint32(0x80000000)
  8601  					}
  8602  					return ((out32_Q10) + ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))
  8603  				}()
  8604  			}
  8605  			return func() int32 {
  8606  				if ((uint32((out32_Q10) | ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))) & 0x80000000) == uint32(0) {
  8607  					return 0x7FFFFFFF
  8608  				}
  8609  				return ((out32_Q10) + ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))
  8610  			}()
  8611  		}()
  8612  
  8613  		/* scale to Q0 */
  8614  		out32 = func() int32 {
  8615  			if (10) == 1 {
  8616  				return (((out32_Q10) >> 1) + ((out32_Q10) & 1))
  8617  			}
  8618  			return ((((out32_Q10) >> ((10) - 1)) + 1) >> 1)
  8619  		}()
  8620  
  8621  		/* saturate output */
  8622  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
  8623  			if (out32) > 0x7FFF {
  8624  				return int16(0x7FFF)
  8625  			}
  8626  			return func() int16 {
  8627  				if (out32) < (int32(libc.Int16FromInt32(0x8000))) {
  8628  					return libc.Int16FromInt32(0x8000)
  8629  				}
  8630  				return int16(out32)
  8631  			}()
  8632  		}()
  8633  
  8634  		/* move result into delay line */
  8635  		*(*int32)(unsafe.Pointer(S + 15*4)) = ((func() int32 {
  8636  			if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  8637  				return func() int32 {
  8638  					if (out32_Q10) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  8639  						return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  8640  					}
  8641  					return func() int32 {
  8642  						if (out32_Q10) < (int32((0x7FFFFFFF)) >> (4)) {
  8643  							return (int32((0x7FFFFFFF)) >> (4))
  8644  						}
  8645  						return out32_Q10
  8646  					}()
  8647  				}()
  8648  			}
  8649  			return func() int32 {
  8650  				if (out32_Q10) > (int32((0x7FFFFFFF)) >> (4)) {
  8651  					return (int32((0x7FFFFFFF)) >> (4))
  8652  				}
  8653  				return func() int32 {
  8654  					if (out32_Q10) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  8655  						return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  8656  					}
  8657  					return out32_Q10
  8658  				}()
  8659  			}()
  8660  		}()) << (4))
  8661  	}
  8662  }
  8663  
  8664  /* Helper function, that interpolates the filter taps */
  8665  func SKP_Silk_LP_interpolate_filter_taps(tls *libc.TLS, B_Q28 uintptr, A_Q28 uintptr, ind int32, fac_Q16 int32) { /* SKP_Silk_LP_variable_cutoff.c:40:17: */
  8666  	var nb int32
  8667  	var na int32
  8668  
  8669  	if ind < (5 - 1) {
  8670  		if fac_Q16 > 0 {
  8671  			if fac_Q16 == (func() int32 {
  8672  				if (fac_Q16) > 0x7FFF {
  8673  					return 0x7FFF
  8674  				}
  8675  				return func() int32 {
  8676  					if (fac_Q16) < (int32(libc.Int16FromInt32(0x8000))) {
  8677  						return int32(libc.Int16FromInt32(0x8000))
  8678  					}
  8679  					return fac_Q16
  8680  				}()
  8681  			}()) { /* fac_Q16 is in range of a 16-bit int */
  8682  				/* Piece-wise linear interpolation of B and A */
  8683  				for nb = 0; nb < 3; nb++ {
  8684  					*(*int32)(unsafe.Pointer(B_Q28 + uintptr(nb)*4)) = ((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr(ind)*12) + uintptr(nb)*4))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr((ind+1))*12) + uintptr(nb)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr(ind)*12) + uintptr(nb)*4))) >> 16) * (int32(int16(fac_Q16)))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr((ind+1))*12) + uintptr(nb)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr(ind)*12) + uintptr(nb)*4))) & 0x0000FFFF) * (int32(int16(fac_Q16)))) >> 16)))
  8685  				}
  8686  				for na = 0; na < 2; na++ {
  8687  					*(*int32)(unsafe.Pointer(A_Q28 + uintptr(na)*4)) = ((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr(ind)*8) + uintptr(na)*4))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr((ind+1))*8) + uintptr(na)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr(ind)*8) + uintptr(na)*4))) >> 16) * (int32(int16(fac_Q16)))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr((ind+1))*8) + uintptr(na)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr(ind)*8) + uintptr(na)*4))) & 0x0000FFFF) * (int32(int16(fac_Q16)))) >> 16)))
  8688  				}
  8689  			} else if fac_Q16 == (int32(1) << 15) { /* Neither fac_Q16 nor ( ( 1 << 16 ) - fac_Q16 ) is in range of a 16-bit int */
  8690  
  8691  				/* Piece-wise linear interpolation of B and A */
  8692  				for nb = 0; nb < 3; nb++ {
  8693  					*(*int32)(unsafe.Pointer(B_Q28 + uintptr(nb)*4)) = ((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr(ind)*12) + uintptr(nb)*4)) + *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr((ind+1))*12) + uintptr(nb)*4))) >> (1))
  8694  				}
  8695  				for na = 0; na < 2; na++ {
  8696  					*(*int32)(unsafe.Pointer(A_Q28 + uintptr(na)*4)) = ((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr(ind)*8) + uintptr(na)*4)) + *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr((ind+1))*8) + uintptr(na)*4))) >> (1))
  8697  				}
  8698  			} else { /* ( ( 1 << 16 ) - fac_Q16 ) is in range of a 16-bit int */
  8699  
  8700  				/* Piece-wise linear interpolation of B and A */
  8701  				for nb = 0; nb < 3; nb++ {
  8702  					*(*int32)(unsafe.Pointer(B_Q28 + uintptr(nb)*4)) = ((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr((ind+1))*12) + uintptr(nb)*4))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr(ind)*12) + uintptr(nb)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr((ind+1))*12) + uintptr(nb)*4))) >> 16) * (int32((int16((int32(1) << 16) - fac_Q16))))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr(ind)*12) + uintptr(nb)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr((ind+1))*12) + uintptr(nb)*4))) & 0x0000FFFF) * (int32((int16((int32(1) << 16) - fac_Q16))))) >> 16)))
  8703  				}
  8704  				for na = 0; na < 2; na++ {
  8705  					*(*int32)(unsafe.Pointer(A_Q28 + uintptr(na)*4)) = ((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr((ind+1))*8) + uintptr(na)*4))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr(ind)*8) + uintptr(na)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr((ind+1))*8) + uintptr(na)*4))) >> 16) * (int32((int16((int32(1) << 16) - fac_Q16))))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr(ind)*8) + uintptr(na)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr((ind+1))*8) + uintptr(na)*4))) & 0x0000FFFF) * (int32((int16((int32(1) << 16) - fac_Q16))))) >> 16)))
  8706  				}
  8707  			}
  8708  		} else {
  8709  			libc.Xmemcpy(tls, B_Q28, (uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr(ind)*12), (uint64(3) * uint64(unsafe.Sizeof(int32(0)))))
  8710  			libc.Xmemcpy(tls, A_Q28, (uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr(ind)*8), (uint64(2) * uint64(unsafe.Sizeof(int32(0)))))
  8711  		}
  8712  	} else {
  8713  		libc.Xmemcpy(tls, B_Q28, (uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + 4*12), (uint64(3) * uint64(unsafe.Sizeof(int32(0)))))
  8714  		libc.Xmemcpy(tls, A_Q28, (uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + 4*8), (uint64(2) * uint64(unsafe.Sizeof(int32(0)))))
  8715  	}
  8716  }
  8717  
  8718  /* Low-pass filter with variable cutoff frequency based on  */
  8719  /* piece-wise linear interpolation between elliptic filters */
  8720  /* Start by setting psEncC->transition_frame_no = 1;            */
  8721  /* Deactivate by setting psEncC->transition_frame_no = 0;   */
  8722  func SKP_Silk_LP_variable_cutoff(tls *libc.TLS, psLP uintptr, out uintptr, in uintptr, frame_length int32) { /* SKP_Silk_LP_variable_cutoff.c:115:6: */
  8723  	bp := tls.Alloc(20)
  8724  	defer tls.Free(20)
  8725  
  8726  	// var B_Q28 [3]int32 at bp, 12
  8727  
  8728  	// var A_Q28 [2]int32 at bp+12, 8
  8729  
  8730  	var fac_Q16 int32 = 0
  8731  	var ind int32 = 0
  8732  
  8733  	/* Interpolate filter coefficients if needed */
  8734  	if (*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no > 0 {
  8735  		if (*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Fmode == 0 {
  8736  			if (*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no < (2560 / 20) {
  8737  				/* Calculate index and interpolation factor for interpolation */
  8738  				fac_Q16 = (((*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no) << (16 - 5))
  8739  				ind = ((fac_Q16) >> (16))
  8740  				fac_Q16 = fac_Q16 - ((ind) << (16))
  8741  
  8742  				/* Interpolate filter coefficients */
  8743  				SKP_Silk_LP_interpolate_filter_taps(tls, bp /* &B_Q28[0] */, bp+12 /* &A_Q28[0] */, ind, fac_Q16)
  8744  
  8745  				/* Increment transition frame number for next frame */
  8746  				(*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no++
  8747  
  8748  			} else {
  8749  
  8750  				/* End of transition phase */
  8751  				SKP_Silk_LP_interpolate_filter_taps(tls, bp /* &B_Q28[0] */, bp+12 /* &A_Q28[0] */, (5 - 1), 0)
  8752  			}
  8753  		} else {
  8754  
  8755  			if (*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no < (5120 / 20) {
  8756  				/* Calculate index and interpolation factor for interpolation */
  8757  				fac_Q16 = (((5120 / 20) - (*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no) << (16 - 6))
  8758  				ind = ((fac_Q16) >> (16))
  8759  				fac_Q16 = fac_Q16 - ((ind) << (16))
  8760  
  8761  				/* Interpolate filter coefficients */
  8762  				SKP_Silk_LP_interpolate_filter_taps(tls, bp /* &B_Q28[0] */, bp+12 /* &A_Q28[0] */, ind, fac_Q16)
  8763  
  8764  				/* Increment transition frame number for next frame */
  8765  				(*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no++
  8766  
  8767  			} else {
  8768  
  8769  				/* End of transition phase */
  8770  				SKP_Silk_LP_interpolate_filter_taps(tls, bp /* &B_Q28[0] */, bp+12 /* &A_Q28[0] */, 0, 0)
  8771  			}
  8772  		}
  8773  	}
  8774  
  8775  	if (*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no > 0 {
  8776  		/* ARMA low-pass filtering */
  8777  
  8778  		SKP_Silk_biquad_alt(tls, in, bp /* &B_Q28[0] */, bp+12 /* &A_Q28[0] */, psLP /* &.In_LP_State */, out, frame_length)
  8779  	} else {
  8780  		/* Instead of using the filter, copy input directly to output */
  8781  		libc.Xmemcpy(tls, out, in, (uint64(frame_length) * uint64(unsafe.Sizeof(int16(0)))))
  8782  	}
  8783  }
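
// Editor's sketch (illustrative only): in mode 0 the transition above maps
// the frame counter onto the 5-entry elliptic filter tables; a hypothetical
// helper showing how the table index and Q16 blend factor are derived:
func exampleLPIndexAndFactor(transitionFrameNo int32) (ind, facQ16 int32) {
	facQ16 = transitionFrameNo << (16 - 5) // 2048 per frame, 32 frames per table step
	ind = facQ16 >> 16                     // pair of neighbouring filters to blend
	facQ16 -= ind << 16                    // Q16 interpolation factor within the pair
	return ind, facQ16
}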
  8784  
  8785  // Q12 values (even)
  8786  var SKP_Silk_LSFCosTab_FIX_Q12 = [129]int32{
  8787  	8192, 8190, 8182, 8170,
  8788  	8152, 8130, 8104, 8072,
  8789  	8034, 7994, 7946, 7896,
  8790  	7840, 7778, 7714, 7644,
  8791  	7568, 7490, 7406, 7318,
  8792  	7226, 7128, 7026, 6922,
  8793  	6812, 6698, 6580, 6458,
  8794  	6332, 6204, 6070, 5934,
  8795  	5792, 5648, 5502, 5352,
  8796  	5198, 5040, 4880, 4718,
  8797  	4552, 4382, 4212, 4038,
  8798  	3862, 3684, 3502, 3320,
  8799  	3136, 2948, 2760, 2570,
  8800  	2378, 2186, 1990, 1794,
  8801  	1598, 1400, 1202, 1002,
  8802  	802, 602, 402, 202,
  8803  	0, -202, -402, -602,
  8804  	-802, -1002, -1202, -1400,
  8805  	-1598, -1794, -1990, -2186,
  8806  	-2378, -2570, -2760, -2948,
  8807  	-3136, -3320, -3502, -3684,
  8808  	-3862, -4038, -4212, -4382,
  8809  	-4552, -4718, -4880, -5040,
  8810  	-5198, -5352, -5502, -5648,
  8811  	-5792, -5934, -6070, -6204,
  8812  	-6332, -6458, -6580, -6698,
  8813  	-6812, -6922, -7026, -7128,
  8814  	-7226, -7318, -7406, -7490,
  8815  	-7568, -7644, -7714, -7778,
  8816  	-7840, -7896, -7946, -7994,
  8817  	-8034, -8072, -8104, -8130,
  8818  	-8152, -8170, -8182, -8190,
  8819  	-8192,
  8820  } /* SKP_Silk_LSF_cos_table.c:31:15 */
  8821  
  8822  func SKP_Silk_LTP_analysis_filter_FIX(tls *libc.TLS, LTP_res uintptr, x uintptr, LTPCoef_Q14 uintptr, pitchL uintptr, invGains_Q16 uintptr, subfr_length int32, pre_length int32) { /* SKP_Silk_LTP_analysis_filter_FIX.c:30:6: */
  8823  	bp := tls.Alloc(10)
  8824  	defer tls.Free(10)
  8825  
  8826  	var x_ptr uintptr
  8827  	var x_lag_ptr uintptr
  8828  	// var Btmp_Q14 [5]int16 at bp, 10
  8829  
  8830  	var LTP_res_ptr uintptr
  8831  	var k int32
  8832  	var i int32
  8833  	var j int32
  8834  	var LTP_est int32
  8835  
  8836  	x_ptr = x
  8837  	LTP_res_ptr = LTP_res
  8838  	for k = 0; k < 4; k++ {
  8839  
  8840  		x_lag_ptr = (x_ptr - uintptr(*(*int32)(unsafe.Pointer(pitchL + uintptr(k)*4)))*2)
  8841  		for i = 0; i < 5; i++ {
  8842  			*(*int16)(unsafe.Pointer(bp /* &Btmp_Q14[0] */ + uintptr(i)*2)) = *(*int16)(unsafe.Pointer(LTPCoef_Q14 + uintptr(((k*5)+i))*2))
  8843  		}
  8844  
  8845  		/* LTP analysis FIR filter */
  8846  		for i = 0; i < (subfr_length + pre_length); i++ {
  8847  			*(*int16)(unsafe.Pointer(LTP_res_ptr + uintptr(i)*2)) = *(*int16)(unsafe.Pointer(x_ptr + uintptr(i)*2))
  8848  
  8849  			/* Long-term prediction */
  8850  			LTP_est = ((int32(*(*int16)(unsafe.Pointer(x_lag_ptr + 2*2)))) * (int32(*(*int16)(unsafe.Pointer(bp /* &Btmp_Q14[0] */)))))
  8851  			for j = 1; j < 5; j++ {
  8852  				LTP_est = (int32((uint32(LTP_est)) + (uint32((int32(*(*int16)(unsafe.Pointer(x_lag_ptr + uintptr(((5/2)-j))*2)))) * (int32(*(*int16)(unsafe.Pointer(bp /* &Btmp_Q14[0] */ + uintptr(j)*2))))))))
  8853  			}
  8854  			LTP_est = func() int32 {
  8855  				if (14) == 1 {
  8856  					return (((LTP_est) >> 1) + ((LTP_est) & 1))
  8857  				}
  8858  				return ((((LTP_est) >> ((14) - 1)) + 1) >> 1)
  8859  			}() // round and -> Q0
  8860  
  8861  			/* Subtract long-term prediction */
  8862  			*(*int16)(unsafe.Pointer(LTP_res_ptr + uintptr(i)*2)) = func() int16 {
  8863  				if (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(i)*2))) - LTP_est) > 0x7FFF {
  8864  					return int16(0x7FFF)
  8865  				}
  8866  				return func() int16 {
  8867  					if (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(i)*2))) - LTP_est) < (int32(libc.Int16FromInt32(0x8000))) {
  8868  						return libc.Int16FromInt32(0x8000)
  8869  					}
  8870  					return (int16(int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(i)*2))) - LTP_est))
  8871  				}()
  8872  			}()
  8873  
  8874  			/* Scale residual */
  8875  			*(*int16)(unsafe.Pointer(LTP_res_ptr + uintptr(i)*2)) = (int16((((*(*int32)(unsafe.Pointer(invGains_Q16 + uintptr(k)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(LTP_res_ptr + uintptr(i)*2))))) + ((((*(*int32)(unsafe.Pointer(invGains_Q16 + uintptr(k)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(LTP_res_ptr + uintptr(i)*2))))) >> 16)))
  8876  
  8877  			x_lag_ptr += 2
  8878  		}
  8879  
  8880  		/* Update pointers */
  8881  		LTP_res_ptr += 2 * (uintptr(subfr_length + pre_length))
  8882  		x_ptr += 2 * (uintptr(subfr_length))
  8883  	}
  8884  }
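
// Editor's sketch (illustrative only): per subframe, the filter above
// predicts each sample from five samples centred one pitch lag back,
// subtracts the prediction and scales the residual by the inverse gain.
// A hypothetical floating-point version; lag must be at least 2 and start
// at least lag+2 so the 5-tap window stays inside x.
func exampleLTPAnalysis(x []float64, b [5]float64, lag, start int, invGain float64) []float64 {
	res := make([]float64, len(x)-start)
	for i := start; i < len(x); i++ {
		pred := 0.0
		for j := 0; j < 5; j++ {
			pred += b[j] * x[i-lag+2-j] // taps centred on x[i-lag]
		}
		res[i-start] = invGain * (x[i] - pred)
	}
	return res
}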
  8885  
  8886  /* Table containing trained thresholds for LTP scaling */
  8887  var LTPScaleThresholds_Q15 = [11]int16{
  8888  	int16(31129), int16(26214), int16(16384), int16(13107), int16(9830), int16(6554),
  8889  	int16(4915), int16(3276), int16(2621), int16(2458), int16(0),
  8890  } /* SKP_Silk_LTP_scale_ctrl_FIX.c:33:24 */
  8891  
  8892  func SKP_Silk_LTP_scale_ctrl_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr) { /* SKP_Silk_LTP_scale_ctrl_FIX.c:39:6: */
  8893  	var round_loss int32
  8894  	var frames_per_packet int32
  8895  	var g_out_Q5 int32
  8896  	var g_limit_Q15 int32
  8897  	var thrld1_Q15 int32
  8898  	var thrld2_Q15 int32
  8899  
  8900  	/* 1st order high-pass filter */
  8901  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FHPLTPredCodGain_Q7 = (SKP_max_int(tls, ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7-(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FprevLTPredCodGain_Q7), 0) +
  8902  		(func() int32 {
  8903  			if (1) == 1 {
  8904  				return ((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FHPLTPredCodGain_Q7) >> 1) + (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FHPLTPredCodGain_Q7) & 1))
  8905  			}
  8906  			return (((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FHPLTPredCodGain_Q7) >> ((1) - 1)) + 1) >> 1)
  8907  		}()))
  8908  
  8909  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FprevLTPredCodGain_Q7 = (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7
  8910  
  8911  	/* combine input and filtered input */
  8912  	g_out_Q5 = func() int32 {
  8913  		if (3) == 1 {
  8914  			return ((((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7) >> (1)) + (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FHPLTPredCodGain_Q7) >> (1))) >> 1) + (((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7) >> (1)) + (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FHPLTPredCodGain_Q7) >> (1))) & 1))
  8915  		}
  8916  		return (((((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7) >> (1)) + (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FHPLTPredCodGain_Q7) >> (1))) >> ((3) - 1)) + 1) >> 1)
  8917  	}()
  8918  	g_limit_Q15 = SKP_Silk_sigm_Q15(tls, (g_out_Q5 - (int32(3) << 5)))
  8919  
  8920  	/* Default is minimum scaling */
  8921  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FLTP_scaleIndex = 0
  8922  
  8923  	/* Round the loss measure to whole pct */
  8924  	round_loss = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketLoss_perc
  8925  
  8926  	/* Only scale if this is the first frame in the packet */
  8927  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf == 0 {
  8928  
  8929  		frames_per_packet = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketSize_ms) / (20))
  8930  
  8931  		round_loss = round_loss + (frames_per_packet - 1)
  8932  		thrld1_Q15 = int32(LTPScaleThresholds_Q15[SKP_min_int(tls, round_loss, (11-1))])
  8933  		thrld2_Q15 = int32(LTPScaleThresholds_Q15[SKP_min_int(tls, (round_loss+1), (11-1))])
  8934  
  8935  		if g_limit_Q15 > thrld1_Q15 {
  8936  			/* Maximum scaling */
  8937  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FLTP_scaleIndex = 2
  8938  		} else if g_limit_Q15 > thrld2_Q15 {
  8939  			/* Medium scaling */
  8940  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FLTP_scaleIndex = 1
  8941  		}
  8942  	}
  8943  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTP_scale_Q14 = int32(SKP_Silk_LTPScales_table_Q14[(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FLTP_scaleIndex])
  8944  }
  8945  
  8946  /* Variable order MA prediction error filter */
  8947  func SKP_Silk_MA_Prediction(tls *libc.TLS, in uintptr, B uintptr, S uintptr, out uintptr, len int32, order int32) { /* SKP_Silk_MA.c:39:6: */
  8948  	var k int32
  8949  	var d int32
  8950  	var in16 int32
  8951  	var out32 int32
  8952  
  8953  	for k = 0; k < len; k++ {
  8954  		in16 = int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))
  8955  		out32 = (((in16) << (12)) - *(*int32)(unsafe.Pointer(S)))
  8956  		out32 = func() int32 {
  8957  			if (12) == 1 {
  8958  				return (((out32) >> 1) + ((out32) & 1))
  8959  			}
  8960  			return ((((out32) >> ((12) - 1)) + 1) >> 1)
  8961  		}()
  8962  
  8963  		for d = 0; d < (order - 1); d++ {
  8964  			*(*int32)(unsafe.Pointer(S + uintptr(d)*4)) = (int32((uint32(*(*int32)(unsafe.Pointer(S + uintptr((d+1))*4)))) + (uint32((int32(int16(in16))) * (int32(*(*int16)(unsafe.Pointer(B + uintptr(d)*2))))))))
  8965  		}
  8966  		*(*int32)(unsafe.Pointer(S + uintptr((order-1))*4)) = ((int32(int16(in16))) * (int32(*(*int16)(unsafe.Pointer(B + uintptr((order-1))*2)))))
  8967  
  8968  		/* Limit */
  8969  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
  8970  			if (out32) > 0x7FFF {
  8971  				return int16(0x7FFF)
  8972  			}
  8973  			return func() int16 {
  8974  				if (out32) < (int32(libc.Int16FromInt32(0x8000))) {
  8975  					return libc.Int16FromInt32(0x8000)
  8976  				}
  8977  				return int16(out32)
  8978  			}()
  8979  		}()
  8980  	}
  8981  }
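
// Editor's sketch (illustrative only): SKP_Silk_MA_Prediction above is a
// transposed-form FIR prediction-error filter; the state s accumulates the
// pending contributions of past inputs so each output needs a single state
// read. A hypothetical floating-point equivalent (len(s) equals len(b)):
func exampleMAPrediction(in, b, s []float64) []float64 {
	out := make([]float64, len(in))
	last := len(b) - 1
	for k := range in {
		out[k] = in[k] - s[0]
		for d := 0; d < last; d++ {
			s[d] = s[d+1] + in[k]*b[d]
		}
		s[last] = in[k] * b[last]
	}
	return out
}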
  8982  
  8983  func SKP_Silk_LPC_analysis_filter(tls *libc.TLS, in uintptr, B uintptr, S uintptr, out uintptr, len int32, Order int32) { /* SKP_Silk_MA.c:67:6: */
  8984  	var k int32
  8985  	var j int32
  8986  	var idx int32
  8987  	var Order_half int32 = ((Order) >> (1))
  8988  	var out32_Q12 int32
  8989  	var out32 int32
  8990  	var SA int16
  8991  	var SB int16
  8992  	/* Order must be even */
  8993  
  8994  	/* S[] values are in Q0 */
  8995  	for k = 0; k < len; k++ {
  8996  		SA = *(*int16)(unsafe.Pointer(S))
  8997  		out32_Q12 = 0
  8998  		for j = 0; j < (Order_half - 1); j++ {
  8999  			idx = (((int32(int16(2))) * (int32(int16(j)))) + 1)
  9000  			/* Multiply-add two prediction coefficients for each loop */
  9001  			SB = *(*int16)(unsafe.Pointer(S + uintptr(idx)*2))
  9002  			*(*int16)(unsafe.Pointer(S + uintptr(idx)*2)) = SA
  9003  			out32_Q12 = ((out32_Q12) + ((int32(SA)) * (int32(*(*int16)(unsafe.Pointer(B + uintptr((idx-1))*2))))))
  9004  			out32_Q12 = ((out32_Q12) + ((int32(SB)) * (int32(*(*int16)(unsafe.Pointer(B + uintptr(idx)*2))))))
  9005  			SA = *(*int16)(unsafe.Pointer(S + uintptr((idx+1))*2))
  9006  			*(*int16)(unsafe.Pointer(S + uintptr((idx+1))*2)) = SB
  9007  		}
  9008  
  9009  		/* Unrolled loop: epilog */
  9010  		SB = *(*int16)(unsafe.Pointer(S + uintptr((Order-1))*2))
  9011  		*(*int16)(unsafe.Pointer(S + uintptr((Order-1))*2)) = SA
  9012  		out32_Q12 = ((out32_Q12) + ((int32(SA)) * (int32(*(*int16)(unsafe.Pointer(B + uintptr((Order-2))*2))))))
  9013  		out32_Q12 = ((out32_Q12) + ((int32(SB)) * (int32(*(*int16)(unsafe.Pointer(B + uintptr((Order-1))*2))))))
  9014  
  9015  		/* Subtract prediction */
  9016  		out32_Q12 = func() int32 {
  9017  			if ((uint32(((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (12)) - (out32_Q12))) & 0x80000000) == uint32(0) {
  9018  				return func() int32 {
  9019  					if (((uint32((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (12))) & ((uint32(out32_Q12)) ^ 0x80000000)) & 0x80000000) != 0 {
  9020  						return libc.Int32FromUint32(0x80000000)
  9021  					}
  9022  					return (((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (12)) - (out32_Q12))
  9023  				}()
  9024  			}
  9025  			return func() int32 {
  9026  				if ((((uint32((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (12))) ^ 0x80000000) & (uint32(out32_Q12))) & 0x80000000) != 0 {
  9027  					return 0x7FFFFFFF
  9028  				}
  9029  				return (((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (12)) - (out32_Q12))
  9030  			}()
  9031  		}()
  9032  
  9033  		/* Scale to Q0 */
  9034  		out32 = func() int32 {
  9035  			if (12) == 1 {
  9036  				return (((out32_Q12) >> 1) + ((out32_Q12) & 1))
  9037  			}
  9038  			return ((((out32_Q12) >> ((12) - 1)) + 1) >> 1)
  9039  		}()
  9040  
  9041  		/* Saturate output */
  9042  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
  9043  			if (out32) > 0x7FFF {
  9044  				return int16(0x7FFF)
  9045  			}
  9046  			return func() int16 {
  9047  				if (out32) < (int32(libc.Int16FromInt32(0x8000))) {
  9048  					return libc.Int16FromInt32(0x8000)
  9049  				}
  9050  				return int16(out32)
  9051  			}()
  9052  		}()
  9053  
  9054  		/* Move input line */
  9055  		*(*int16)(unsafe.Pointer(S)) = *(*int16)(unsafe.Pointer(in + uintptr(k)*2))
  9056  	}
  9057  }
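
// Editor's sketch (illustrative only): the short-term analysis filter above
// subtracts an Order-tap prediction formed from previous inputs held in the
// state (S[0] is the most recent past input). A hypothetical floating-point
// equivalent (len(s) equals len(b)):
func exampleLPCAnalysis(in, b, s []float64) []float64 {
	out := make([]float64, len(in))
	for k := range in {
		pred := 0.0
		for j := range b {
			pred += b[j] * s[j]
		}
		out[k] = in[k] - pred
		copy(s[1:], s) // age the state by one sample
		s[0] = in[k]
	}
	return out
}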
  9058  
  9059  /* helper function for NLSF2A(..) */
  9060  func SKP_Silk_NLSF2A_find_poly(tls *libc.TLS, out uintptr, cLSF uintptr, dd int32) { /* SKP_Silk_NLSF2A.c:37:17: */
  9061  	var k int32
  9062  	var n int32
  9063  	var ftmp int32
  9064  
  9065  	*(*int32)(unsafe.Pointer(out)) = (int32((1)) << (20))
  9066  	*(*int32)(unsafe.Pointer(out + 1*4)) = -*(*int32)(unsafe.Pointer(cLSF))
  9067  	for k = 1; k < dd; k++ {
  9068  		ftmp = *(*int32)(unsafe.Pointer(cLSF + uintptr((2*k))*4)) // Q20
  9069  		*(*int32)(unsafe.Pointer(out + uintptr((k+1))*4)) = (((*(*int32)(unsafe.Pointer(out + uintptr((k-1))*4))) << (1)) - (func() int32 {
  9070  			if (20) == 1 {
  9071  				return (int32((((int64_t(ftmp)) * (int64_t(*(*int32)(unsafe.Pointer(out + uintptr(k)*4))))) >> 1) + (((int64_t(ftmp)) * (int64_t(*(*int32)(unsafe.Pointer(out + uintptr(k)*4))))) & int64(1))))
  9072  			}
  9073  			return (int32(((((int64_t(ftmp)) * (int64_t(*(*int32)(unsafe.Pointer(out + uintptr(k)*4))))) >> ((20) - 1)) + int64(1)) >> 1))
  9074  		}()))
  9075  		for n = k; n > 1; n-- {
  9076  			*(*int32)(unsafe.Pointer(out + uintptr(n)*4)) += (*(*int32)(unsafe.Pointer(out + uintptr((n-2))*4)) - (func() int32 {
  9077  				if (20) == 1 {
  9078  					return (int32((((int64_t(ftmp)) * (int64_t(*(*int32)(unsafe.Pointer(out + uintptr((n-1))*4))))) >> 1) + (((int64_t(ftmp)) * (int64_t(*(*int32)(unsafe.Pointer(out + uintptr((n-1))*4))))) & int64(1))))
  9079  				}
  9080  				return (int32(((((int64_t(ftmp)) * (int64_t(*(*int32)(unsafe.Pointer(out + uintptr((n-1))*4))))) >> ((20) - 1)) + int64(1)) >> 1))
  9081  			}()))
  9082  		}
  9083  		*(*int32)(unsafe.Pointer(out + 1*4)) -= (ftmp)
  9084  	}
  9085  }
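
// Illustrative sketch, not part of the ccgo-generated translation: the Q20 recursion in
// SKP_Silk_NLSF2A_find_poly above expands the product of the palindromic second-order
// factors (1 - c_k*x + x^2), where each c_k is one of the 2*cos(LSF) values in cLSF
// (the caller hands in every other element, hence the stride-2 access), and it stores only
// the lower half of the symmetric coefficient vector. A plain floating-point version of
// exactly that recursion could look as follows; findPolySketch and its argument names are
// hypothetical.
func findPolySketch(c []float64) []float64 {
	out := make([]float64, len(c)+1)
	out[0] = 1
	out[1] = -c[0]
	for k := 1; k < len(c); k++ {
		// fold in the next factor (1 - c[k]*x + x^2); the doubled term uses the
		// symmetry of the polynomial for the coefficient just past the stored half
		out[k+1] = 2*out[k-1] - c[k]*out[k]
		for n := k; n > 1; n-- {
			out[n] += out[n-2] - c[k]*out[n-1]
		}
		out[1] -= c[k]
	}
	return out
}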
  9086  
  9087  /* compute whitening filter coefficients from normalized line spectral frequencies */
  9088  func SKP_Silk_NLSF2A(tls *libc.TLS, a uintptr, NLSF uintptr, d int32) { /* SKP_Silk_NLSF2A.c:59:6: */
  9089  	bp := tls.Alloc(200)
  9090  	defer tls.Free(200)
  9091  
  9092  	var k int32
  9093  	var i int32
  9094  	var dd int32
  9095  	// var cos_LSF_Q20 [16]int32 at bp, 64
  9096  
  9097  	// var P [9]int32 at bp+64, 36
  9098  
  9099  	// var Q [9]int32 at bp+100, 36
  9100  
  9101  	var Ptmp int32
  9102  	var Qtmp int32
  9103  	var f_int int32
  9104  	var f_frac int32
  9105  	var cos_val int32
  9106  	var delta int32
  9107  	// var a_int32 [16]int32 at bp+136, 64
  9108  
  9109  	var maxabs int32
  9110  	var absval int32
  9111  	var idx int32 = 0
  9112  	var sc_Q16 int32
  9113  
  9114  	/* convert LSFs to 2*cos(LSF(i)), using piecewise linear curve from table */
  9115  	for k = 0; k < d; k++ {
  9116  
  9117  		/* f_int on a scale 0-127 (rounded down) */
  9118  		f_int = ((*(*int32)(unsafe.Pointer(NLSF + uintptr(k)*4))) >> (15 - 7))
  9119  
  9120  		/* f_frac, range: 0..255 */
  9121  		f_frac = (*(*int32)(unsafe.Pointer(NLSF + uintptr(k)*4)) - ((f_int) << (15 - 7)))
  9122  
  9123  		/* Read start and end value from table */
  9124  		cos_val = SKP_Silk_LSFCosTab_FIX_Q12[f_int]               /* Q12 */
  9125  		delta = (SKP_Silk_LSFCosTab_FIX_Q12[(f_int+1)] - cos_val) /* Q12, with a range of 0..200 */
  9126  
  9127  		/* Linear interpolation */
  9128  		*(*int32)(unsafe.Pointer(bp /* &cos_LSF_Q20[0] */ + uintptr(k)*4)) = (((cos_val) << (8)) + ((delta) * (f_frac))) /* Q20 */
  9129  	}
  9130  
  9131  	dd = ((d) >> (1))
  9132  
  9133  	/* generate even and odd polynomials using convolution */
  9134  	SKP_Silk_NLSF2A_find_poly(tls, bp+64 /* &P[0] */, (bp /* &cos_LSF_Q20 */), dd)
  9135  	SKP_Silk_NLSF2A_find_poly(tls, bp+100 /* &Q[0] */, (bp /* &cos_LSF_Q20 */ + 1*4), dd)
  9136  
  9137  	/* convert even and odd polynomials to SKP_int32 Q12 filter coefs */
  9138  	for k = 0; k < dd; k++ {
  9139  		Ptmp = (*(*int32)(unsafe.Pointer(bp + 64 /* &P[0] */ + uintptr((k+1))*4)) + *(*int32)(unsafe.Pointer(bp + 64 /* &P[0] */ + uintptr(k)*4)))
  9140  		Qtmp = (*(*int32)(unsafe.Pointer(bp + 100 /* &Q[0] */ + uintptr((k+1))*4)) - *(*int32)(unsafe.Pointer(bp + 100 /* &Q[0] */ + uintptr(k)*4)))
  9141  
  9142  		/* the Ptmp and Qtmp values at this stage need to fit in int32 */
  9143  
  9144  		*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4)) = -func() int32 {
  9145  			if (9) == 1 {
  9146  				return (((Ptmp + Qtmp) >> 1) + ((Ptmp + Qtmp) & 1))
  9147  			}
  9148  			return ((((Ptmp + Qtmp) >> ((9) - 1)) + 1) >> 1)
  9149  		}() /* Q20 -> Q12 */
  9150  		*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(((d-k)-1))*4)) = func() int32 {
  9151  			if (9) == 1 {
  9152  				return (((Qtmp - Ptmp) >> 1) + ((Qtmp - Ptmp) & 1))
  9153  			}
  9154  			return ((((Qtmp - Ptmp) >> ((9) - 1)) + 1) >> 1)
  9155  		}() /* Q20 -> Q12 */
  9156  	}
  9157  
  9158  	/* Limit the maximum absolute value of the prediction coefficients */
  9159  	for i = 0; i < 10; i++ {
  9160  		/* Find maximum absolute value and its index */
  9161  		maxabs = 0
  9162  		for k = 0; k < d; k++ {
  9163  			absval = func() int32 {
  9164  				if (*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4))) > 0 {
  9165  					return *(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4))
  9166  				}
  9167  				return -*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4))
  9168  			}()
  9169  			if absval > maxabs {
  9170  				maxabs = absval
  9171  				idx = k
  9172  			}
  9173  		}
  9174  
  9175  		if maxabs > 0x7FFF {
  9176  			/* Reduce magnitude of prediction coefficients */
  9177  			maxabs = func() int32 {
  9178  				if (maxabs) < (98369) {
  9179  					return maxabs
  9180  				}
  9181  				return 98369
  9182  			}() // ( SKP_int32_MAX / ( 65470 >> 2 ) ) + SKP_int16_MAX = 98369
  9183  			sc_Q16 = (65470 - (((int32(65470) >> 2) * (maxabs - 0x7FFF)) / (((maxabs) * (idx + 1)) >> (2))))
  9184  			SKP_Silk_bwexpander_32(tls, bp+136 /* &a_int32[0] */, d, sc_Q16)
  9185  		} else {
  9186  			break
  9187  		}
  9188  	}
  9189  
  9190  	/* Reached the last iteration */
  9191  	if i == 10 {
  9192  
  9193  		for k = 0; k < d; k++ {
  9194  			*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4)) = func() int32 {
  9195  				if (*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4))) > 0x7FFF {
  9196  					return 0x7FFF
  9197  				}
  9198  				return func() int32 {
  9199  					if (*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4))) < (int32(libc.Int16FromInt32(0x8000))) {
  9200  						return int32(libc.Int16FromInt32(0x8000))
  9201  					}
  9202  					return *(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4))
  9203  				}()
  9204  			}()
  9205  		}
  9206  	}
  9207  
  9208  	/* Return as SKP_int16 Q12 coefficients */
  9209  	for k = 0; k < d; k++ {
  9210  		*(*int16)(unsafe.Pointer(a + uintptr(k)*2)) = int16(*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4)))
  9211  	}
  9212  }
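
// Illustrative sketch, not part of the ccgo-generated translation: once the even/odd
// polynomials P and Q have been built by SKP_Silk_NLSF2A_find_poly, the Q20 -> Q12 loop
// above combines them into AR coefficients. In floating-point terms it computes, for
// k = 0..dd-1, a[k] = -(P[k+1]+P[k] + Q[k+1]-Q[k])/2 and a[d-1-k] = (Q[k+1]-Q[k] - (P[k+1]+P[k]))/2;
// the fixed-point version then limits and saturates the result. nlsf2aCombineSketch is a
// hypothetical name.
func nlsf2aCombineSketch(p, q []float64) []float64 {
	dd := len(p) - 1
	a := make([]float64, 2*dd)
	for k := 0; k < dd; k++ {
		pt := p[k+1] + p[k] // symmetric part
		qt := q[k+1] - q[k] // antisymmetric part
		a[k] = -(pt + qt) / 2
		a[2*dd-1-k] = (qt - pt) / 2
	}
	return a
}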
  9213  
  9214  /* Convert NLSF parameters to stable AR prediction filter coefficients */
  9215  func SKP_Silk_NLSF2A_stable(tls *libc.TLS, pAR_Q12 uintptr, pNLSF uintptr, LPC_order int32) { /* SKP_Silk_NLSF2A_stable.c:31:6: */
  9216  	bp := tls.Alloc(4)
  9217  	defer tls.Free(4)
  9218  
  9219  	var i int32
  9220  	// var invGain_Q30 int32 at bp, 4
  9221  
  9222  	SKP_Silk_NLSF2A(tls, pAR_Q12, pNLSF, LPC_order)
  9223  
  9224  	/* Ensure stable LPCs */
  9225  	for i = 0; i < 20; i++ {
  9226  		if SKP_Silk_LPC_inverse_pred_gain(tls, bp /* &invGain_Q30 */, pAR_Q12, LPC_order) == 1 {
  9227  			SKP_Silk_bwexpander(tls, pAR_Q12, LPC_order, (65536 - ((int32((int16(10 + i)))) * (int32(int16(i)))))) /* 10_Q16 = 0.00015 */
  9228  		} else {
  9229  			break
  9230  		}
  9231  	}
  9232  
  9233  	/* Reached the last iteration */
  9234  	if i == 20 {
  9235  
  9236  		for i = 0; i < LPC_order; i++ {
  9237  			*(*int16)(unsafe.Pointer(pAR_Q12 + uintptr(i)*2)) = int16(0)
  9238  		}
  9239  	}
  9240  }
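
// Illustrative sketch, not part of the ccgo-generated translation: the loop above keeps
// applying bandwidth expansion (in effect, coefficient k is scaled by chirp^(k+1), with a
// slightly smaller chirp on each retry) for as long as the inverse-prediction-gain check
// flags the filter as unstable, and zeroes the predictor if 20 rounds were not enough.
// A floating-point sketch, with a caller-supplied isStable stand-in for
// SKP_Silk_LPC_inverse_pred_gain (all names are hypothetical):
func nlsf2aStableSketch(ar []float64, isStable func([]float64) bool) {
	for i := 0; i < 20; i++ {
		if isStable(ar) {
			return
		}
		chirp := 1.0 - float64((10+i)*i)/65536.0 // 65536 - (10+i)*i in Q16
		c := chirp
		for k := range ar {
			ar[k] *= c
			c *= chirp
		}
	}
	// Still unstable after 20 rounds: fall back to an all-zero (flat spectrum) predictor.
	for k := range ar {
		ar[k] = 0
	}
}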
  9241  
  9242  /* NLSF vector decoder */
  9243  func SKP_Silk_NLSF_MSVQ_decode(tls *libc.TLS, pNLSF_Q15 uintptr, psNLSF_CB uintptr, NLSFIndices uintptr, LPC_order int32) { /* SKP_Silk_NLSF_MSVQ_decode.c:31:6: */
  9244  	var pCB_element uintptr
  9245  	var s int32
  9246  	var i int32
  9247  
  9248  	/* Check that each index is within valid range */
  9249  
  9250  	/* Point to the first vector element */
  9251  	pCB_element = ((*SKP_Silk_NLSF_CBS)(unsafe.Pointer((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FCBStages)).FCB_NLSF_Q15 + uintptr(((*(*int32)(unsafe.Pointer(NLSFIndices)))*(LPC_order)))*2)
  9252  
  9253  	/* Initialize with the codebook vector from stage 0 */
  9254  	for i = 0; i < LPC_order; i++ {
  9255  		*(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr(i)*4)) = int32(*(*int16)(unsafe.Pointer(pCB_element + uintptr(i)*2)))
  9256  	}
  9257  
  9258  	for s = 1; s < (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages; s++ {
  9259  		/* Check that each index is within valid range */
  9260  
  9261  		if LPC_order == 16 {
  9262  			/* Point to the first vector element */
  9263  			pCB_element = ((*SKP_Silk_NLSF_CBS)(unsafe.Pointer((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FCBStages+uintptr(s)*24)).FCB_NLSF_Q15 + uintptr(((*(*int32)(unsafe.Pointer(NLSFIndices + uintptr(s)*4)))<<(4)))*2)
  9264  
  9265  			/* Add the codebook vector from the current stage */
  9266  			*(*int32)(unsafe.Pointer(pNLSF_Q15)) += (int32(*(*int16)(unsafe.Pointer(pCB_element))))
  9267  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 1*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 1*2))))
  9268  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 2*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 2*2))))
  9269  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 3*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 3*2))))
  9270  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 4*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 4*2))))
  9271  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 5*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 5*2))))
  9272  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 6*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 6*2))))
  9273  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 7*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 7*2))))
  9274  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 8*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 8*2))))
  9275  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 9*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 9*2))))
  9276  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 10*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 10*2))))
  9277  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 11*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 11*2))))
  9278  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 12*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 12*2))))
  9279  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 13*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 13*2))))
  9280  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 14*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 14*2))))
  9281  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 15*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 15*2))))
  9282  		} else {
  9283  			/* Point to the first vector element */
  9284  			pCB_element = ((*SKP_Silk_NLSF_CBS)(unsafe.Pointer((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FCBStages+uintptr(s)*24)).FCB_NLSF_Q15 + uintptr(((int32(int16(*(*int32)(unsafe.Pointer(NLSFIndices + uintptr(s)*4)))))*(int32(int16(LPC_order)))))*2)
  9285  
  9286  			/* Add the codebook vector from the current stage */
  9287  			for i = 0; i < LPC_order; i++ {
  9288  				*(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr(i)*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + uintptr(i)*2))))
  9289  			}
  9290  		}
  9291  	}
  9292  
  9293  	/* NLSF stabilization */
  9294  	SKP_Silk_NLSF_stabilize(tls, pNLSF_Q15, (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FNDeltaMin_Q15, LPC_order)
  9295  }
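
// Illustrative sketch, not part of the ccgo-generated translation: the multi-stage VQ
// decoder above simply accumulates one Q15 codebook vector per stage (the LPC_order == 16
// branch is an unrolled copy of the generic loop) and then stabilizes the result. Assuming
// cb[s][idx][i] holds Q15 entry i of vector idx in stage s (a hypothetical layout):
func msvqDecodeSketch(cb [][][]int16, indices []int, lpcOrder int) []int32 {
	nlsf := make([]int32, lpcOrder)
	for s, idx := range indices {
		for i := 0; i < lpcOrder; i++ {
			nlsf[i] += int32(cb[s][idx][i])
		}
	}
	// The real decoder follows this with SKP_Silk_NLSF_stabilize to enforce minimum spacing.
	return nlsf
}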
  9296  
  9297  /***********************/
  9298  /* NLSF vector encoder */
  9299  /***********************/
  9300  func SKP_Silk_NLSF_MSVQ_encode_FIX(tls *libc.TLS, NLSFIndices uintptr, pNLSF_Q15 uintptr, psNLSF_CB uintptr, pNLSF_q_Q15_prev uintptr, pW_Q6 uintptr, NLSF_mu_Q15 int32, NLSF_mu_fluc_red_Q16 int32, NLSF_MSVQ_Survivors int32, LPC_order int32, deactivate_fluc_red int32) { /* SKP_Silk_NLSF_MSVQ_encode_FIX.c:33:6: */
  9301  	bp := tls.Alloc(4544)
  9302  	defer tls.Free(4544)
  9303  
  9304  	var i int32
  9305  	var s int32
  9306  	var k int32
  9307  	var cur_survivors int32 = 0
  9308  	var prev_survivors int32
  9309  	var min_survivors int32
  9310  	var input_index int32
  9311  	var cb_index int32
  9312  	var bestIndex int32
  9313  	var rateDistThreshold_Q18 int32
  9314  	var se_Q15 int32
  9315  	var wsse_Q20 int32
  9316  	var bestRateDist_Q20 int32
  9317  	// var pRateDist_Q18 [256]int32 at bp+1088, 1024
  9318  
  9319  	// var pRate_Q5 [16]int32 at bp, 64
  9320  
  9321  	// var pRate_new_Q5 [16]int32 at bp+3200, 64
  9322  
  9323  	// var pTempIndices [16]int32 at bp+2112, 64
  9324  
  9325  	// var pPath [160]int32 at bp+3264, 640
  9326  
  9327  	// var pPath_new [160]int32 at bp+3904, 640
  9328  
  9329  	// var pRes_Q15 [256]int32 at bp+64, 1024
  9330  
  9331  	// var pRes_new_Q15 [256]int32 at bp+2176, 1024
  9332  
  9333  	var pConstInt uintptr
  9334  	var pInt uintptr
  9335  	var pCB_element uintptr
  9336  	var pCurrentCBStage uintptr
  9337  
  9338  	/****************************************************/
  9339  	/* Tree search for the multi-stage vector quantizer */
  9340  	/****************************************************/
  9341  
  9342  	/* Clear accumulated rates */
  9343  	libc.Xmemset(tls, bp /* &pRate_Q5[0] */, 0, (uint64(NLSF_MSVQ_Survivors) * uint64(unsafe.Sizeof(int32(0)))))
  9344  
  9345  	/* Copy NLSFs into residual signal vector */
  9346  	for i = 0; i < LPC_order; i++ {
  9347  		*(*int32)(unsafe.Pointer(bp + 64 /* &pRes_Q15[0] */ + uintptr(i)*4)) = *(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr(i)*4))
  9348  	}
  9349  
  9350  	/* Set first stage values */
  9351  	prev_survivors = 1
  9352  
  9353  	/* Minimum number of survivors */
  9354  	min_survivors = (NLSF_MSVQ_Survivors / 2)
  9355  
  9356  	/* Loop over all stages */
  9357  	for s = 0; s < (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages; s++ {
  9358  
  9359  		/* Set a pointer to the current stage codebook */
  9360  		pCurrentCBStage = ((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FCBStages + uintptr(s)*24)
  9361  
  9362  		/* Calculate the number of survivors in the current stage */
  9363  		cur_survivors = SKP_min_32(tls, NLSF_MSVQ_Survivors, ((int32(int16(prev_survivors))) * (int32(int16((*SKP_Silk_NLSF_CBS)(unsafe.Pointer(pCurrentCBStage)).FnVectors)))))
  9364  
  9365  		/* Nearest neighbor clustering for multiple input data vectors */
  9366  		SKP_Silk_NLSF_VQ_rate_distortion_FIX(tls, bp+1088 /* &pRateDist_Q18[0] */, pCurrentCBStage, bp+64 /* &pRes_Q15[0] */, pW_Q6,
  9367  			bp /* &pRate_Q5[0] */, NLSF_mu_Q15, prev_survivors, LPC_order)
  9368  
  9369  		/* Sort the rate-distortion errors */
  9370  		SKP_Silk_insertion_sort_increasing(tls, bp+1088 /* &pRateDist_Q18[0] */, bp+2112, /* &pTempIndices[0] */
  9371  			(prev_survivors * (*SKP_Silk_NLSF_CBS)(unsafe.Pointer(pCurrentCBStage)).FnVectors), cur_survivors)
  9372  
  9373  		/* Discard survivors with rate-distortion values too far above the best one */
  9374  		if *(*int32)(unsafe.Pointer(bp + 1088 /* &pRateDist_Q18[0] */)) < (0x7FFFFFFF / 16) {
  9375  			rateDistThreshold_Q18 = ((*(*int32)(unsafe.Pointer(bp + 1088 /* &pRateDist_Q18[0] */))) + (((((NLSF_MSVQ_Survivors) * (*(*int32)(unsafe.Pointer(bp + 1088 /* &pRateDist_Q18[0] */)))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) + (((((NLSF_MSVQ_Survivors) * (*(*int32)(unsafe.Pointer(bp + 1088 /* &pRateDist_Q18[0] */)))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) >> 16)))
  9376  			for (*(*int32)(unsafe.Pointer(bp + 1088 /* &pRateDist_Q18[0] */ + uintptr((cur_survivors-1))*4)) > rateDistThreshold_Q18) && (cur_survivors > min_survivors) {
  9377  				cur_survivors--
  9378  			}
  9379  		}
  9380  		/* Update accumulated codebook contributions for the 'cur_survivors' best codebook indices */
  9381  		for k = 0; k < cur_survivors; k++ {
  9382  			if s > 0 {
  9383  				/* Find the indices of the input and the codebook vector */
  9384  				if (*SKP_Silk_NLSF_CBS)(unsafe.Pointer(pCurrentCBStage)).FnVectors == 8 {
  9385  					input_index = ((*(*int32)(unsafe.Pointer(bp + 2112 /* &pTempIndices[0] */ + uintptr(k)*4))) >> (3))
  9386  					cb_index = (*(*int32)(unsafe.Pointer(bp + 2112 /* &pTempIndices[0] */ + uintptr(k)*4)) & 7)
  9387  				} else {
  9388  					input_index = ((*(*int32)(unsafe.Pointer(bp + 2112 /* &pTempIndices[0] */ + uintptr(k)*4))) / ((*SKP_Silk_NLSF_CBS)(unsafe.Pointer(pCurrentCBStage)).FnVectors))
  9389  					cb_index = (*(*int32)(unsafe.Pointer(bp + 2112 /* &pTempIndices[0] */ + uintptr(k)*4)) - ((int32(int16(input_index))) * (int32(int16((*SKP_Silk_NLSF_CBS)(unsafe.Pointer(pCurrentCBStage)).FnVectors)))))
  9390  				}
  9391  			} else {
  9392  				/* Find the indices of the input and the codebook vector */
  9393  				input_index = 0
  9394  				cb_index = *(*int32)(unsafe.Pointer(bp + 2112 /* &pTempIndices[0] */ + uintptr(k)*4))
  9395  			}
  9396  
  9397  			/* Subtract new contribution from the previous residual vector for each of 'cur_survivors' */
  9398  			pConstInt = (bp + 64 /* &pRes_Q15 */ + uintptr(((int32(int16(input_index)))*(int32(int16(LPC_order)))))*4)
  9399  			pCB_element = ((*SKP_Silk_NLSF_CBS)(unsafe.Pointer(pCurrentCBStage)).FCB_NLSF_Q15 + uintptr(((int32(int16(cb_index)))*(int32(int16(LPC_order)))))*2)
  9400  			pInt = (bp + 2176 /* &pRes_new_Q15 */ + uintptr(((int32(int16(k)))*(int32(int16(LPC_order)))))*4)
  9401  			for i = 0; i < LPC_order; i++ {
  9402  				*(*int32)(unsafe.Pointer(pInt + uintptr(i)*4)) = (*(*int32)(unsafe.Pointer(pConstInt + uintptr(i)*4)) - int32(*(*int16)(unsafe.Pointer(pCB_element + uintptr(i)*2))))
  9403  			}
  9404  
  9405  			/* Update accumulated rate for stage 1 to the current */
  9406  			*(*int32)(unsafe.Pointer(bp + 3200 /* &pRate_new_Q5[0] */ + uintptr(k)*4)) = (*(*int32)(unsafe.Pointer(bp /* &pRate_Q5[0] */ + uintptr(input_index)*4)) + int32(*(*int16)(unsafe.Pointer((*SKP_Silk_NLSF_CBS)(unsafe.Pointer(pCurrentCBStage)).FRates_Q5 + uintptr(cb_index)*2))))
  9407  
  9408  			/* Copy paths from previous matrix, starting with the best path */
  9409  			pConstInt = (bp + 3264 /* &pPath */ + uintptr(((int32(int16(input_index)))*(int32(int16((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages)))))*4)
  9410  			pInt = (bp + 3904 /* &pPath_new */ + uintptr(((int32(int16(k)))*(int32(int16((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages)))))*4)
  9411  			for i = 0; i < s; i++ {
  9412  				*(*int32)(unsafe.Pointer(pInt + uintptr(i)*4)) = *(*int32)(unsafe.Pointer(pConstInt + uintptr(i)*4))
  9413  			}
  9414  			/* Write the current stage indices for the 'cur_survivors' to the best path matrix */
  9415  			*(*int32)(unsafe.Pointer(pInt + uintptr(s)*4)) = cb_index
  9416  		}
  9417  
  9418  		if s < ((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages - 1) {
  9419  			/* Copy NLSF residual matrix for next stage */
  9420  			libc.Xmemcpy(tls, bp+64 /* &pRes_Q15[0] */, bp+2176 /* &pRes_new_Q15[0] */, ((uint64((int32(int16(cur_survivors))) * (int32(int16(LPC_order))))) * uint64(unsafe.Sizeof(int32(0)))))
  9421  
  9422  			/* Copy rate vector for next stage */
  9423  			libc.Xmemcpy(tls, bp /* &pRate_Q5[0] */, bp+3200 /* &pRate_new_Q5[0] */, (uint64(cur_survivors) * uint64(unsafe.Sizeof(int32(0)))))
  9424  
  9425  			/* Copy best path matrix for next stage */
  9426  			libc.Xmemcpy(tls, bp+3264 /* &pPath[0] */, bp+3904 /* &pPath_new[0] */, ((uint64((int32(int16(cur_survivors))) * (int32(int16((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages))))) * uint64(unsafe.Sizeof(int32(0)))))
  9427  		}
  9428  
  9429  		prev_survivors = cur_survivors
  9430  	}
  9431  
  9432  	/* (Preliminary) index of the best survivor, later to be decoded */
  9433  	bestIndex = 0
  9434  
  9435  	/******************************/
  9436  	/* NLSF fluctuation reduction */
  9437  	/******************************/
  9438  	if deactivate_fluc_red != 1 {
  9439  
  9440  		/* Search among all survivors, now taking also weighted fluctuation errors into account */
  9441  		bestRateDist_Q20 = 0x7FFFFFFF
  9442  		for s = 0; s < cur_survivors; s++ {
  9443  			/* Decode survivor to compare with previous quantized NLSF vector */
  9444  			SKP_Silk_NLSF_MSVQ_decode(tls, pNLSF_Q15, psNLSF_CB, (bp + 3904 /* &pPath_new */ + uintptr(((int32(int16(s)))*(int32(int16((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages)))))*4), LPC_order)
  9445  
  9446  			/* Compare decoded NLSF vector with the previously quantized vector */
  9447  			wsse_Q20 = 0
  9448  			for i = 0; i < LPC_order; i = i + (2) {
  9449  				/* Compute weighted squared quantization error for index i */
  9450  				se_Q15 = (*(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr(i)*4)) - *(*int32)(unsafe.Pointer(pNLSF_q_Q15_prev + uintptr(i)*4))) // range: [ -32767 : 32767 ]
  9451  				wsse_Q20 = ((wsse_Q20) + (((((int32(int16(se_Q15))) * (int32(int16(se_Q15)))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(pW_Q6 + uintptr(i)*4)))))) + (((((int32(int16(se_Q15))) * (int32(int16(se_Q15)))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(pW_Q6 + uintptr(i)*4)))))) >> 16)))
  9452  
  9453  				/* Compute weighted squared quantization error for index i + 1 */
  9454  				se_Q15 = (*(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr((i+1))*4)) - *(*int32)(unsafe.Pointer(pNLSF_q_Q15_prev + uintptr((i+1))*4))) // range: [ -32767 : 32767 ]
  9455  				wsse_Q20 = ((wsse_Q20) + (((((int32(int16(se_Q15))) * (int32(int16(se_Q15)))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(pW_Q6 + uintptr((i+1))*4)))))) + (((((int32(int16(se_Q15))) * (int32(int16(se_Q15)))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(pW_Q6 + uintptr((i+1))*4)))))) >> 16)))
  9456  			}
  9457  
  9458  			/* Add the fluctuation reduction penalty to the rate distortion error */
  9459  			wsse_Q20 = func() int32 {
  9460  				if ((uint32((*(*int32)(unsafe.Pointer(bp + 1088 /* &pRateDist_Q18[0] */ + uintptr(s)*4))) + ((((wsse_Q20) >> 16) * (int32(int16(NLSF_mu_fluc_red_Q16)))) + ((((wsse_Q20) & 0x0000FFFF) * (int32(int16(NLSF_mu_fluc_red_Q16)))) >> 16)))) & 0x80000000) != 0 {
  9461  					return 0x7FFFFFFF
  9462  				}
  9463  				return ((*(*int32)(unsafe.Pointer(bp + 1088 /* &pRateDist_Q18[0] */ + uintptr(s)*4))) + ((((wsse_Q20) >> 16) * (int32(int16(NLSF_mu_fluc_red_Q16)))) + ((((wsse_Q20) & 0x0000FFFF) * (int32(int16(NLSF_mu_fluc_red_Q16)))) >> 16)))
  9464  			}()
  9465  
  9466  			/* Keep index of best survivor */
  9467  			if wsse_Q20 < bestRateDist_Q20 {
  9468  				bestRateDist_Q20 = wsse_Q20
  9469  				bestIndex = s
  9470  			}
  9471  		}
  9472  	}
  9473  
  9474  	/* Copy best path to output argument */
  9475  	libc.Xmemcpy(tls, NLSFIndices, (bp + 3904 /* &pPath_new */ + uintptr(((int32(int16(bestIndex)))*(int32(int16((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages)))))*4), (uint64((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages) * uint64(unsafe.Sizeof(int32(0)))))
  9476  
  9477  	/* Decode and stabilize the best survivor */
  9478  	SKP_Silk_NLSF_MSVQ_decode(tls, pNLSF_Q15, psNLSF_CB, NLSFIndices, LPC_order)
  9479  
  9480  }
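
// Illustrative sketch, not part of the ccgo-generated translation: inside the tree search
// above, once the rate-distortion values of a stage have been sorted, survivors whose value
// exceeds roughly bestRD * (1 + 0.1 * NLSF_MSVQ_Survivors) are dropped, but never below the
// minimum survivor count. A simplified integer sketch of that pruning rule (names are
// hypothetical, and the 0.1 factor is applied with plain integer division instead of the
// Q16 multiply used above):
func pruneSurvivorsSketch(rdSorted []int32, cur, minSurvivors, maxSurvivors int) int {
	if rdSorted[0] >= 0x7FFFFFFF/16 {
		return cur // threshold would overflow; keep all current survivors
	}
	threshold := rdSorted[0] + int32(int64(maxSurvivors)*int64(rdSorted[0])/10)
	for cur > minSurvivors && rdSorted[cur-1] > threshold {
		cur--
	}
	return cur
}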
  9481  
  9482  /* Constant Definitions */
  9483  
  9484  /* NLSF stabilizer, for a single input data vector */
  9485  func SKP_Silk_NLSF_stabilize(tls *libc.TLS, NLSF_Q15 uintptr, NDeltaMin_Q15 uintptr, L int32) { /* SKP_Silk_NLSF_stabilize.c:42:6: */
  9486  	var center_freq_Q15 int32
  9487  	var diff_Q15 int32
  9488  	var min_center_Q15 int32
  9489  	var max_center_Q15 int32
  9490  	var min_diff_Q15 int32
  9491  	var loops int32
  9492  	var i int32
  9493  	var I int32 = 0
  9494  	var k int32
  9495  
  9496  	/* This is necessary to ensure an output within range of a SKP_int16 */
  9497  
  9498  	for loops = 0; loops < 20; loops++ {
  9499  		/**************************/
  9500  		/* Find smallest distance */
  9501  		/**************************/
  9502  		/* First element */
  9503  		min_diff_Q15 = (*(*int32)(unsafe.Pointer(NLSF_Q15)) - *(*int32)(unsafe.Pointer(NDeltaMin_Q15)))
  9504  		I = 0
  9505  		/* Middle elements */
  9506  		for i = 1; i <= (L - 1); i++ {
  9507  			diff_Q15 = (*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(i)*4)) - (*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((i-1))*4)) + *(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(i)*4))))
  9508  			if diff_Q15 < min_diff_Q15 {
  9509  				min_diff_Q15 = diff_Q15
  9510  				I = i
  9511  			}
  9512  		}
  9513  		/* Last element */
  9514  		diff_Q15 = ((int32(1) << 15) - (*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((L-1))*4)) + *(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(L)*4))))
  9515  		if diff_Q15 < min_diff_Q15 {
  9516  			min_diff_Q15 = diff_Q15
  9517  			I = L
  9518  		}
  9519  
  9520  		/***************************************************/
  9521  		/* Now check if the smallest distance is non-negative */
  9522  		/***************************************************/
  9523  		if min_diff_Q15 >= 0 {
  9524  			return
  9525  		}
  9526  
  9527  		if I == 0 {
  9528  			/* Move away from lower limit */
  9529  			*(*int32)(unsafe.Pointer(NLSF_Q15)) = *(*int32)(unsafe.Pointer(NDeltaMin_Q15))
  9530  
  9531  		} else if I == L {
  9532  			/* Move away from higher limit */
  9533  			*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((L-1))*4)) = ((int32(1) << 15) - *(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(L)*4)))
  9534  
  9535  		} else {
  9536  			/* Find the lower extreme for the location of the current center frequency */
  9537  			min_center_Q15 = 0
  9538  			for k = 0; k < I; k++ {
  9539  				min_center_Q15 = min_center_Q15 + (*(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(k)*4)))
  9540  			}
  9541  			min_center_Q15 = min_center_Q15 + ((*(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(I)*4))) >> (1))
  9542  
  9543  			/* Find the upper extreme for the location of the current center frequency */
  9544  			max_center_Q15 = (int32(1) << 15)
  9545  			for k = L; k > I; k-- {
  9546  				max_center_Q15 = max_center_Q15 - (*(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(k)*4)))
  9547  			}
  9548  			max_center_Q15 = max_center_Q15 - (*(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(I)*4)) - ((*(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(I)*4))) >> (1)))
  9549  
  9550  			/* Move apart, sorted by value, keeping the same center frequency */
  9551  			center_freq_Q15 = func() int32 {
  9552  				if (min_center_Q15) > (max_center_Q15) {
  9553  					return func() int32 {
  9554  						if (func() int32 {
  9555  							if (1) == 1 {
  9556  								return (((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) & 1))
  9557  							}
  9558  							return ((((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> ((1) - 1)) + 1) >> 1)
  9559  						}()) > (min_center_Q15) {
  9560  							return min_center_Q15
  9561  						}
  9562  						return func() int32 {
  9563  							if (func() int32 {
  9564  								if (1) == 1 {
  9565  									return (((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) & 1))
  9566  								}
  9567  								return ((((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> ((1) - 1)) + 1) >> 1)
  9568  							}()) < (max_center_Q15) {
  9569  								return max_center_Q15
  9570  							}
  9571  							return func() int32 {
  9572  								if (1) == 1 {
  9573  									return (((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) & 1))
  9574  								}
  9575  								return ((((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> ((1) - 1)) + 1) >> 1)
  9576  							}()
  9577  						}()
  9578  					}()
  9579  				}
  9580  				return func() int32 {
  9581  					if (func() int32 {
  9582  						if (1) == 1 {
  9583  							return (((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) & 1))
  9584  						}
  9585  						return ((((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> ((1) - 1)) + 1) >> 1)
  9586  					}()) > (max_center_Q15) {
  9587  						return max_center_Q15
  9588  					}
  9589  					return func() int32 {
  9590  						if (func() int32 {
  9591  							if (1) == 1 {
  9592  								return (((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) & 1))
  9593  							}
  9594  							return ((((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> ((1) - 1)) + 1) >> 1)
  9595  						}()) < (min_center_Q15) {
  9596  							return min_center_Q15
  9597  						}
  9598  						return func() int32 {
  9599  							if (1) == 1 {
  9600  								return (((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) & 1))
  9601  							}
  9602  							return ((((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> ((1) - 1)) + 1) >> 1)
  9603  						}()
  9604  					}()
  9605  				}()
  9606  			}()
  9607  			*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) = (center_freq_Q15 - ((*(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(I)*4))) >> (1)))
  9608  			*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4)) = (*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(I)*4)))
  9609  		}
  9610  	}
  9611  
  9612  	/* Safe and simple fallback method, less ideal than the above */
  9613  	if loops == 20 {
  9614  		/* Insertion sort (fast for already almost sorted arrays):   */
  9615  		/* Best case:  O(n)   for an already sorted array            */
  9616  		/* Worst case: O(n^2) for an inversely sorted array          */
  9617  		SKP_Silk_insertion_sort_increasing_all_values(tls, (NLSF_Q15), L)
  9618  
  9619  		/* First NLSF should be no less than NDeltaMin[0] */
  9620  		*(*int32)(unsafe.Pointer(NLSF_Q15)) = SKP_max_int(tls, *(*int32)(unsafe.Pointer(NLSF_Q15)), *(*int32)(unsafe.Pointer(NDeltaMin_Q15)))
  9621  
  9622  		/* Keep delta_min distance between the NLSFs */
  9623  		for i = 1; i < L; i++ {
  9624  			*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(i)*4)) = SKP_max_int(tls, *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(i)*4)), (*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((i-1))*4)) + *(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(i)*4))))
  9625  		}
  9626  
  9627  		/* Last NLSF should be no higher than 1 - NDeltaMin[L] */
  9628  		*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((L-1))*4)) = SKP_min_int(tls, *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((L-1))*4)), ((int32(1) << 15) - *(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(L)*4))))
  9629  
  9630  		/* Keep NDeltaMin distance between the NLSFs */
  9631  		for i = (L - 2); i >= 0; i-- {
  9632  			*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(i)*4)) = SKP_min_int(tls, *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(i)*4)), (*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((i+1))*4)) - *(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr((i+1))*4))))
  9633  		}
  9634  	}
  9635  }
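
// Illustrative sketch, not part of the ccgo-generated translation: the fallback branch
// above (reached when 20 smoothing passes were not enough) sorts the NLSFs ascending and
// then forces the minimum spacing with one forward and one backward pass. A plain-Go
// sketch, assuming deltaMinQ15 has L+1 entries (spacing below the first and above the last
// NLSF included); stabilizeFallbackSketch is a hypothetical name.
func stabilizeFallbackSketch(nlsfQ15, deltaMinQ15 []int32) {
	L := len(nlsfQ15)
	// insertion sort, as in the real fallback (fast when the vector is almost sorted)
	for i := 1; i < L; i++ {
		v := nlsfQ15[i]
		j := i - 1
		for j >= 0 && nlsfQ15[j] > v {
			nlsfQ15[j+1] = nlsfQ15[j]
			j--
		}
		nlsfQ15[j+1] = v
	}
	// forward pass: keep at least deltaMin distance above the previous NLSF
	if nlsfQ15[0] < deltaMinQ15[0] {
		nlsfQ15[0] = deltaMinQ15[0]
	}
	for i := 1; i < L; i++ {
		if nlsfQ15[i] < nlsfQ15[i-1]+deltaMinQ15[i] {
			nlsfQ15[i] = nlsfQ15[i-1] + deltaMinQ15[i]
		}
	}
	// backward pass: keep at least deltaMin distance below the next NLSF / upper limit
	if nlsfQ15[L-1] > (int32(1)<<15)-deltaMinQ15[L] {
		nlsfQ15[L-1] = (int32(1) << 15) - deltaMinQ15[L]
	}
	for i := L - 2; i >= 0; i-- {
		if nlsfQ15[i] > nlsfQ15[i+1]-deltaMinQ15[i+1] {
			nlsfQ15[i] = nlsfQ15[i+1] - deltaMinQ15[i+1]
		}
	}
}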
  9636  
  9637  /* Rate-Distortion calculations for multiple input data vectors */
  9638  func SKP_Silk_NLSF_VQ_rate_distortion_FIX(tls *libc.TLS, pRD_Q20 uintptr, psNLSF_CBS uintptr, in_Q15 uintptr, w_Q6 uintptr, rate_acc_Q5 uintptr, mu_Q15 int32, N int32, LPC_order int32) { /* SKP_Silk_NLSF_VQ_rate_distortion_FIX.c:31:6: */
  9639  	var i int32
  9640  	var n int32
  9641  	var pRD_vec_Q20 uintptr
  9642  
  9643  	/* Compute weighted quantization errors for all input vectors over one codebook stage */
  9644  	SKP_Silk_NLSF_VQ_sum_error_FIX(tls, pRD_Q20, in_Q15, w_Q6, (*SKP_Silk_NLSF_CBS)(unsafe.Pointer(psNLSF_CBS)).FCB_NLSF_Q15,
  9645  		N, (*SKP_Silk_NLSF_CBS)(unsafe.Pointer(psNLSF_CBS)).FnVectors, LPC_order)
  9646  
  9647  	/* Loop over input vectors */
  9648  	pRD_vec_Q20 = pRD_Q20
  9649  	for n = 0; n < N; n++ {
  9650  		/* Add rate cost to error for each codebook vector */
  9651  		for i = 0; i < (*SKP_Silk_NLSF_CBS)(unsafe.Pointer(psNLSF_CBS)).FnVectors; i++ {
  9652  
  9653  			*(*int32)(unsafe.Pointer(pRD_vec_Q20 + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer(pRD_vec_Q20 + uintptr(i)*4))) + ((int32((int16(*(*int32)(unsafe.Pointer(rate_acc_Q5 + uintptr(n)*4)) + int32(*(*int16)(unsafe.Pointer((*SKP_Silk_NLSF_CBS)(unsafe.Pointer(psNLSF_CBS)).FRates_Q5 + uintptr(i)*2))))))) * (int32(int16(mu_Q15)))))
  9654  
  9655  		}
  9656  		pRD_vec_Q20 += 4 * (uintptr((*SKP_Silk_NLSF_CBS)(unsafe.Pointer(psNLSF_CBS)).FnVectors))
  9657  	}
  9658  }
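
// Illustrative sketch, not part of the ccgo-generated translation: the function above adds
// a rate penalty to each weighted error, i.e. RD_Q20[n][i] = err_Q20[n][i] +
// (rate_acc_Q5[n] + Rates_Q5[i]) * mu_Q15 (a Q5 rate times a Q15 mu lands in Q20, matching
// the error scaling). A plain-Go sketch with hypothetical slice-of-slice arguments:
func rateDistortionSketch(errQ20 [][]int32, ratesQ5, rateAccQ5 []int32, muQ15 int32) [][]int32 {
	rd := make([][]int32, len(errQ20))
	for n := range errQ20 {
		rd[n] = make([]int32, len(ratesQ5))
		for i := range ratesQ5 {
			rd[n][i] = errQ20[n][i] + (rateAccQ5[n]+ratesQ5[i])*muQ15
		}
	}
	return rd
}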
  9659  
  9660  /* Compute weighted quantization errors for an LPC_order element input vector, over one codebook stage */
  9661  func SKP_Silk_NLSF_VQ_sum_error_FIX(tls *libc.TLS, err_Q20 uintptr, in_Q15 uintptr, w_Q6 uintptr, pCB_Q15 uintptr, N int32, K int32, LPC_order int32) { /* SKP_Silk_NLSF_VQ_sum_error_FIX.c:32:6: */
  9662  	bp := tls.Alloc(32)
  9663  	defer tls.Free(32)
  9664  
  9665  	var i int32
  9666  	var n int32
  9667  	var m int32
  9668  	var diff_Q15 int32
  9669  	var sum_error int32
  9670  	var Wtmp_Q6 int32
  9671  	// var Wcpy_Q6 [8]int32 at bp, 32
  9672  
  9673  	var cb_vec_Q15 uintptr
  9674  
  9675  	/* Copy to local stack and pack two weights per int32 */
  9676  	for m = 0; m < ((LPC_order) >> (1)); m++ {
  9677  		*(*int32)(unsafe.Pointer(bp /* &Wcpy_Q6[0] */ + uintptr(m)*4)) = (*(*int32)(unsafe.Pointer(w_Q6 + uintptr((2*m))*4)) | ((*(*int32)(unsafe.Pointer(w_Q6 + uintptr(((2*m)+1))*4))) << (16)))
  9678  	}
  9679  
  9680  	/* Loop over input vectors */
  9681  	for n = 0; n < N; n++ {
  9682  		/* Loop over codebook */
  9683  		cb_vec_Q15 = pCB_Q15
  9684  		for i = 0; i < K; i++ {
  9685  			sum_error = 0
  9686  			for m = 0; m < LPC_order; m = m + (2) {
  9687  				/* Get two weights packed in an int32 */
  9688  				Wtmp_Q6 = *(*int32)(unsafe.Pointer(bp /* &Wcpy_Q6[0] */ + uintptr(((m)>>(1)))*4))
  9689  
  9690  				/* Compute weighted squared quantization error for index m */
  9691  				diff_Q15 = (*(*int32)(unsafe.Pointer(in_Q15 + uintptr(m)*4)) - int32(*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&cb_vec_Q15, 2))))) // range: [ -32767 : 32767 ]
  9692  				sum_error = ((sum_error) + (((((int32(int16(diff_Q15))) * (int32(int16(diff_Q15)))) >> 16) * (int32(int16(Wtmp_Q6)))) + (((((int32(int16(diff_Q15))) * (int32(int16(diff_Q15)))) & 0x0000FFFF) * (int32(int16(Wtmp_Q6)))) >> 16)))
  9693  
  9694  				/* Compute weighted squared quantization error for index m + 1 */
  9695  				diff_Q15 = (*(*int32)(unsafe.Pointer(in_Q15 + uintptr((m+1))*4)) - int32(*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&cb_vec_Q15, 2))))) // range: [ -32767 : 32767 ]
  9696  				sum_error = (((sum_error) + ((((int32(int16(diff_Q15))) * (int32(int16(diff_Q15)))) >> 16) * ((Wtmp_Q6) >> 16))) + (((((int32(int16(diff_Q15))) * (int32(int16(diff_Q15)))) & 0x0000FFFF) * ((Wtmp_Q6) >> 16)) >> 16))
  9697  			}
  9698  
  9699  			*(*int32)(unsafe.Pointer(err_Q20 + uintptr(i)*4)) = sum_error
  9700  		}
  9701  		err_Q20 += 4 * (uintptr(K))
  9702  		in_Q15 += 4 * (uintptr(LPC_order))
  9703  	}
  9704  }
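
// Illustrative sketch, not part of the ccgo-generated translation: for every input vector
// and every codebook vector, the function above accumulates the weighted squared error
// sum over m of (w_Q6[m] * (in_Q15[m] - cb_Q15[m])^2) >> 16, giving a Q20 result; packing
// two weights per int32 in Wcpy_Q6 is only an optimization. A sketch for a single input
// vector, with hypothetical argument names:
func vqSumErrorSketch(inQ15 []int32, cbQ15 [][]int16, wQ6 []int32) []int32 {
	errQ20 := make([]int32, len(cbQ15))
	for i, vec := range cbQ15 {
		var sum int64
		for m := range inQ15 {
			diff := int64(inQ15[m] - int32(vec[m]))    // Q15, range approx. [-32767, 32767]
			sum += (diff * diff * int64(wQ6[m])) >> 16 // Q30 * Q6 >> 16 = Q20
		}
		errQ20[i] = int32(sum)
	}
	return errQ20
}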
  9705  
  9706  /*
  9707  R. Laroia, N. Phamdo and N. Farvardin, "Robust and Efficient Quantization of Speech LSP
  9708  Parameters Using Structured Vector Quantization", Proc. IEEE Int. Conf. Acoust., Speech,
  9709  Signal Processing, pp. 641-644, 1991.
  9710  */
  9711  
  9712  /* Laroia low complexity NLSF weights */
  9713  func SKP_Silk_NLSF_VQ_weights_laroia(tls *libc.TLS, pNLSFW_Q6 uintptr, pNLSF_Q15 uintptr, D int32) { /* SKP_Silk_NLSF_VQ_weights_laroia.c:40:6: */
  9714  	var k int32
  9715  	var tmp1_int int32
  9716  	var tmp2_int int32
  9717  
  9718  	/* Check that we are guaranteed to end up within the required range */
  9719  
  9720  	/* First value */
  9721  	tmp1_int = SKP_max_int(tls, *(*int32)(unsafe.Pointer(pNLSF_Q15)), 3)
  9722  	tmp1_int = ((int32(1) << (15 + 6)) / (tmp1_int))
  9723  	tmp2_int = SKP_max_int(tls, (*(*int32)(unsafe.Pointer(pNLSF_Q15 + 1*4)) - *(*int32)(unsafe.Pointer(pNLSF_Q15))), 3)
  9724  	tmp2_int = ((int32(1) << (15 + 6)) / (tmp2_int))
  9725  	*(*int32)(unsafe.Pointer(pNLSFW_Q6)) = SKP_min_int(tls, (tmp1_int + tmp2_int), 0x7FFF)
  9726  
  9727  	/* Main loop */
  9728  	for k = 1; k < (D - 1); k = k + (2) {
  9729  		tmp1_int = SKP_max_int(tls, (*(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr((k+1))*4)) - *(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr(k)*4))), 3)
  9730  		tmp1_int = ((int32(1) << (15 + 6)) / (tmp1_int))
  9731  		*(*int32)(unsafe.Pointer(pNLSFW_Q6 + uintptr(k)*4)) = SKP_min_int(tls, (tmp1_int + tmp2_int), 0x7FFF)
  9732  
  9733  		tmp2_int = SKP_max_int(tls, (*(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr((k+2))*4)) - *(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr((k+1))*4))), 3)
  9734  		tmp2_int = ((int32(1) << (15 + 6)) / (tmp2_int))
  9735  		*(*int32)(unsafe.Pointer(pNLSFW_Q6 + uintptr((k+1))*4)) = SKP_min_int(tls, (tmp1_int + tmp2_int), 0x7FFF)
  9736  
  9737  	}
  9738  
  9739  	/* Last value */
  9740  	tmp1_int = SKP_max_int(tls, ((int32(1) << 15) - *(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr((D-1))*4))), 3)
  9741  	tmp1_int = ((int32(1) << (15 + 6)) / (tmp1_int))
  9742  	*(*int32)(unsafe.Pointer(pNLSFW_Q6 + uintptr((D-1))*4)) = SKP_min_int(tls, (tmp1_int + tmp2_int), 0x7FFF)
  9743  
  9744  }
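
// Illustrative sketch, not part of the ccgo-generated translation: the Laroia weights
// computed above are w[k] = 1/(f[k] - f[k-1]) + 1/(f[k+1] - f[k]), with virtual boundary
// frequencies 0 and 1 (1 << 15 in Q15), a minimum spacing of 3 to keep the divisions
// bounded, and the Q6 result clamped to 0x7FFF. A straight-line sketch of the same
// computation (hypothetical name, without the unrolled two-at-a-time loop):
func laroiaWeightsSketch(nlsfQ15 []int32) []int32 {
	inv := func(diffQ15 int32) int32 {
		if diffQ15 < 3 {
			diffQ15 = 3
		}
		return (int32(1) << (15 + 6)) / diffQ15 // reciprocal of a Q15 spacing, in Q6
	}
	w := make([]int32, len(nlsfQ15))
	prev := int32(0)
	for k, f := range nlsfQ15 {
		next := int32(1) << 15
		if k+1 < len(nlsfQ15) {
			next = nlsfQ15[k+1]
		}
		w[k] = inv(f-prev) + inv(next-f)
		if w[k] > 0x7FFF {
			w[k] = 0x7FFF
		}
		prev = f
	}
	return w
}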
  9745  
  9746  /***********************************************************************
  9747  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  9748  Redistribution and use in source and binary forms, with or without
  9749  modification, (subject to the limitations in the disclaimer below)
  9750  are permitted provided that the following conditions are met:
  9751  - Redistributions of source code must retain the above copyright notice,
  9752  this list of conditions and the following disclaimer.
  9753  - Redistributions in binary form must reproduce the above copyright
  9754  notice, this list of conditions and the following disclaimer in the
  9755  documentation and/or other materials provided with the distribution.
  9756  - Neither the name of Skype Limited, nor the names of specific
  9757  contributors, may be used to endorse or promote products derived from
  9758  this software without specific prior written permission.
  9759  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  9760  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  9761  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  9762  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  9763  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  9764  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  9765  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  9766  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  9767  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  9768  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  9769  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  9770  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  9771  ***********************************************************************/
  9772  
  9773  /*******************/
  9774  /* Pitch estimator */
  9775  /*******************/
  9776  
  9777  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
  9778  
  9779  /* Bandwidth expansion for whitening filter in pitch analysis */
  9780  
  9781  /* Threshold used by pitch estimator for early escape */
  9782  
  9783  /*********************/
  9784  /* Linear prediction */
  9785  /*********************/
  9786  
  9787  /* LPC analysis defines: regularization and bandwidth expansion */
  9788  
  9789  /* LTP analysis defines */
  9790  
  9791  /* LTP quantization settings */
  9792  
  9793  /***********************/
  9794  /* High pass filtering */
  9795  /***********************/
  9796  
  9797  /* Smoothing parameters for low end of pitch frequency range estimation */
  9798  
  9799  /* Min and max values for low end of pitch frequency range estimation */
  9800  
  9801  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
  9802  
  9803  /***********/
  9804  /* Various */
  9805  /***********/
  9806  
  9807  /* Required speech activity for counting frame as active */
  9808  
  9809  /* Speech Activity LBRR enable threshold (needs tuning) */
  9810  
  9811  /*************************/
  9812  /* Perceptual parameters */
  9813  /*************************/
  9814  
  9815  /* reduction in coding SNR during low speech activity */
  9816  
  9817  /* factor for reducing quantization noise during voiced speech */
  9818  
  9819  /* factor for reducing quantization noise for unvoiced sparse signals */
  9820  
  9821  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
  9822  
  9823  /* warping control */
  9824  
  9825  /* fraction added to first autocorrelation value */
  9826  
  9827  /* noise shaping filter chirp factor */
  9828  
  9829  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
  9830  
  9831  /* gain reduction for fricatives */
  9832  
  9833  /* extra harmonic boosting (signal shaping) at low bitrates */
  9834  
  9835  /* extra harmonic boosting (signal shaping) for noisy input signals */
  9836  
  9837  /* harmonic noise shaping */
  9838  
  9839  /* extra harmonic noise shaping for high bitrates or noisy input */
  9840  
  9841  /* parameter for shaping noise towards higher frequencies */
  9842  
  9843  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
  9844  
  9845  /* parameter for applying a high-pass tilt to the input signal */
  9846  
  9847  /* parameter for extra high-pass tilt to the input signal at high rates */
  9848  
  9849  /* parameter for reducing noise at the very low frequencies */
  9850  
  9851  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
  9852  
  9853  /* noise floor to put a lower limit on the quantization step size */
  9854  
  9855  /* noise floor relative to active speech gain level */
  9856  
  9857  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
  9858  
  9859  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
  9860  
  9861  /* parameters defining the R/D tradeoff in the residual quantizer */
  9862  
  9863  /* Compute gain to make warped filter coefficients have a zero mean log frequency response on a     */
  9864  /* non-warped frequency scale. (So that it can be implemented with a minimum-phase monic filter.)   */
  9865  func warped_gain(tls *libc.TLS, coefs_Q24 uintptr, lambda_Q16 int32, order int32) int32 { /* SKP_Silk_noise_shape_analysis_FIX.c:33:22: */
  9866  	var i int32
  9867  	var gain_Q24 int32
  9868  
  9869  	lambda_Q16 = -lambda_Q16
  9870  	gain_Q24 = *(*int32)(unsafe.Pointer(coefs_Q24 + uintptr((order-1))*4))
  9871  	for i = (order - 2); i >= 0; i-- {
  9872  		gain_Q24 = ((*(*int32)(unsafe.Pointer(coefs_Q24 + uintptr(i)*4))) + ((((gain_Q24) >> 16) * (int32(int16(lambda_Q16)))) + ((((gain_Q24) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9873  	}
  9874  	gain_Q24 = ((SKP_FIX_CONST(tls, 1.0, 24)) + ((((gain_Q24) >> 16) * (int32(int16(-lambda_Q16)))) + ((((gain_Q24) & 0x0000FFFF) * (int32(int16(-lambda_Q16)))) >> 16)))
  9875  	return SKP_INVERSE32_varQ(tls, gain_Q24, 40)
  9876  }
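
// Illustrative sketch, not part of the ccgo-generated translation: in floating-point terms
// the function above evaluates the warped coefficients with Horner's rule at -lambda and
// returns 1 / (1 + lambda * acc); the fixed-point version returns the same quantity in Q16
// via SKP_INVERSE32_varQ. warpedGainSketch is a hypothetical name.
func warpedGainSketch(coefs []float64, lambda float64) float64 {
	acc := coefs[len(coefs)-1]
	for i := len(coefs) - 2; i >= 0; i-- {
		acc = coefs[i] - lambda*acc // Horner evaluation at -lambda
	}
	return 1.0 / (1.0 + lambda*acc)
}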
  9877  
  9878  /* Convert warped filter coefficients to monic pseudo-warped coefficients and limit maximum     */
  9879  /* amplitude of monic warped coefficients by using bandwidth expansion on the true coefficients */
  9880  func limit_warped_coefs(tls *libc.TLS, coefs_syn_Q24 uintptr, coefs_ana_Q24 uintptr, lambda_Q16 int32, limit_Q24 int32, order int32) { /* SKP_Silk_noise_shape_analysis_FIX.c:52:17: */
  9881  	var i int32
  9882  	var iter int32
  9883  	var ind int32 = 0
  9884  	var tmp int32
  9885  	var maxabs_Q24 int32
  9886  	var chirp_Q16 int32
  9887  	var gain_syn_Q16 int32
  9888  	var gain_ana_Q16 int32
  9889  	var nom_Q16 int32
  9890  	var den_Q24 int32
  9891  
  9892  	/* Convert to monic coefficients */
  9893  	lambda_Q16 = -lambda_Q16
  9894  	for i = (order - 1); i > 0; i-- {
  9895  		*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr((i-1))*4)) = ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr((i-1))*4))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9896  		*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr((i-1))*4)) = ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr((i-1))*4))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9897  	}
  9898  	lambda_Q16 = -lambda_Q16
  9899  	nom_Q16 = ((SKP_FIX_CONST(tls, 1.0, 16)) + ((((-lambda_Q16) >> 16) * (int32(int16(lambda_Q16)))) + ((((-lambda_Q16) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9900  	den_Q24 = ((SKP_FIX_CONST(tls, 1.0, 24)) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9901  	gain_syn_Q16 = SKP_DIV32_varQ(tls, nom_Q16, den_Q24, 24)
  9902  	den_Q24 = ((SKP_FIX_CONST(tls, 1.0, 24)) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9903  	gain_ana_Q16 = SKP_DIV32_varQ(tls, nom_Q16, den_Q24, 24)
  9904  	for i = 0; i < order; i++ {
  9905  		*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)) = (((((gain_syn_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)))))) + ((((gain_syn_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)))))) >> 16)) + ((gain_syn_Q16) * (func() int32 {
  9906  			if (16) == 1 {
  9907  				return (((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) & 1))
  9908  			}
  9909  			return ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
  9910  		}())))
  9911  		*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)) = (((((gain_ana_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)))))) + ((((gain_ana_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)))))) >> 16)) + ((gain_ana_Q16) * (func() int32 {
  9912  			if (16) == 1 {
  9913  				return (((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) & 1))
  9914  			}
  9915  			return ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
  9916  		}())))
  9917  	}
  9918  
  9919  	for iter = 0; iter < 10; iter++ {
  9920  		/* Find maximum absolute value */
  9921  		maxabs_Q24 = -1
  9922  		for i = 0; i < order; i++ {
  9923  			tmp = func() int32 {
  9924  				if (((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) ^ ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 31)) - ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 31)) > (((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) ^ ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 31)) - ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 31)) {
  9925  					return (((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) ^ ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 31)) - ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 31))
  9926  				}
  9927  				return (((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) ^ ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 31)) - ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 31))
  9928  			}()
  9929  			if tmp > maxabs_Q24 {
  9930  				maxabs_Q24 = tmp
  9931  				ind = i
  9932  			}
  9933  		}
  9934  		if maxabs_Q24 <= limit_Q24 {
  9935  			/* Coefficients are within range - done */
  9936  			return
  9937  		}
  9938  
  9939  		/* Convert back to true warped coefficients */
  9940  		for i = 1; i < order; i++ {
  9941  			*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr((i-1))*4)) = ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr((i-1))*4))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9942  			*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr((i-1))*4)) = ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr((i-1))*4))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9943  		}
  9944  		gain_syn_Q16 = SKP_INVERSE32_varQ(tls, gain_syn_Q16, 32)
  9945  		gain_ana_Q16 = SKP_INVERSE32_varQ(tls, gain_ana_Q16, 32)
  9946  		for i = 0; i < order; i++ {
  9947  			*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)) = (((((gain_syn_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)))))) + ((((gain_syn_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)))))) >> 16)) + ((gain_syn_Q16) * (func() int32 {
  9948  				if (16) == 1 {
  9949  					return (((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) & 1))
  9950  				}
  9951  				return ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
  9952  			}())))
  9953  			*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)) = (((((gain_ana_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)))))) + ((((gain_ana_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)))))) >> 16)) + ((gain_ana_Q16) * (func() int32 {
  9954  				if (16) == 1 {
  9955  					return (((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) & 1))
  9956  				}
  9957  				return ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
  9958  			}())))
  9959  		}
  9960  
  9961  		/* Apply bandwidth expansion */
  9962  		chirp_Q16 = (SKP_FIX_CONST(tls, 0.99, 16) - SKP_DIV32_varQ(tls,
  9963  			((((maxabs_Q24-limit_Q24)>>16)*(int32((int16((SKP_FIX_CONST(tls, 0.8, 10)) + ((int32(int16(SKP_FIX_CONST(tls, 0.1, 10)))) * (int32(int16(iter)))))))))+((((maxabs_Q24-limit_Q24)&0x0000FFFF)*(int32((int16((SKP_FIX_CONST(tls, 0.8, 10)) + ((int32(int16(SKP_FIX_CONST(tls, 0.1, 10)))) * (int32(int16(iter)))))))))>>16)),
  9964  			((maxabs_Q24)*(ind+1)), 22))
  9965  		SKP_Silk_bwexpander_32(tls, coefs_syn_Q24, order, chirp_Q16)
  9966  		SKP_Silk_bwexpander_32(tls, coefs_ana_Q24, order, chirp_Q16)
  9967  
  9968  		/* Convert to monic warped coefficients */
  9969  		lambda_Q16 = -lambda_Q16
  9970  		for i = (order - 1); i > 0; i-- {
  9971  			*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr((i-1))*4)) = ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr((i-1))*4))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9972  			*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr((i-1))*4)) = ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr((i-1))*4))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9973  		}
  9974  		lambda_Q16 = -lambda_Q16
  9975  		nom_Q16 = ((SKP_FIX_CONST(tls, 1.0, 16)) + ((((-lambda_Q16) >> 16) * (int32(int16(lambda_Q16)))) + ((((-lambda_Q16) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9976  		den_Q24 = ((SKP_FIX_CONST(tls, 1.0, 24)) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9977  		gain_syn_Q16 = SKP_DIV32_varQ(tls, nom_Q16, den_Q24, 24)
  9978  		den_Q24 = ((SKP_FIX_CONST(tls, 1.0, 24)) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9979  		gain_ana_Q16 = SKP_DIV32_varQ(tls, nom_Q16, den_Q24, 24)
  9980  		for i = 0; i < order; i++ {
  9981  			*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)) = (((((gain_syn_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)))))) + ((((gain_syn_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)))))) >> 16)) + ((gain_syn_Q16) * (func() int32 {
  9982  				if (16) == 1 {
  9983  					return (((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) & 1))
  9984  				}
  9985  				return ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
  9986  			}())))
  9987  			*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)) = (((((gain_ana_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)))))) + ((((gain_ana_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)))))) >> 16)) + ((gain_ana_Q16) * (func() int32 {
  9988  				if (16) == 1 {
  9989  					return (((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) & 1))
  9990  				}
  9991  				return ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
  9992  			}())))
  9993  		}
  9994  	}
  9995  
  9996  }
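
// Illustrative sketch, not part of the ccgo-generated translation: the "convert to monic"
// step used repeatedly above folds each coefficient into its predecessor with -lambda and
// then rescales everything by (1 - lambda^2) / (1 + lambda*coefs[0]); the surrounding loop
// alternates this with bandwidth expansion until the largest coefficient magnitude is
// within the limit. A floating-point sketch of the monic conversion alone (hypothetical
// name):
func monicWarpedSketch(coefs []float64, lambda float64) {
	for i := len(coefs) - 1; i > 0; i-- {
		coefs[i-1] -= lambda * coefs[i]
	}
	gain := (1 - lambda*lambda) / (1 + lambda*coefs[0])
	for i := range coefs {
		coefs[i] *= gain
	}
}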
  9997  
  9998  /**************************************************************/
  9999  /* Compute noise shaping coefficients and initial gain values */
 10000  /**************************************************************/
 10001  func SKP_Silk_noise_shape_analysis_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr, pitch_res uintptr, x uintptr) { /* SKP_Silk_noise_shape_analysis_FIX.c:137:6: */
 10002  	bp := tls.Alloc(992)
 10003  	defer tls.Free(992)
 10004  
 10005  	var psShapeSt uintptr = (psEnc + 19576 /* &.sShape */)
 10006  	var k int32
 10007  	var i int32
 10008  	var nSamples int32
 10009  	var Qnrg int32
 10010  	var b_Q14 int32
 10011  	var warping_Q16 int32
 10012  	*(*int32)(unsafe.Pointer(bp + 4 /* scale */)) = 0
 10013  	var SNR_adj_dB_Q7 int32
 10014  	var HarmBoost_Q16 int32
 10015  	var HarmShapeGain_Q16 int32
 10016  	var Tilt_Q16 int32
 10017  	var tmp32 int32
 10018  	// var nrg int32 at bp, 4
 10019  
 10020  	// var pre_nrg_Q30 int32 at bp+988, 4
 10021  
 10022  	var log_energy_Q7 int32
 10023  	var log_energy_prev_Q7 int32
 10024  	var energy_variation_Q7 int32
 10025  	var delta_Q16 int32
 10026  	var BWExp1_Q16 int32
 10027  	var BWExp2_Q16 int32
 10028  	var gain_mult_Q16 int32
 10029  	var gain_add_Q16 int32
 10030  	var strength_Q16 int32
 10031  	var b_Q8 int32
 10032  	// var auto_corr [17]int32 at bp+728, 68
 10033  
 10034  	// var refl_coef_Q16 [16]int32 at bp+796, 64
 10035  
 10036  	// var AR1_Q24 [16]int32 at bp+924, 64
 10037  
 10038  	// var AR2_Q24 [16]int32 at bp+860, 64
 10039  
 10040  	// var x_windowed [360]int16 at bp+8, 720
 10041  
 10042  	var x_ptr uintptr
 10043  	var pitch_res_ptr uintptr
 10044  
 10045  	/* Point to start of first LPC analysis block */
 10046  	x_ptr = (x - uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_shape)*2)
 10047  
 10048  	/****************/
 10049  	/* CONTROL SNR  */
 10050  	/****************/
 10051  	/* Reduce SNR_dB values if recent bitstream has exceeded TargetRate */
 10052  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7 = ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FSNR_dB_Q7 - ((((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms) << (7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.05, 16))))) + ((((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms) << (7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.05, 16))))) >> 16)))
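         	// This works out to roughly SNR_dB_Q7 - SMULWB(BufferedInChannel_ms << 7, 0.05 in Q16),
         	// i.e. the target SNR is lowered by about 0.05 dB per millisecond of buffered audio.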
 10053  
  10054  	/* Reduce SNR_dB if in-band FEC is used */
 10055  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8 > SKP_FIX_CONST(tls, 0.5, 8) {
 10056  		*(*int32)(unsafe.Pointer(psEncCtrl + 604 /* &.current_SNR_dB_Q7 */)) -= (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FinBandFEC_SNR_comp_Q8) >> (1))
 10057  	}
 10058  
 10059  	/****************/
 10060  	/* GAIN CONTROL */
 10061  	/****************/
 10062  	/* Input quality is the average of the quality in the lowest two VAD bands */
 10063  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14 = ((*(*int32)(unsafe.Pointer((psEncCtrl + 620 /* &.input_quality_bands_Q15 */))) + *(*int32)(unsafe.Pointer((psEncCtrl + 620 /* &.input_quality_bands_Q15 */) + 1*4))) >> (2))
 10064  
 10065  	/* Coding quality level, between 0.0_Q0 and 1.0_Q0, but in Q14 */
 10066  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14 = ((SKP_Silk_sigm_Q15(tls, func() int32 {
 10067  		if (4) == 1 {
 10068  			return ((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7 - SKP_FIX_CONST(tls, 18.0, 7)) >> 1) + (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7 - SKP_FIX_CONST(tls, 18.0, 7)) & 1))
 10069  		}
 10070  		return (((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7 - SKP_FIX_CONST(tls, 18.0, 7)) >> ((4) - 1)) + 1) >> 1)
 10071  	}())) >> (1))
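         	// The immediately-invoked func above is the inlined SKP_RSHIFT_ROUND(x, 4): a right shift by 4
         	// with rounding to nearest. The same if/else pattern recurs throughout this file with other
         	// shift amounts.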
 10072  
 10073  	/* Reduce coding SNR during low speech activity */
 10074  	b_Q8 = (SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)
 10075  	b_Q8 = (((((b_Q8) << (8)) >> 16) * (int32(int16(b_Q8)))) + (((((b_Q8) << (8)) & 0x0000FFFF) * (int32(int16(b_Q8)))) >> 16))
 10076  	SNR_adj_dB_Q7 = (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7) + (((((int32((int16(SKP_FIX_CONST(tls, float64(-4.0), 7) >> (4 + 1))))) * (int32(int16(b_Q8)))) >> 16) * (int32((int16((((SKP_FIX_CONST(tls, 1.0, 14) + (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14)))) + ((((SKP_FIX_CONST(tls, 1.0, 14) + (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14)))) >> 16)))))) + (((((int32((int16(SKP_FIX_CONST(tls, float64(-4.0), 7) >> (4 + 1))))) * (int32(int16(b_Q8)))) & 0x0000FFFF) * (int32((int16((((SKP_FIX_CONST(tls, 1.0, 14) + (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14)))) + ((((SKP_FIX_CONST(tls, 1.0, 14) + (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14)))) >> 16)))))) >> 16))) // Q12
 10077  
 10078  	if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
 10079  		/* Reduce gains for periodic signals */
 10080  		SNR_adj_dB_Q7 = ((SNR_adj_dB_Q7) + ((((SKP_FIX_CONST(tls, 2.0, 8)) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15)))) + ((((SKP_FIX_CONST(tls, 2.0, 8)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15)))) >> 16)))
 10081  	} else {
  10082  		/* For unvoiced signals and low-quality input, adjust the quality more slowly than the SNR_dB setting */
 10083  		SNR_adj_dB_Q7 = ((SNR_adj_dB_Q7) + (((((SKP_FIX_CONST(tls, 6.0, 9)) + ((((-SKP_FIX_CONST(tls, 0.4, 18)) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7)))) + ((((-SKP_FIX_CONST(tls, 0.4, 18)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7)))) >> 16))) >> 16) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 14) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14))))) + (((((SKP_FIX_CONST(tls, 6.0, 9)) + ((((-SKP_FIX_CONST(tls, 0.4, 18)) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7)))) + ((((-SKP_FIX_CONST(tls, 0.4, 18)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7)))) >> 16))) & 0x0000FFFF) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 14) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14))))) >> 16)))
 10084  	}
 10085  
 10086  	/*************************/
 10087  	/* SPARSENESS PROCESSING */
 10088  	/*************************/
 10089  	/* Set quantizer offset */
 10090  	if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
  10091  		/* Initially set to 0; may be overruled in process_gains(..) */
 10092  		(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FQuantOffsetType = 0
 10093  		(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8 = 0
 10094  	} else {
 10095  		/* Sparseness measure, based on relative fluctuations of energy per 2 milliseconds */
 10096  		nSamples = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz) << (1))
 10097  		energy_variation_Q7 = 0
 10098  		log_energy_prev_Q7 = 0
 10099  		pitch_res_ptr = pitch_res
 10100  		for k = 0; k < (20 / 2); k++ {
 10101  			SKP_Silk_sum_sqr_shift(tls, bp /* &nrg */, bp+4 /* &scale */, pitch_res_ptr, nSamples)
 10102  			*(*int32)(unsafe.Pointer(bp /* nrg */)) += ((nSamples) >> (*(*int32)(unsafe.Pointer(bp + 4 /* scale */)))) // Q(-scale)
 10103  
 10104  			log_energy_Q7 = SKP_Silk_lin2log(tls, *(*int32)(unsafe.Pointer(bp /* nrg */)))
 10105  			if k > 0 {
 10106  				energy_variation_Q7 = energy_variation_Q7 + (func() int32 {
 10107  					if (log_energy_Q7 - log_energy_prev_Q7) > 0 {
 10108  						return (log_energy_Q7 - log_energy_prev_Q7)
 10109  					}
 10110  					return -(log_energy_Q7 - log_energy_prev_Q7)
 10111  				}())
 10112  			}
 10113  			log_energy_prev_Q7 = log_energy_Q7
 10114  			pitch_res_ptr += 2 * (uintptr(nSamples))
 10115  		}
 10116  
 10117  		(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8 = ((SKP_Silk_sigm_Q15(tls, ((((energy_variation_Q7 - SKP_FIX_CONST(tls, 5.0, 7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) + ((((energy_variation_Q7 - SKP_FIX_CONST(tls, 5.0, 7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) >> 16)))) >> (7))
 10118  
 10119  		/* Set quantization offset depending on sparseness measure */
 10120  		if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8 > SKP_FIX_CONST(tls, 0.75, 8) {
 10121  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FQuantOffsetType = 0
 10122  		} else {
 10123  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FQuantOffsetType = 1
 10124  		}
 10125  
 10126  		/* Increase coding SNR for sparse signals */
 10127  		SNR_adj_dB_Q7 = ((SNR_adj_dB_Q7) + ((((SKP_FIX_CONST(tls, 2.0, 15)) >> 16) * (int32((int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8 - SKP_FIX_CONST(tls, 0.5, 8)))))) + ((((SKP_FIX_CONST(tls, 2.0, 15)) & 0x0000FFFF) * (int32((int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8 - SKP_FIX_CONST(tls, 0.5, 8)))))) >> 16)))
 10128  	}
 10129  
 10130  	/*******************************/
 10131  	/* Control bandwidth expansion */
 10132  	/*******************************/
 10133  	/* More BWE for signals with high prediction gain */
 10134  	strength_Q16 = (((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FpredGain_Q16) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 16))))) + (((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FpredGain_Q16) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 16))))) >> 16))
 10135  	BWExp1_Q16 = libc.AssignInt32(&BWExp2_Q16, SKP_DIV32_varQ(tls, SKP_FIX_CONST(tls, 0.95, 16),
 10136  		(((SKP_FIX_CONST(tls, 1.0, 16))+((((strength_Q16)>>16)*(int32(int16(strength_Q16))))+((((strength_Q16)&0x0000FFFF)*(int32(int16(strength_Q16))))>>16)))+((strength_Q16)*(func() int32 {
 10137  			if (16) == 1 {
 10138  				return (((strength_Q16) >> 1) + ((strength_Q16) & 1))
 10139  			}
 10140  			return ((((strength_Q16) >> ((16) - 1)) + 1) >> 1)
 10141  		}()))), 16))
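         	// In effect BWExp1 = BWExp2 = 0.95 / (1 + strength^2) in Q16, where strength = 1e-3 * predGain;
         	// the denominator above is the expanded SKP_SMLAWW(1.0 in Q16, strength_Q16, strength_Q16).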
 10142  	delta_Q16 = ((((SKP_FIX_CONST(tls, 1.0, 16) - ((int32(int16(3))) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14))))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.01, 16))))) + ((((SKP_FIX_CONST(tls, 1.0, 16) - ((int32(int16(3))) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14))))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.01, 16))))) >> 16))
 10143  	BWExp1_Q16 = ((BWExp1_Q16) - (delta_Q16))
 10144  	BWExp2_Q16 = ((BWExp2_Q16) + (delta_Q16))
 10145  	/* BWExp1 will be applied after BWExp2, so make it relative */
 10146  	BWExp1_Q16 = (((BWExp1_Q16) << (14)) / ((BWExp2_Q16) >> (2)))
 10147  
 10148  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fwarping_Q16 > 0 {
 10149  		/* Slightly more warping in analysis will move quantization noise up in frequency, where it's better masked */
 10150  		warping_Q16 = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fwarping_Q16) + (((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.01, 18))))) + (((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.01, 18))))) >> 16)))
 10151  	} else {
 10152  		warping_Q16 = 0
 10153  	}
 10154  
 10155  	/********************************************/
 10156  	/* Compute noise shaping AR coefs and gains */
 10157  	/********************************************/
 10158  	for k = 0; k < 4; k++ {
 10159  		/* Apply window: sine slope followed by flat part followed by cosine slope */
 10160  		var shift int32
 10161  		var slope_part int32
 10162  		var flat_part int32
 10163  		flat_part = ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz * 5)
 10164  		slope_part = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapeWinLength - flat_part) >> (1))
 10165  
 10166  		SKP_Silk_apply_sine_window(tls, bp+8 /* &x_windowed[0] */, x_ptr, 1, slope_part)
 10167  		shift = slope_part
 10168  		libc.Xmemcpy(tls, (bp + 8 /* &x_windowed[0] */ + uintptr(shift)*2), (x_ptr + uintptr(shift)*2), (uint64(flat_part) * uint64(unsafe.Sizeof(int16(0)))))
 10169  		shift = shift + (flat_part)
 10170  		SKP_Silk_apply_sine_window(tls, (bp + 8 /* &x_windowed[0] */ + uintptr(shift)*2), (x_ptr + uintptr(shift)*2), 2, slope_part)
 10171  
 10172  		/* Update pointer: next LPC analysis block */
 10173  		x_ptr += 2 * (uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length))
 10174  
 10175  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fwarping_Q16 > 0 {
 10176  			/* Calculate warped auto correlation */
 10177  			SKP_Silk_warped_autocorrelation_FIX(tls, bp+728 /* &auto_corr[0] */, bp+4 /* &scale */, bp+8 /* &x_windowed[0] */, int16(warping_Q16), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapeWinLength, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 10178  		} else {
 10179  			/* Calculate regular auto correlation */
 10180  			SKP_Silk_autocorr(tls, bp+728 /* &auto_corr[0] */, bp+4 /* &scale */, bp+8 /* &x_windowed[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapeWinLength, ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder + 1))
 10181  		}
 10182  
 10183  		/* Add white noise, as a fraction of energy */
 10184  		*(*int32)(unsafe.Pointer(bp + 728 /* &auto_corr[0] */)) = ((*(*int32)(unsafe.Pointer(bp + 728 /* &auto_corr[0] */))) + (SKP_max_32(tls, (((((*(*int32)(unsafe.Pointer(bp + 728 /* &auto_corr[0] */))) >> (4)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 1e-5, 20))))) + (((((*(*int32)(unsafe.Pointer(bp + 728 /* &auto_corr[0] */))) >> (4)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 1e-5, 20))))) >> 16)), 1)))
 10185  
  10186  		/* Calculate the reflection coefficients using the Schur algorithm */
 10187  		*(*int32)(unsafe.Pointer(bp /* nrg */)) = SKP_Silk_schur64(tls, bp+796 /* &refl_coef_Q16[0] */, bp+728 /* &auto_corr[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 10188  
 10189  		/* Convert reflection coefficients to prediction coefficients */
 10190  		SKP_Silk_k2a_Q16(tls, bp+860 /* &AR2_Q24[0] */, bp+796 /* &refl_coef_Q16[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 10191  
 10192  		Qnrg = -*(*int32)(unsafe.Pointer(bp + 4 /* scale */)) // range: -12...30
 10193  
 10194  		/* Make sure that Qnrg is an even number */
 10195  		if (Qnrg & 1) != 0 {
 10196  			Qnrg = Qnrg - (1)
 10197  			*(*int32)(unsafe.Pointer(bp /* nrg */)) >>= 1
 10198  		}
 10199  
 10200  		tmp32 = SKP_Silk_SQRT_APPROX(tls, *(*int32)(unsafe.Pointer(bp /* nrg */)))
 10201  		Qnrg >>= 1 // range: -6...15
 10202  
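         		// The nested clamps below expand SKP_LSHIFT_SAT32(tmp32, 16 - Qnrg): the approximate square
         		// root of the subframe energy is shifted up into a Q16 gain, saturating at INT32_MIN/INT32_MAX
         		// instead of overflowing.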
 10203  		*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = ((func() int32 {
 10204  			if (int32((libc.Int32FromUint32(0x80000000))) >> (16 - Qnrg)) > (int32((0x7FFFFFFF)) >> (16 - Qnrg)) {
 10205  				return func() int32 {
 10206  					if (tmp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (16 - Qnrg)) {
 10207  						return (int32((libc.Int32FromUint32(0x80000000))) >> (16 - Qnrg))
 10208  					}
 10209  					return func() int32 {
 10210  						if (tmp32) < (int32((0x7FFFFFFF)) >> (16 - Qnrg)) {
 10211  							return (int32((0x7FFFFFFF)) >> (16 - Qnrg))
 10212  						}
 10213  						return tmp32
 10214  					}()
 10215  				}()
 10216  			}
 10217  			return func() int32 {
 10218  				if (tmp32) > (int32((0x7FFFFFFF)) >> (16 - Qnrg)) {
 10219  					return (int32((0x7FFFFFFF)) >> (16 - Qnrg))
 10220  				}
 10221  				return func() int32 {
 10222  					if (tmp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (16 - Qnrg)) {
 10223  						return (int32((libc.Int32FromUint32(0x80000000))) >> (16 - Qnrg))
 10224  					}
 10225  					return tmp32
 10226  				}()
 10227  			}()
 10228  		}()) << (16 - Qnrg))
 10229  
 10230  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fwarping_Q16 > 0 {
 10231  			/* Adjust gain for warping */
 10232  			gain_mult_Q16 = warped_gain(tls, bp+860 /* &AR2_Q24[0] */, warping_Q16, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 10233  
 10234  			*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = (((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) >> 16) * (int32(int16(gain_mult_Q16)))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(gain_mult_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) * (func() int32 {
 10235  				if (16) == 1 {
 10236  					return (((gain_mult_Q16) >> 1) + ((gain_mult_Q16) & 1))
 10237  				}
 10238  				return ((((gain_mult_Q16) >> ((16) - 1)) + 1) >> 1)
 10239  			}())))
 10240  			if *(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) < 0 {
 10241  				*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = 0x7FFFFFFF
 10242  			}
 10243  		}
 10244  
 10245  		/* Bandwidth expansion for synthesis filter shaping */
 10246  		SKP_Silk_bwexpander_32(tls, bp+860 /* &AR2_Q24[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder, BWExp2_Q16)
 10247  
 10248  		/* Compute noise shaping filter coefficients */
 10249  		libc.Xmemcpy(tls, bp+924 /* &AR1_Q24[0] */, bp+860 /* &AR2_Q24[0] */, (uint64((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder) * uint64(unsafe.Sizeof(int32(0)))))
 10250  
 10251  		/* Bandwidth expansion for analysis filter shaping */
 10252  
 10253  		SKP_Silk_bwexpander_32(tls, bp+924 /* &AR1_Q24[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder, BWExp1_Q16)
 10254  
 10255  		/* Ratio of prediction gains, in energy domain */
 10256  		SKP_Silk_LPC_inverse_pred_gain_Q24(tls, bp+988 /* &pre_nrg_Q30 */, bp+860 /* &AR2_Q24[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 10257  		SKP_Silk_LPC_inverse_pred_gain_Q24(tls, bp /* &nrg */, bp+924 /* &AR1_Q24[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 10258  
 10259  		//psEncCtrl->GainsPre[ k ] = 1.0f - 0.7f * ( 1.0f - pre_nrg / nrg ) = 0.3f + 0.7f * pre_nrg / nrg;
 10260  		*(*int32)(unsafe.Pointer(bp + 988 /* pre_nrg_Q30 */)) = (((((*(*int32)(unsafe.Pointer(bp + 988 /* pre_nrg_Q30 */))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.7, 15))))) + ((((*(*int32)(unsafe.Pointer(bp + 988 /* pre_nrg_Q30 */))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.7, 15))))) >> 16)) << (1))
 10261  		*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4)) = (SKP_FIX_CONST(tls, 0.3, 14) + SKP_DIV32_varQ(tls, *(*int32)(unsafe.Pointer(bp + 988 /* pre_nrg_Q30 */)), *(*int32)(unsafe.Pointer(bp /* nrg */)), 14))
 10262  
 10263  		/* Convert to monic warped prediction coefficients and limit absolute values */
 10264  		limit_warped_coefs(tls, bp+860 /* &AR2_Q24[0] */, bp+924 /* &AR1_Q24[0] */, warping_Q16, SKP_FIX_CONST(tls, 3.999, 24), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 10265  
 10266  		/* Convert from Q24 to Q13 and store in int16 */
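         		// Each stored coefficient below is effectively SKP_SAT16(SKP_RSHIFT_ROUND(AR_Q24[i], 11)):
         		// a rounded shift from Q24 down to Q13, saturated to the int16 range.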
 10267  		for i = 0; i < (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder; i++ {
 10268  			*(*int16)(unsafe.Pointer((psEncCtrl + 252 /* &.AR1_Q13 */) + uintptr(((k*16)+i))*2)) = func() int16 {
 10269  				if (func() int32 {
 10270  					if (11) == 1 {
 10271  						return (((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) & 1))
 10272  					}
 10273  					return ((((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) >> ((11) - 1)) + 1) >> 1)
 10274  				}()) > 0x7FFF {
 10275  					return int16(0x7FFF)
 10276  				}
 10277  				return func() int16 {
 10278  					if (func() int32 {
 10279  						if (11) == 1 {
 10280  							return (((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) & 1))
 10281  						}
 10282  						return ((((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) >> ((11) - 1)) + 1) >> 1)
 10283  					}()) < (int32(libc.Int16FromInt32(0x8000))) {
 10284  						return libc.Int16FromInt32(0x8000)
 10285  					}
 10286  					return func() int16 {
 10287  						if (11) == 1 {
 10288  							return (int16(((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) & 1)))
 10289  						}
 10290  						return (int16((((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) >> ((11) - 1)) + 1) >> 1))
 10291  					}()
 10292  				}()
 10293  			}()
 10294  			*(*int16)(unsafe.Pointer((psEncCtrl + 380 /* &.AR2_Q13 */) + uintptr(((k*16)+i))*2)) = func() int16 {
 10295  				if (func() int32 {
 10296  					if (11) == 1 {
 10297  						return (((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) & 1))
 10298  					}
 10299  					return ((((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) >> ((11) - 1)) + 1) >> 1)
 10300  				}()) > 0x7FFF {
 10301  					return int16(0x7FFF)
 10302  				}
 10303  				return func() int16 {
 10304  					if (func() int32 {
 10305  						if (11) == 1 {
 10306  							return (((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) & 1))
 10307  						}
 10308  						return ((((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) >> ((11) - 1)) + 1) >> 1)
 10309  					}()) < (int32(libc.Int16FromInt32(0x8000))) {
 10310  						return libc.Int16FromInt32(0x8000)
 10311  					}
 10312  					return func() int16 {
 10313  						if (11) == 1 {
 10314  							return (int16(((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) & 1)))
 10315  						}
 10316  						return (int16((((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) >> ((11) - 1)) + 1) >> 1))
 10317  					}()
 10318  				}()
 10319  			}()
 10320  		}
 10321  	}
 10322  
 10323  	/*****************/
 10324  	/* Gain tweaking */
 10325  	/*****************/
  10326  	/* Increase gains during low speech activity and put a lower limit on the gains */
 10327  	gain_mult_Q16 = SKP_Silk_log2lin(tls, -((-SKP_FIX_CONST(tls, 16.0, 7)) + ((((SNR_adj_dB_Q7) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 16))))) + ((((SNR_adj_dB_Q7) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 16))))) >> 16))))
 10328  	gain_add_Q16 = SKP_Silk_log2lin(tls, ((SKP_FIX_CONST(tls, 16.0, 7)) + ((((SKP_FIX_CONST(tls, 4.0, 7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 16))))) + ((((SKP_FIX_CONST(tls, 4.0, 7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 16))))) >> 16))))
 10329  	tmp32 = SKP_Silk_log2lin(tls, ((SKP_FIX_CONST(tls, 16.0, 7)) + ((((SKP_FIX_CONST(tls, float64(-50.0), 7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 16))))) + ((((SKP_FIX_CONST(tls, float64(-50.0), 7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 16))))) >> 16))))
 10330  	tmp32 = ((((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) >> 16) * (int32(int16(tmp32)))) + (((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) & 0x0000FFFF) * (int32(int16(tmp32)))) >> 16)) + (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) * (func() int32 {
 10331  		if (16) == 1 {
 10332  			return (((tmp32) >> 1) + ((tmp32) & 1))
 10333  		}
 10334  		return ((((tmp32) >> ((16) - 1)) + 1) >> 1)
 10335  	}())))
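         	// The conditional below expands SKP_ADD_SAT32(gain_add_Q16, tmp32): a saturating 32-bit add that
         	// clamps to INT32_MIN or INT32_MAX when the signed addition would overflow.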
 10336  	gain_add_Q16 = func() int32 {
 10337  		if ((uint32((gain_add_Q16) + (tmp32))) & 0x80000000) == uint32(0) {
 10338  			return func() int32 {
 10339  				if ((uint32((gain_add_Q16) & (tmp32))) & 0x80000000) != uint32(0) {
 10340  					return libc.Int32FromUint32(0x80000000)
 10341  				}
 10342  				return ((gain_add_Q16) + (tmp32))
 10343  			}()
 10344  		}
 10345  		return func() int32 {
 10346  			if ((uint32((gain_add_Q16) | (tmp32))) & 0x80000000) == uint32(0) {
 10347  				return 0x7FFFFFFF
 10348  			}
 10349  			return ((gain_add_Q16) + (tmp32))
 10350  		}()
 10351  	}()
 10352  
 10353  	for k = 0; k < 4; k++ {
 10354  		*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = (((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) >> 16) * (int32(int16(gain_mult_Q16)))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(gain_mult_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) * (func() int32 {
 10355  			if (16) == 1 {
 10356  				return (((gain_mult_Q16) >> 1) + ((gain_mult_Q16) & 1))
 10357  			}
 10358  			return ((((gain_mult_Q16) >> ((16) - 1)) + 1) >> 1)
 10359  		}())))
 10360  		if *(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) < 0 {
 10361  			*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = 0x7FFFFFFF
 10362  		}
 10363  	}
 10364  
 10365  	for k = 0; k < 4; k++ {
 10366  		*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = func() int32 {
 10367  			if ((uint32((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) + (gain_add_Q16))) & 0x80000000) != 0 {
 10368  				return 0x7FFFFFFF
 10369  			}
 10370  			return ((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) + (gain_add_Q16))
 10371  		}()
 10372  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16 = func() int32 {
 10373  			if ((uint32(((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) >> 16) * (int32(func() int16 {
 10374  				if (2) == 1 {
 10375  					return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10376  				}
 10377  				return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10378  			}()))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) & 0x0000FFFF) * (int32(func() int16 {
 10379  				if (2) == 1 {
 10380  					return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10381  				}
 10382  				return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10383  			}()))) >> 16)))) & 0x80000000) == uint32(0) {
 10384  				return func() int32 {
 10385  					if ((uint32(((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) & ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) >> 16) * (int32(func() int16 {
 10386  						if (2) == 1 {
 10387  							return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10388  						}
 10389  						return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10390  					}()))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) & 0x0000FFFF) * (int32(func() int16 {
 10391  						if (2) == 1 {
 10392  							return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10393  						}
 10394  						return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10395  					}()))) >> 16)))) & 0x80000000) != uint32(0) {
 10396  						return libc.Int32FromUint32(0x80000000)
 10397  					}
 10398  					return (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) >> 16) * (int32(func() int16 {
 10399  						if (2) == 1 {
 10400  							return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10401  						}
 10402  						return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10403  					}()))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) & 0x0000FFFF) * (int32(func() int16 {
 10404  						if (2) == 1 {
 10405  							return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10406  						}
 10407  						return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10408  					}()))) >> 16)))
 10409  				}()
 10410  			}
 10411  			return func() int32 {
 10412  				if ((uint32(((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) | ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) >> 16) * (int32(func() int16 {
 10413  					if (2) == 1 {
 10414  						return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10415  					}
 10416  					return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10417  				}()))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) & 0x0000FFFF) * (int32(func() int16 {
 10418  					if (2) == 1 {
 10419  						return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10420  					}
 10421  					return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10422  				}()))) >> 16)))) & 0x80000000) == uint32(0) {
 10423  					return 0x7FFFFFFF
 10424  				}
 10425  				return (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) >> 16) * (int32(func() int16 {
 10426  					if (2) == 1 {
 10427  						return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10428  					}
 10429  					return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10430  				}()))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) & 0x0000FFFF) * (int32(func() int16 {
 10431  					if (2) == 1 {
 10432  						return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10433  					}
 10434  					return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10435  				}()))) >> 16)))
 10436  			}()
 10437  		}()
 10438  	}
 10439  
 10440  	/************************************************/
 10441  	/* Decrease level during fricatives (de-essing) */
 10442  	/************************************************/
 10443  	gain_mult_Q16 = (SKP_FIX_CONST(tls, 1.0, 16) + (func() int32 {
 10444  		if (10) == 1 {
 10445  			return ((((SKP_FIX_CONST(tls, 0.05, 26)) + (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) * (SKP_FIX_CONST(tls, 0.1, 12)))) >> 1) + (((SKP_FIX_CONST(tls, 0.05, 26)) + (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) * (SKP_FIX_CONST(tls, 0.1, 12)))) & 1))
 10446  		}
 10447  		return (((((SKP_FIX_CONST(tls, 0.05, 26)) + (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) * (SKP_FIX_CONST(tls, 0.1, 12)))) >> ((10) - 1)) + 1) >> 1)
 10448  	}()))
 10449  
 10450  	if ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15 <= 0) && ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 1) {
 10451  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 24 {
 10452  			var essStrength_Q15 int32 = (((((-(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15) >> 16) * (int32((int16((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))))))) + ((((-(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15) & 0x0000FFFF) * (int32((int16((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))))))) >> 16)) + ((-(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15) * (func() int32 {
 10453  				if (16) == 1 {
 10454  					return ((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) & 1))
 10455  				}
 10456  				return (((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) >> ((16) - 1)) + 1) >> 1)
 10457  			}())))
 10458  			tmp32 = SKP_Silk_log2lin(tls, (SKP_FIX_CONST(tls, 16.0, 7) - ((((essStrength_Q15) >> 16) * (int32((int16((((SKP_FIX_CONST(tls, 2.0, 7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) + ((((SKP_FIX_CONST(tls, 2.0, 7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) >> 16)))))) + ((((essStrength_Q15) & 0x0000FFFF) * (int32((int16((((SKP_FIX_CONST(tls, 2.0, 7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) + ((((SKP_FIX_CONST(tls, 2.0, 7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) >> 16)))))) >> 16))))
 10459  			gain_mult_Q16 = (((((gain_mult_Q16) >> 16) * (int32(int16(tmp32)))) + ((((gain_mult_Q16) & 0x0000FFFF) * (int32(int16(tmp32)))) >> 16)) + ((gain_mult_Q16) * (func() int32 {
 10460  				if (16) == 1 {
 10461  					return (((tmp32) >> 1) + ((tmp32) & 1))
 10462  				}
 10463  				return ((((tmp32) >> ((16) - 1)) + 1) >> 1)
 10464  			}())))
 10465  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 16 {
 10466  			var essStrength_Q15 int32 = (((((-(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15) >> 16) * (int32((int16((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))))))) + ((((-(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15) & 0x0000FFFF) * (int32((int16((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))))))) >> 16)) + ((-(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15) * (func() int32 {
 10467  				if (16) == 1 {
 10468  					return ((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) & 1))
 10469  				}
 10470  				return (((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) >> ((16) - 1)) + 1) >> 1)
 10471  			}())))
 10472  			tmp32 = SKP_Silk_log2lin(tls, (SKP_FIX_CONST(tls, 16.0, 7) - ((((essStrength_Q15) >> 16) * (int32((int16((((SKP_FIX_CONST(tls, 1.0, 7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) + ((((SKP_FIX_CONST(tls, 1.0, 7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) >> 16)))))) + ((((essStrength_Q15) & 0x0000FFFF) * (int32((int16((((SKP_FIX_CONST(tls, 1.0, 7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) + ((((SKP_FIX_CONST(tls, 1.0, 7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) >> 16)))))) >> 16))))
 10473  			gain_mult_Q16 = (((((gain_mult_Q16) >> 16) * (int32(int16(tmp32)))) + ((((gain_mult_Q16) & 0x0000FFFF) * (int32(int16(tmp32)))) >> 16)) + ((gain_mult_Q16) * (func() int32 {
 10474  				if (16) == 1 {
 10475  					return (((tmp32) >> 1) + ((tmp32) & 1))
 10476  				}
 10477  				return ((((tmp32) >> ((16) - 1)) + 1) >> 1)
 10478  			}())))
 10479  		} else {
 10480  
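         			// Intentionally empty: no de-essing is applied at other sampling rates
         			// (the original C presumably only carries an assertion here, which ccgo dropped).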
 10481  		}
 10482  	}
 10483  
 10484  	for k = 0; k < 4; k++ {
 10485  		*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4)) = ((((gain_mult_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4)))))) + ((((gain_mult_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4)))))) >> 16))
 10486  	}
 10487  
 10488  	/************************************************/
 10489  	/* Control low-frequency shaping and noise tilt */
 10490  	/************************************************/
 10491  	/* Less low frequency shaping for noisy inputs */
 10492  	strength_Q16 = ((SKP_FIX_CONST(tls, 3.0, 0)) * (SKP_FIX_CONST(tls, 1.0, 16) + ((int32(int16(SKP_FIX_CONST(tls, 0.5, 1)))) * (int32((int16(*(*int32)(unsafe.Pointer((psEncCtrl + 620 /* &.input_quality_bands_Q15 */))) - SKP_FIX_CONST(tls, 1.0, 15))))))))
 10493  	if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
  10494  		/* Reduce low-frequency quantization noise for periodic signals, depending on the pitch lag */
 10495  		/*f = 400; freqz([1, -0.98 + 2e-4 * f], [1, -0.97 + 7e-4 * f], 2^12, Fs); axis([0, 1000, -10, 1])*/
 10496  		var fs_kHz_inv int32 = ((SKP_FIX_CONST(tls, 0.2, 14)) / ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz))
 10497  		for k = 0; k < 4; k++ {
 10498  			b_Q14 = (fs_kHz_inv + ((SKP_FIX_CONST(tls, 3.0, 14)) / (*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 108 /* &.pitchL */) + uintptr(k)*4)))))
 10499  			/* Pack two coefficients in one int32 */
 10500  			*(*int32)(unsafe.Pointer((psEncCtrl + 508 /* &.LF_shp_Q14 */) + uintptr(k)*4)) = (((SKP_FIX_CONST(tls, 1.0, 14) - b_Q14) - ((((strength_Q16) >> 16) * (int32(int16(b_Q14)))) + ((((strength_Q16) & 0x0000FFFF) * (int32(int16(b_Q14)))) >> 16))) << (16))
 10501  			*(*int32)(unsafe.Pointer((psEncCtrl + 508 /* &.LF_shp_Q14 */) + uintptr(k)*4)) |= (int32((uint16(b_Q14 - SKP_FIX_CONST(tls, 1.0, 14)))))
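         			// The packed word now holds two Q14 coefficients: the upper 16 bits carry
         			// (1.0 - b - strength*b) and the lower 16 bits carry (b - 1.0), stored as an
         			// unsigned 16-bit pattern.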
 10502  		}
 10503  		// Guarantees that second argument to SMULWB() is within range of an SKP_int16
 10504  		Tilt_Q16 = (-SKP_FIX_CONST(tls, 0.3, 16) - ((((SKP_FIX_CONST(tls, 1.0, 16) - SKP_FIX_CONST(tls, 0.3, 16)) >> 16) * (int32((int16((((SKP_FIX_CONST(tls, 0.35, 24)) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) + ((((SKP_FIX_CONST(tls, 0.35, 24)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) >> 16)))))) + ((((SKP_FIX_CONST(tls, 1.0, 16) - SKP_FIX_CONST(tls, 0.3, 16)) & 0x0000FFFF) * (int32((int16((((SKP_FIX_CONST(tls, 0.35, 24)) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) + ((((SKP_FIX_CONST(tls, 0.35, 24)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) >> 16)))))) >> 16)))
 10505  	} else {
 10506  		b_Q14 = ((21299) / ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz)) // 1.3_Q0 = 21299_Q14
 10507  		/* Pack two coefficients in one int32 */
 10508  		*(*int32)(unsafe.Pointer((psEncCtrl + 508 /* &.LF_shp_Q14 */))) = (((SKP_FIX_CONST(tls, 1.0, 14) - b_Q14) - ((((strength_Q16) >> 16) * (int32((int16((((SKP_FIX_CONST(tls, 0.6, 16)) >> 16) * (int32(int16(b_Q14)))) + ((((SKP_FIX_CONST(tls, 0.6, 16)) & 0x0000FFFF) * (int32(int16(b_Q14)))) >> 16)))))) + ((((strength_Q16) & 0x0000FFFF) * (int32((int16((((SKP_FIX_CONST(tls, 0.6, 16)) >> 16) * (int32(int16(b_Q14)))) + ((((SKP_FIX_CONST(tls, 0.6, 16)) & 0x0000FFFF) * (int32(int16(b_Q14)))) >> 16)))))) >> 16))) << (16))
 10509  		*(*int32)(unsafe.Pointer((psEncCtrl + 508 /* &.LF_shp_Q14 */))) |= (int32((uint16(b_Q14 - SKP_FIX_CONST(tls, 1.0, 14)))))
 10510  		for k = 1; k < 4; k++ {
 10511  			*(*int32)(unsafe.Pointer((psEncCtrl + 508 /* &.LF_shp_Q14 */) + uintptr(k)*4)) = *(*int32)(unsafe.Pointer((psEncCtrl + 508 /* &.LF_shp_Q14 */)))
 10512  		}
 10513  		Tilt_Q16 = -SKP_FIX_CONST(tls, 0.3, 16)
 10514  	}
 10515  
 10516  	/****************************/
 10517  	/* HARMONIC SHAPING CONTROL */
 10518  	/****************************/
 10519  	/* Control boosting of harmonic frequencies */
 10520  	HarmBoost_Q16 = (((((((SKP_FIX_CONST(tls, 1.0, 17) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (3))) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15)))) + ((((SKP_FIX_CONST(tls, 1.0, 17) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (3))) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15)))) >> 16)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) + (((((((SKP_FIX_CONST(tls, 1.0, 17) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (3))) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15)))) + ((((SKP_FIX_CONST(tls, 1.0, 17) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (3))) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15)))) >> 16)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) >> 16))
 10521  
 10522  	/* More harmonic boost for noisy input signals */
 10523  	HarmBoost_Q16 = ((HarmBoost_Q16) + ((((SKP_FIX_CONST(tls, 1.0, 16) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14) << (2))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) + ((((SKP_FIX_CONST(tls, 1.0, 16) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14) << (2))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) >> 16)))
 10524  
 10525  	if (1 != 0) && ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0) {
 10526  		/* More harmonic noise shaping for high bitrates or noisy input */
 10527  		HarmShapeGain_Q16 = ((SKP_FIX_CONST(tls, 0.3, 16)) + ((((SKP_FIX_CONST(tls, 1.0, 16) - ((((SKP_FIX_CONST(tls, 1.0, 18) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (4))) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14)))) + ((((SKP_FIX_CONST(tls, 1.0, 18) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (4))) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14)))) >> 16))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.2, 16))))) + ((((SKP_FIX_CONST(tls, 1.0, 16) - ((((SKP_FIX_CONST(tls, 1.0, 18) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (4))) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14)))) + ((((SKP_FIX_CONST(tls, 1.0, 18) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (4))) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14)))) >> 16))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.2, 16))))) >> 16)))
 10528  
 10529  		/* Less harmonic noise shaping for less periodic signals */
 10530  		HarmShapeGain_Q16 = (((((HarmShapeGain_Q16) << (1)) >> 16) * (int32(int16(SKP_Silk_SQRT_APPROX(tls, (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15) << (15))))))) + (((((HarmShapeGain_Q16) << (1)) & 0x0000FFFF) * (int32(int16(SKP_Silk_SQRT_APPROX(tls, (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15) << (15))))))) >> 16))
 10531  	} else {
 10532  		HarmShapeGain_Q16 = 0
 10533  	}
 10534  
 10535  	/*************************/
 10536  	/* Smooth over subframes */
 10537  	/*************************/
 10538  	for k = 0; k < 4; k++ {
 10539  		(*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmBoost_smth_Q16 = (((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmBoost_smth_Q16) + ((((HarmBoost_Q16 - (*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmBoost_smth_Q16) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.4, 16))))) + ((((HarmBoost_Q16 - (*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmBoost_smth_Q16) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.4, 16))))) >> 16)))
 10540  		(*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmShapeGain_smth_Q16 = (((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmShapeGain_smth_Q16) + ((((HarmShapeGain_Q16 - (*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmShapeGain_smth_Q16) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.4, 16))))) + ((((HarmShapeGain_Q16 - (*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmShapeGain_smth_Q16) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.4, 16))))) >> 16)))
 10541  		(*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FTilt_smth_Q16 = (((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FTilt_smth_Q16) + ((((Tilt_Q16 - (*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FTilt_smth_Q16) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.4, 16))))) + ((((Tilt_Q16 - (*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FTilt_smth_Q16) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.4, 16))))) >> 16)))
 10542  
 10543  		*(*int32)(unsafe.Pointer((psEncCtrl + 540 /* &.HarmBoost_Q14 */) + uintptr(k)*4)) = func() int32 {
 10544  			if (2) == 1 {
 10545  				return ((((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmBoost_smth_Q16) >> 1) + (((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmBoost_smth_Q16) & 1))
 10546  			}
 10547  			return (((((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmBoost_smth_Q16) >> ((2) - 1)) + 1) >> 1)
 10548  		}()
 10549  		*(*int32)(unsafe.Pointer((psEncCtrl + 572 /* &.HarmShapeGain_Q14 */) + uintptr(k)*4)) = func() int32 {
 10550  			if (2) == 1 {
 10551  				return ((((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmShapeGain_smth_Q16) >> 1) + (((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmShapeGain_smth_Q16) & 1))
 10552  			}
 10553  			return (((((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmShapeGain_smth_Q16) >> ((2) - 1)) + 1) >> 1)
 10554  		}()
 10555  		*(*int32)(unsafe.Pointer((psEncCtrl + 556 /* &.Tilt_Q14 */) + uintptr(k)*4)) = func() int32 {
 10556  			if (2) == 1 {
 10557  				return ((((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FTilt_smth_Q16) >> 1) + (((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FTilt_smth_Q16) & 1))
 10558  			}
 10559  			return (((((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FTilt_smth_Q16) >> ((2) - 1)) + 1) >> 1)
 10560  		}()
 10561  	}
 10562  }
 10563  
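         // SKP_Silk_NSQ: noise shaping quantizer for one frame; it loops over the four subframes,
         // re-whitening the LTP state on voiced frames before calling the per-subframe quantizer.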
 10564  func SKP_Silk_NSQ(tls *libc.TLS, psEncC uintptr, psEncCtrlC uintptr, NSQ uintptr, x uintptr, q uintptr, LSFInterpFactor_Q2 int32, PredCoef_Q12 uintptr, LTPCoef_Q14 uintptr, AR2_Q13 uintptr, HarmShapeGain_Q14 uintptr, Tilt_Q14 uintptr, LF_shp_Q14 uintptr, Gains_Q16 uintptr, Lambda_Q10 int32, LTP_scale_Q14 int32) { /* SKP_Silk_NSQ.c:65:6: */
 10565  	bp := tls.Alloc(6304)
 10566  	defer tls.Free(6304)
 10567  
 10568  	var k int32
 10569  	var lag int32
 10570  	var start_idx int32
 10571  	var LSF_interpolation_flag int32
 10572  	var A_Q12 uintptr
 10573  	var B_Q14 uintptr
 10574  	var AR_shp_Q13 uintptr
 10575  	var pxq uintptr
 10576  	// var sLTP_Q16 [960]int32 at bp+2464, 3840
 10577  
 10578  	// var sLTP [960]int16 at bp+64, 1920
 10579  
 10580  	var HarmShapeFIRPacked_Q14 int32
 10581  	var offset_Q10 int32
 10582  	// var FiltState [16]int32 at bp, 64
 10583  
 10584  	// var x_sc_Q10 [120]int32 at bp+1984, 480
 10585  
 10586  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frand_seed = (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FSeed
  10587  	/* Set unvoiced lag to the previous one; it is overwritten later for voiced frames */
 10588  	lag = (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FlagPrev
 10589  
 10590  	offset_Q10 = int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Quantization_Offsets_Q10)) + uintptr((*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype)*4) + uintptr((*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FQuantOffsetType)*2)))
 10591  
 10592  	if LSFInterpFactor_Q2 == (int32(1) << 2) {
 10593  		LSF_interpolation_flag = 0
 10594  	} else {
 10595  		LSF_interpolation_flag = 1
 10596  	}
 10597  
 10598  	/* Setup pointers to start of sub frame */
 10599  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx = (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length
 10600  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx = (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length
 10601  	pxq = ((NSQ /* &.xq */) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length)*2)
 10602  	for k = 0; k < 4; k++ {
 10603  		A_Q12 = (PredCoef_Q12 + uintptr((((k>>1)|(1-LSF_interpolation_flag))*16))*2)
 10604  		B_Q14 = (LTPCoef_Q14 + uintptr((k*5))*2)
 10605  		AR_shp_Q13 = (AR2_Q13 + uintptr((k*16))*2)
 10606  
 10607  		/* Noise shape parameters */
 10608  
 10609  		HarmShapeFIRPacked_Q14 = ((*(*int32)(unsafe.Pointer(HarmShapeGain_Q14 + uintptr(k)*4))) >> (2))
 10610  		HarmShapeFIRPacked_Q14 = HarmShapeFIRPacked_Q14 | (((*(*int32)(unsafe.Pointer(HarmShapeGain_Q14 + uintptr(k)*4))) >> (1)) << (16))
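         		// HarmShapeFIRPacked_Q14 packs a 3-tap harmonic shaping FIR into one word: the low 16 bits
         		// hold HarmShapeGain/4 (used for the two outer taps) and the high 16 bits hold
         		// HarmShapeGain/2 (the centre tap).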
 10611  
 10612  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag = 0
 10613  		if (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype == 0 {
 10614  			/* Voiced */
 10615  			lag = *(*int32)(unsafe.Pointer((psEncCtrlC + 108 /* &.pitchL */) + uintptr(k)*4))
 10616  
 10617  			/* Re-whitening */
 10618  			if (k & (3 - ((LSF_interpolation_flag) << (1)))) == 0 {
 10619  
 10620  				/* Rewhiten with new A coefs */
 10621  				start_idx = ((((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length - lag) - (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder) - (5 / 2))
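         				// start_idx is placed lag + predictLPCOrder + LTP_ORDER/2 (= 2) samples before the end of
         				// the previous output, so the re-whitened stretch covers enough history to seed the
         				// long-term prediction buffer.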
 10622  
 10623  				libc.Xmemset(tls, bp /* &FiltState[0] */, 0, (uint64((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder) * uint64(unsafe.Sizeof(int32(0)))))
 10624  				SKP_Silk_MA_Prediction(tls, ((NSQ /* &.xq */) + uintptr((start_idx+(k*((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length>>2))))*2),
 10625  					A_Q12, bp /* &FiltState[0] */, (bp + 64 /* &sLTP[0] */ + uintptr(start_idx)*2), ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length - start_idx), (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder)
 10626  
 10627  				(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag = 1
 10628  				(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx = (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length
 10629  			}
 10630  		}
 10631  
 10632  		SKP_Silk_nsq_scale_states(tls, NSQ, x, bp+1984 /* &x_sc_Q10[0] */, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length, bp+64, /* &sLTP[0] */
 10633  			bp+2464 /* &sLTP_Q16[0] */, k, LTP_scale_Q14, Gains_Q16, psEncCtrlC+108 /* &.pitchL */)
 10634  
 10635  		SKP_Silk_noise_shape_quantizer(tls, NSQ, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype, bp+1984 /* &x_sc_Q10[0] */, q, pxq, bp+2464 /* &sLTP_Q16[0] */, A_Q12, B_Q14,
 10636  			AR_shp_Q13, lag, HarmShapeFIRPacked_Q14, *(*int32)(unsafe.Pointer(Tilt_Q14 + uintptr(k)*4)), *(*int32)(unsafe.Pointer(LF_shp_Q14 + uintptr(k)*4)), *(*int32)(unsafe.Pointer(Gains_Q16 + uintptr(k)*4)), Lambda_Q10,
 10637  			offset_Q10, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FshapingLPCOrder, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder)
 10638  
 10639  		x += 2 * uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length)
 10640  		q += uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length)
 10641  		pxq += 2 * (uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length))
 10642  	}
 10643  
 10644  	/* Update lagPrev for next frame */
 10645  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FlagPrev = *(*int32)(unsafe.Pointer((psEncCtrlC + 108 /* &.pitchL */) + 3*4))
 10646  
 10647  	/* Save quantized speech and noise shaping signals */
 10648  	libc.Xmemcpy(tls, NSQ /* &.xq */, ((NSQ /* &.xq */) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length)*2), (uint64((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length) * uint64(unsafe.Sizeof(int16(0)))))
 10649  	libc.Xmemcpy(tls, NSQ+1920 /* &.sLTP_shp_Q10 */, ((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length)*4), (uint64((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length) * uint64(unsafe.Sizeof(int32(0)))))
 10650  
 10651  }
 10652  
 10653  /***********************************/
 10654  /* SKP_Silk_noise_shape_quantizer  */
 10655  /***********************************/
 10656  func SKP_Silk_noise_shape_quantizer(tls *libc.TLS, NSQ uintptr, sigtype int32, x_sc_Q10 uintptr, q uintptr, xq uintptr, sLTP_Q16 uintptr, a_Q12 uintptr, b_Q14 uintptr, AR_shp_Q13 uintptr, lag int32, HarmShapeFIRPacked_Q14 int32, Tilt_Q14 int32, LF_shp_Q14 int32, Gain_Q16 int32, Lambda_Q10 int32, offset_Q10 int32, length int32, shapingLPCOrder int32, predictLPCOrder int32) { /* SKP_Silk_NSQ.c:172:17: */
 10657  	var i int32
 10658  	var j int32
 10659  	var LTP_pred_Q14 int32
 10660  	var LPC_pred_Q10 int32
 10661  	var n_AR_Q10 int32
 10662  	var n_LTP_Q14 int32
 10663  	var n_LF_Q10 int32
 10664  	var r_Q10 int32
 10665  	var q_Q0 int32
 10666  	var q_Q10 int32
 10667  	var thr1_Q10 int32
 10668  	var thr2_Q10 int32
 10669  	var thr3_Q10 int32
 10670  	var dither int32
 10671  	var exc_Q10 int32
 10672  	var LPC_exc_Q10 int32
 10673  	var xq_Q10 int32
 10674  	var tmp1 int32
 10675  	var tmp2 int32
 10676  	var sLF_AR_shp_Q10 int32
 10677  	var psLPC_Q14 uintptr
 10678  	var shp_lag_ptr uintptr
 10679  	var pred_lag_ptr uintptr
 10680  
 10681  	shp_lag_ptr = ((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx-lag)+(3/2)))*4)
 10682  	pred_lag_ptr = (sLTP_Q16 + uintptr((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx-lag)+(5/2)))*4)
 10683  
 10684  	/* Setup short term AR state */
 10685  	psLPC_Q14 = ((NSQ + 5760 /* &.sLPC_Q14 */) + 31*4)
 10686  
 10687  	/* Quantization thresholds */
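	/* in Q10 (1024 = 1.0): residuals below thr1 round towards more negative pulses, [thr1, thr2) maps to -1, [thr2, thr3] to 0, and values above thr3 round towards positive pulses; Lambda_Q10 widens these regions to favour cheaper codewords */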
 10688  	thr1_Q10 = ((-1536) - ((Lambda_Q10) >> (1)))
 10689  	thr2_Q10 = ((-512) - ((Lambda_Q10) >> (1)))
 10690  	thr2_Q10 = ((thr2_Q10) + (((int32(int16(offset_Q10))) * (int32(int16(Lambda_Q10)))) >> (10)))
 10691  	thr3_Q10 = ((512) + ((Lambda_Q10) >> (1)))
 10692  
 10693  	for i = 0; i < length; i++ {
 10694  		/* Generate dither */
 10695  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frand_seed = (int32((uint32(907633515)) + ((uint32((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frand_seed)) * (uint32(196314165)))))
 10696  
 10697  		/* dither = rand_seed < 0 ? 0xFFFFFFFF : 0; */
 10698  		dither = (((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frand_seed) >> (31))
 10699  
 10700  		/* Short-term prediction */
 10701  		/* predictLPCOrder must be even */
 10702  		/* a_Q12 must start at a 4-byte aligned address */
 10703  
 10704  		/* predictLPCOrder must be at least 10 for the unrolled taps below */
 10705  		/* Partially unrolled */
 10706  		LPC_pred_Q10 = ((((*(*int32)(unsafe.Pointer(psLPC_Q14))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12))))) >> 16))
 10707  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-1)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 1*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 1*2))))) >> 16)))
 10708  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-2)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 2*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-2)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 2*2))))) >> 16)))
 10709  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-3)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 3*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-3)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 3*2))))) >> 16)))
 10710  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-4)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 4*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-4)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 4*2))))) >> 16)))
 10711  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-5)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 5*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-5)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 5*2))))) >> 16)))
 10712  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-6)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 6*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-6)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 6*2))))) >> 16)))
 10713  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-7)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 7*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-7)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 7*2))))) >> 16)))
 10714  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-8)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 8*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-8)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 8*2))))) >> 16)))
 10715  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-9)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 9*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-9)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 9*2))))) >> 16)))
 10716  		for j = 10; j < predictLPCOrder; j++ {
 10717  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + uintptr(-j)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + uintptr(j)*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + uintptr(-j)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + uintptr(j)*2))))) >> 16)))
 10718  		}
 10719  		/* Long-term prediction */
 10720  		if sigtype == 0 {
 10721  			/* Unrolled loop */
 10722  			LTP_pred_Q14 = ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14))))) >> 16))
 10723  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 1*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 1*2))))) >> 16)))
 10724  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 2*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 2*2))))) >> 16)))
 10725  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 3*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 3*2))))) >> 16)))
 10726  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 4*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 4*2))))) >> 16)))
 10727  			pred_lag_ptr += 4
 10728  		} else {
 10729  			LTP_pred_Q14 = 0
 10730  		}
 10731  
 10732  		/* Noise shape feedback */
 10733  		/* check that order is even */
 10734  		tmp2 = *(*int32)(unsafe.Pointer(psLPC_Q14))
 10735  		tmp1 = *(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */)))
 10736  		*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */))) = tmp2
 10737  		n_AR_Q10 = ((((tmp2) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13))))) + ((((tmp2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13))))) >> 16))
 10738  		for j = 2; j < shapingLPCOrder; j = j + (2) {
 10739  			tmp2 = *(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr((j-1))*4))
 10740  			*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr((j-1))*4)) = tmp1
 10741  			n_AR_Q10 = ((n_AR_Q10) + ((((tmp1) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((j-1))*2))))) + ((((tmp1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((j-1))*2))))) >> 16)))
 10742  			tmp1 = *(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr((j+0))*4))
 10743  			*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr((j+0))*4)) = tmp2
 10744  			n_AR_Q10 = ((n_AR_Q10) + ((((tmp2) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr(j)*2))))) + ((((tmp2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr(j)*2))))) >> 16)))
 10745  		}
 10746  		*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr((shapingLPCOrder-1))*4)) = tmp1
 10747  		n_AR_Q10 = ((n_AR_Q10) + ((((tmp1) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((shapingLPCOrder-1))*2))))) + ((((tmp1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((shapingLPCOrder-1))*2))))) >> 16)))
 10748  
 10749  		n_AR_Q10 = ((n_AR_Q10) >> (1)) /* Q11 -> Q10 */
 10750  		n_AR_Q10 = ((n_AR_Q10) + (((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12) >> 16) * (int32(int16(Tilt_Q14)))) + (((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12) & 0x0000FFFF) * (int32(int16(Tilt_Q14)))) >> 16)))
 10751  
 10752  		n_LF_Q10 = (((((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx-1))*4))) >> 16) * (int32(int16(LF_shp_Q14)))) + ((((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx-1))*4))) & 0x0000FFFF) * (int32(int16(LF_shp_Q14)))) >> 16)) << (2))
 10753  		n_LF_Q10 = (((n_LF_Q10) + ((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12) >> 16) * ((LF_shp_Q14) >> 16))) + (((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12) & 0x0000FFFF) * ((LF_shp_Q14) >> 16)) >> 16))
 10754  
 10755  		/* Long-term shaping */
 10756  		if lag > 0 {
 10757  			/* Symmetric, packed FIR coefficients */
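			/* the low word of HarmShapeFIRPacked_Q14 weights the sum of the two outer taps, the high word the centre tap around the pitch lag */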
 10758  			n_LTP_Q14 = (((((*(*int32)(unsafe.Pointer(shp_lag_ptr))) + (*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-2)*4)))) >> 16) * (int32(int16(HarmShapeFIRPacked_Q14)))) + (((((*(*int32)(unsafe.Pointer(shp_lag_ptr))) + (*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-2)*4)))) & 0x0000FFFF) * (int32(int16(HarmShapeFIRPacked_Q14)))) >> 16))
 10759  			n_LTP_Q14 = (((n_LTP_Q14) + (((*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-1)*4))) >> 16) * ((HarmShapeFIRPacked_Q14) >> 16))) + ((((*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * ((HarmShapeFIRPacked_Q14) >> 16)) >> 16))
 10760  			n_LTP_Q14 = ((n_LTP_Q14) << (6))
 10761  			shp_lag_ptr += 4
 10762  		} else {
 10763  			n_LTP_Q14 = 0
 10764  		}
 10765  
 10766  		/* Input minus prediction plus noise feedback  */
 10767  		//r = x[ i ] - LTP_pred - LPC_pred + n_AR + n_Tilt + n_LF + n_LTP;
 10768  		tmp1 = ((LTP_pred_Q14) - (n_LTP_Q14)) /* Add Q14 stuff */
 10769  		tmp1 = ((tmp1) >> (4))                /* convert to Q10  */
 10770  		tmp1 = ((tmp1) + (LPC_pred_Q10))      /* add Q10 stuff */
 10771  		tmp1 = ((tmp1) - (n_AR_Q10))          /* subtract Q10 stuff */
 10772  		tmp1 = ((tmp1) - (n_LF_Q10))          /* subtract Q10 stuff */
 10773  		r_Q10 = ((*(*int32)(unsafe.Pointer(x_sc_Q10 + uintptr(i)*4))) - (tmp1))
 10774  
 10775  		/* Flip sign depending on dither */
 10776  		r_Q10 = ((r_Q10 ^ dither) - dither)
 10777  		r_Q10 = ((r_Q10) - (offset_Q10))
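		/* the closure below clamps r_Q10 to [-64 << 10, 64 << 10]; the first branch of the expanded limit macro is dead code because the lower bound is always the smaller constant */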
 10778  		r_Q10 = func() int32 {
 10779  			if (int32(-64) << 10) > (int32(64) << 10) {
 10780  				return func() int32 {
 10781  					if (r_Q10) > (int32(-64) << 10) {
 10782  						return (int32(-64) << 10)
 10783  					}
 10784  					return func() int32 {
 10785  						if (r_Q10) < (int32(64) << 10) {
 10786  							return (int32(64) << 10)
 10787  						}
 10788  						return r_Q10
 10789  					}()
 10790  				}()
 10791  			}
 10792  			return func() int32 {
 10793  				if (r_Q10) > (int32(64) << 10) {
 10794  					return (int32(64) << 10)
 10795  				}
 10796  				return func() int32 {
 10797  					if (r_Q10) < (int32(-64) << 10) {
 10798  						return (int32(-64) << 10)
 10799  					}
 10800  					return r_Q10
 10801  				}()
 10802  			}()
 10803  		}()
 10804  
 10805  		/* Quantize */
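		/* the inline closures below apply a rounding right shift by 10 (SILK's SKP_RSHIFT_ROUND) to r_Q10 +/- Lambda_Q10/2, mapping Q10 back to whole pulse values */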
 10806  		q_Q0 = 0
 10807  		q_Q10 = 0
 10808  		if r_Q10 < thr2_Q10 {
 10809  			if r_Q10 < thr1_Q10 {
 10810  				q_Q0 = func() int32 {
 10811  					if (10) == 1 {
 10812  						return ((((r_Q10) + ((Lambda_Q10) >> (1))) >> 1) + (((r_Q10) + ((Lambda_Q10) >> (1))) & 1))
 10813  					}
 10814  					return (((((r_Q10) + ((Lambda_Q10) >> (1))) >> ((10) - 1)) + 1) >> 1)
 10815  				}()
 10816  				q_Q10 = ((q_Q0) << (10))
 10817  			} else {
 10818  				q_Q0 = -1
 10819  				q_Q10 = -1024
 10820  			}
 10821  		} else {
 10822  			if r_Q10 > thr3_Q10 {
 10823  				q_Q0 = func() int32 {
 10824  					if (10) == 1 {
 10825  						return ((((r_Q10) - ((Lambda_Q10) >> (1))) >> 1) + (((r_Q10) - ((Lambda_Q10) >> (1))) & 1))
 10826  					}
 10827  					return (((((r_Q10) - ((Lambda_Q10) >> (1))) >> ((10) - 1)) + 1) >> 1)
 10828  				}()
 10829  				q_Q10 = ((q_Q0) << (10))
 10830  			}
 10831  		}
 10832  		*(*int8)(unsafe.Pointer(q + uintptr(i))) = int8(q_Q0) /* No saturation needed because max is 64 */
 10833  
 10834  		/* Excitation */
 10835  		exc_Q10 = ((q_Q10) + (offset_Q10))
 10836  		exc_Q10 = ((exc_Q10 ^ dither) - dither)
 10837  
 10838  		/* Add predictions */
 10839  		LPC_exc_Q10 = ((exc_Q10) + (func() int32 {
 10840  			if (4) == 1 {
 10841  				return (((LTP_pred_Q14) >> 1) + ((LTP_pred_Q14) & 1))
 10842  			}
 10843  			return ((((LTP_pred_Q14) >> ((4) - 1)) + 1) >> 1)
 10844  		}()))
 10845  		xq_Q10 = ((LPC_exc_Q10) + (LPC_pred_Q10))
 10846  
 10847  		/* Scale XQ back to normal level before saving */
 10848  		*(*int16)(unsafe.Pointer(xq + uintptr(i)*2)) = func() int16 {
 10849  			if (func() int32 {
 10850  				if (10) == 1 {
 10851  					return (((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10852  						if (16) == 1 {
 10853  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10854  						}
 10855  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10856  					}()))) >> 1) + ((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10857  						if (16) == 1 {
 10858  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10859  						}
 10860  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10861  					}()))) & 1))
 10862  				}
 10863  				return ((((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10864  					if (16) == 1 {
 10865  						return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10866  					}
 10867  					return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10868  				}()))) >> ((10) - 1)) + 1) >> 1)
 10869  			}()) > 0x7FFF {
 10870  				return int16(0x7FFF)
 10871  			}
 10872  			return func() int16 {
 10873  				if (func() int32 {
 10874  					if (10) == 1 {
 10875  						return (((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10876  							if (16) == 1 {
 10877  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10878  							}
 10879  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10880  						}()))) >> 1) + ((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10881  							if (16) == 1 {
 10882  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10883  							}
 10884  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10885  						}()))) & 1))
 10886  					}
 10887  					return ((((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10888  						if (16) == 1 {
 10889  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10890  						}
 10891  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10892  					}()))) >> ((10) - 1)) + 1) >> 1)
 10893  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 10894  					return libc.Int16FromInt32(0x8000)
 10895  				}
 10896  				return func() int16 {
 10897  					if (10) == 1 {
 10898  						return (int16(((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10899  							if (16) == 1 {
 10900  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10901  							}
 10902  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10903  						}()))) >> 1) + ((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10904  							if (16) == 1 {
 10905  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10906  							}
 10907  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10908  						}()))) & 1)))
 10909  					}
 10910  					return (int16((((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10911  						if (16) == 1 {
 10912  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10913  						}
 10914  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10915  					}()))) >> ((10) - 1)) + 1) >> 1))
 10916  				}()
 10917  			}()
 10918  		}()
 10919  
 10920  		/* Update states */
 10921  		psLPC_Q14 += 4
 10922  		*(*int32)(unsafe.Pointer(psLPC_Q14)) = ((xq_Q10) << (4))
 10923  		sLF_AR_shp_Q10 = ((xq_Q10) - (n_AR_Q10))
 10924  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12 = ((sLF_AR_shp_Q10) << (2))
 10925  
 10926  		*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx)*4)) = ((sLF_AR_shp_Q10) - (n_LF_Q10))
 10927  		*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx)*4)) = ((LPC_exc_Q10) << (6))
 10928  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx++
 10929  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx++
 10930  
 10931  		/* Make dither dependent on quantized signal */
 10932  		*(*int32)(unsafe.Pointer(NSQ + 6448 /* &.rand_seed */)) += (int32(*(*int8)(unsafe.Pointer(q + uintptr(i)))))
 10933  	}
 10934  
 10935  	/* Update LPC synth buffer */
 10936  	libc.Xmemcpy(tls, NSQ+5760 /* &.sLPC_Q14 */, ((NSQ + 5760 /* &.sLPC_Q14 */) + uintptr(length)*4), (uint64(32) * uint64(unsafe.Sizeof(int32(0)))))
 10937  }
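
// The deeply nested closures that store xq[i] above scale the Q10 output
// sample by the Q16 gain, shift back down to Q0 with rounding, and saturate
// to the int16 range. A minimal sketch of the same arithmetic using a 64-bit
// intermediate for clarity (the generated code avoids 64-bit math; the helper
// name is illustrative only):
func scaleAndSaturateSketch(xqQ10, gainQ16 int32) int16 {
	v := (int64(xqQ10) * int64(gainQ16)) >> 16 // Q10 * Q16 -> Q10
	v = (v + (1 << 9)) >> 10                   // rounding shift from Q10 to Q0
	if v > 0x7FFF {
		return 0x7FFF
	}
	if v < -0x8000 {
		return -0x8000
	}
	return int16(v)
}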
 10938  
 10939  func SKP_Silk_nsq_scale_states(tls *libc.TLS, NSQ uintptr, x uintptr, x_sc_Q10 uintptr, subfr_length int32, sLTP uintptr, sLTP_Q16 uintptr, subfr int32, LTP_scale_Q14 int32, Gains_Q16 uintptr, pitchL uintptr) { /* SKP_Silk_NSQ.c:353:17: */
 10940  	var i int32
 10941  	var lag int32
 10942  	var inv_gain_Q16 int32
 10943  	var gain_adj_Q16 int32
 10944  	var inv_gain_Q32 int32
 10945  
 10946  	inv_gain_Q16 = SKP_INVERSE32_varQ(tls, func() int32 {
 10947  		if (*(*int32)(unsafe.Pointer(Gains_Q16 + uintptr(subfr)*4))) > (1) {
 10948  			return *(*int32)(unsafe.Pointer(Gains_Q16 + uintptr(subfr)*4))
 10949  		}
 10950  		return 1
 10951  	}(), 32)
 10952  	inv_gain_Q16 = func() int32 {
 10953  		if (inv_gain_Q16) < (0x7FFF) {
 10954  			return inv_gain_Q16
 10955  		}
 10956  		return 0x7FFF
 10957  	}()
 10958  	lag = *(*int32)(unsafe.Pointer(pitchL + uintptr(subfr)*4))
 10959  
 10960  	/* After rewhitening the LTP state is un-scaled, so scale with inv_gain_Q16 */
 10961  	if (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag != 0 {
 10962  		inv_gain_Q32 = ((inv_gain_Q16) << (16))
 10963  		if subfr == 0 {
 10964  			/* Do LTP downscaling */
 10965  			inv_gain_Q32 = (((((inv_gain_Q32) >> 16) * (int32(int16(LTP_scale_Q14)))) + ((((inv_gain_Q32) & 0x0000FFFF) * (int32(int16(LTP_scale_Q14)))) >> 16)) << (2))
 10966  		}
 10967  		for i = (((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx - lag) - (5 / 2)); i < (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx; i++ {
 10968  
 10969  			*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)) = ((((inv_gain_Q32) >> 16) * (int32(*(*int16)(unsafe.Pointer(sLTP + uintptr(i)*2))))) + ((((inv_gain_Q32) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(sLTP + uintptr(i)*2))))) >> 16))
 10970  		}
 10971  	}
 10972  
 10973  	/* Adjust for changing gain */
 10974  	if inv_gain_Q16 != (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Fprev_inv_gain_Q16 {
 10975  		gain_adj_Q16 = SKP_DIV32_varQ(tls, inv_gain_Q16, (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Fprev_inv_gain_Q16, 16)
 10976  
 10977  		/* Scale long-term shaping state */
 10978  		for i = ((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx - (subfr_length * 4)); i < (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx; i++ {
 10979  			*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 10980  				if (16) == 1 {
 10981  					return (((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4))) & 1))
 10982  				}
 10983  				return ((((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 10984  			}())))
 10985  		}
 10986  
 10987  		/* Scale long-term prediction state */
 10988  		if (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag == 0 {
 10989  			for i = (((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx - lag) - (5 / 2)); i < (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx; i++ {
 10990  				*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 10991  					if (16) == 1 {
 10992  						return (((*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4))) & 1))
 10993  					}
 10994  					return ((((*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 10995  				}())))
 10996  			}
 10997  		}
 10998  
 10999  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12 = (((((gain_adj_Q16) >> 16) * (int32(int16((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12)))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12)))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11000  			if (16) == 1 {
 11001  				return ((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12) >> 1) + (((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12) & 1))
 11002  			}
 11003  			return (((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12) >> ((16) - 1)) + 1) >> 1)
 11004  		}())))
 11005  
 11006  		/* Scale short-term prediction and shaping states */
 11007  		for i = 0; i < 32; i++ {
 11008  			*(*int32)(unsafe.Pointer((NSQ + 5760 /* &.sLPC_Q14 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 5760 /* &.sLPC_Q14 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 5760 /* &.sLPC_Q14 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11009  				if (16) == 1 {
 11010  					return (((*(*int32)(unsafe.Pointer((NSQ + 5760 /* &.sLPC_Q14 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((NSQ + 5760 /* &.sLPC_Q14 */) + uintptr(i)*4))) & 1))
 11011  				}
 11012  				return ((((*(*int32)(unsafe.Pointer((NSQ + 5760 /* &.sLPC_Q14 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11013  			}())))
 11014  		}
 11015  		for i = 0; i < 16; i++ {
 11016  			*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11017  				if (16) == 1 {
 11018  					return (((*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr(i)*4))) & 1))
 11019  				}
 11020  				return ((((*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11021  			}())))
 11022  		}
 11023  	}
 11024  
 11025  	/* Scale input */
 11026  	for i = 0; i < subfr_length; i++ {
 11027  		*(*int32)(unsafe.Pointer(x_sc_Q10 + uintptr(i)*4)) = (((int32(*(*int16)(unsafe.Pointer(x + uintptr(i)*2)))) * (int32(int16(inv_gain_Q16)))) >> (6))
 11028  	}
 11029  
 11030  	/* save inv_gain */
 11031  
 11032  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Fprev_inv_gain_Q16 = inv_gain_Q16
 11033  }
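
// SKP_Silk_nsq_scale_states divides the input and the filter states by the
// current subframe gain so that the quantizer works at a fixed signal level:
// inv_gain_Q16 is roughly 2^32 / Gains_Q16[subfr], capped at 0x7FFF, and the
// Q0 input is mapped to Q10 via (x * inv_gain_Q16) >> 6. A minimal sketch of
// that input scaling, with illustrative names only:
func scaleInputSketch(x []int16, gainQ16 int32, xScQ10 []int32) {
	if gainQ16 < 1 {
		gainQ16 = 1 // guard against division by zero, as the caller does
	}
	inv := (int64(1) << 32) / int64(gainQ16) // ~ 1/gain in Q16
	if inv > 0x7FFF {
		inv = 0x7FFF // keep the 16-bit multiply below from overflowing
	}
	invGainQ16 := int32(inv)
	for i, v := range x {
		xScQ10[i] = (int32(v) * invGainQ16) >> 6 // Q0 * Q16 >> 6 = Q10
	}
}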
 11034  
 11035  type NSQ_del_dec_struct = struct {
 11036  	FRandState [32]int32
 11037  	FQ_Q10     [32]int32
 11038  	FXq_Q10    [32]int32
 11039  	FPred_Q16  [32]int32
 11040  	FShape_Q10 [32]int32
 11041  	FGain_Q16  [32]int32
 11042  	FsAR2_Q14  [16]int32
 11043  	FsLPC_Q14  [152]int32
 11044  	FLF_AR_Q12 int32
 11045  	FSeed      int32
 11046  	FSeedInit  int32
 11047  	FRD_Q10    int32
 11048  } /* SKP_Silk_NSQ_del_dec.c:43:3 */
 11049  
 11050  type NSQ_sample_struct = struct {
 11051  	FQ_Q10        int32
 11052  	FRD_Q10       int32
 11053  	Fxq_Q14       int32
 11054  	FLF_AR_Q12    int32
 11055  	FsLTP_shp_Q10 int32
 11056  	FLPC_exc_Q16  int32
 11057  } /* SKP_Silk_NSQ_del_dec.c:52:3 */
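
// For delayed-decision quantization, NSQ_del_dec_struct holds one candidate
// quantizer state: 32-entry ring buffers (seed, pulses, output, prediction,
// shaping and gain) plus its running rate-distortion cost RD_Q10, while
// NSQ_sample_struct holds the two per-sample quantization candidates that are
// compared before the surviving states are updated.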
 11058  
 11059  func SKP_Silk_NSQ_del_dec(tls *libc.TLS, psEncC uintptr, psEncCtrlC uintptr, NSQ uintptr, x uintptr, q uintptr, LSFInterpFactor_Q2 int32, PredCoef_Q12 uintptr, LTPCoef_Q14 uintptr, AR2_Q13 uintptr, HarmShapeGain_Q14 uintptr, Tilt_Q14 uintptr, LF_shp_Q14 uintptr, Gains_Q16 uintptr, Lambda_Q10 int32, LTP_scale_Q14 int32) { /* SKP_Silk_NSQ_del_dec.c:107:6: */
 11060  	bp := tls.Alloc(12132)
 11061  	defer tls.Free(12132)
 11062  
 11063  	var i int32
 11064  	var k int32
 11065  	var lag int32
 11066  	var start_idx int32
 11067  	var LSF_interpolation_flag int32
 11068  	var Winner_ind int32
 11069  	var subfr int32
 11070  	var last_smple_idx int32
 11071  	// var smpl_buf_idx int32 at bp+12128, 4
 11072  
 11073  	var decisionDelay int32
 11074  	var subfr_length int32
 11075  	var A_Q12 uintptr
 11076  	var B_Q14 uintptr
 11077  	var AR_shp_Q13 uintptr
 11078  	var pxq uintptr
 11079  	// var sLTP_Q16 [960]int32 at bp+8288, 3840
 11080  
 11081  	// var sLTP [960]int16 at bp+5888, 1920
 11082  
 11083  	var HarmShapeFIRPacked_Q14 int32
 11084  	var offset_Q10 int32
 11085  	// var FiltState [16]int32 at bp+5824, 64
 11086  
 11087  	var RDmin_Q10 int32
 11088  	// var x_sc_Q10 [120]int32 at bp+7808, 480
 11089  
 11090  	// var psDelDec [4]NSQ_del_dec_struct at bp, 5824
 11091  
 11092  	var psDD uintptr
 11093  
 11094  	subfr_length = ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length / 4)
 11095  
 11096  	/* Set unvoiced lag to the previous one, overwrite later for voiced */
 11097  	lag = (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FlagPrev
 11098  
 11099  	/* Initialize delayed decision states */
 11100  	libc.Xmemset(tls, bp /* &psDelDec[0] */, 0, (uint64((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision) * uint64(unsafe.Sizeof(NSQ_del_dec_struct{}))))
 11101  	for k = 0; k < (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision; k++ {
 11102  		psDD = (bp /* &psDelDec */ + uintptr(k)*1456)
 11103  		(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed = ((k + (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FSeed) & 3)
 11104  		(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeedInit = (*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed
 11105  		(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FRD_Q10 = 0
 11106  		(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12 = (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12
 11107  		*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */))) = *(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length-1))*4))
 11108  		libc.Xmemcpy(tls, psDD+832 /* &.sLPC_Q14 */, NSQ+5760 /* &.sLPC_Q14 */, (uint64(32) * uint64(unsafe.Sizeof(int32(0)))))
 11109  		libc.Xmemcpy(tls, psDD+768 /* &.sAR2_Q14 */, NSQ+6368 /* &.sAR2_Q14 */, uint64(unsafe.Sizeof([16]int32{})))
 11110  	}
 11111  
 11112  	offset_Q10 = int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Quantization_Offsets_Q10)) + uintptr((*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype)*4) + uintptr((*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FQuantOffsetType)*2)))
 11113  	*(*int32)(unsafe.Pointer(bp + 12128 /* smpl_buf_idx */)) = 0 /* index of oldest samples */
 11114  
 11115  	decisionDelay = SKP_min_int(tls, 32, subfr_length)
 11116  
 11117  	/* For voiced frames limit the decision delay to lower than the pitch lag */
 11118  	if (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype == 0 {
 11119  		for k = 0; k < 4; k++ {
 11120  			decisionDelay = SKP_min_int(tls, decisionDelay, ((*(*int32)(unsafe.Pointer((psEncCtrlC + 108 /* &.pitchL */) + uintptr(k)*4)) - (5 / 2)) - 1))
 11121  		}
 11122  	} else {
 11123  		if lag > 0 {
 11124  			decisionDelay = SKP_min_int(tls, decisionDelay, ((lag - (5 / 2)) - 1))
 11125  		}
 11126  	}
 11127  
 11128  	if LSFInterpFactor_Q2 == (int32(1) << 2) {
 11129  		LSF_interpolation_flag = 0
 11130  	} else {
 11131  		LSF_interpolation_flag = 1
 11132  	}
 11133  
 11134  	/* Setup pointers to start of sub frame */
 11135  	pxq = ((NSQ /* &.xq */) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length)*2)
 11136  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx = (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length
 11137  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx = (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length
 11138  	subfr = 0
 11139  	for k = 0; k < 4; k++ {
 11140  		A_Q12 = (PredCoef_Q12 + uintptr((((k>>1)|(1-LSF_interpolation_flag))*16))*2)
 11141  		B_Q14 = (LTPCoef_Q14 + uintptr((k*5))*2)
 11142  		AR_shp_Q13 = (AR2_Q13 + uintptr((k*16))*2)
 11143  
 11144  		/* Noise shape parameters */
 11145  
 11146  		HarmShapeFIRPacked_Q14 = ((*(*int32)(unsafe.Pointer(HarmShapeGain_Q14 + uintptr(k)*4))) >> (2))
 11147  		HarmShapeFIRPacked_Q14 = HarmShapeFIRPacked_Q14 | (((*(*int32)(unsafe.Pointer(HarmShapeGain_Q14 + uintptr(k)*4))) >> (1)) << (16))
 11148  
 11149  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag = 0
 11150  		if (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype == 0 {
 11151  			/* Voiced */
 11152  			lag = *(*int32)(unsafe.Pointer((psEncCtrlC + 108 /* &.pitchL */) + uintptr(k)*4))
 11153  
 11154  			/* Re-whitening */
 11155  			if (k & (3 - ((LSF_interpolation_flag) << (1)))) == 0 {
 11156  				if k == 2 {
 11157  					/* RESET DELAYED DECISIONS */
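					/* mid-frame re-whitening (only reached with LSF interpolation) replaces the LTP state, so the pending delayed decisions are resolved and flushed to the output first */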
 11158  					/* Find winner */
 11159  					RDmin_Q10 = (*NSQ_del_dec_struct)(unsafe.Pointer(bp /* &psDelDec */)).FRD_Q10
 11160  					Winner_ind = 0
 11161  					for i = 1; i < (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision; i++ {
 11162  						if (*NSQ_del_dec_struct)(unsafe.Pointer(bp /* &psDelDec */ +uintptr(i)*1456)).FRD_Q10 < RDmin_Q10 {
 11163  							RDmin_Q10 = (*NSQ_del_dec_struct)(unsafe.Pointer(bp /* &psDelDec */ + uintptr(i)*1456)).FRD_Q10
 11164  							Winner_ind = i
 11165  						}
 11166  					}
 11167  					for i = 0; i < (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision; i++ {
 11168  						if i != Winner_ind {
 11169  							*(*int32)(unsafe.Pointer(bp /* &psDelDec */ + uintptr(i)*1456 + 1452 /* &.RD_Q10 */)) += (int32(0x7FFFFFFF) >> 4)
 11170  
 11171  						}
 11172  					}
 11173  
 11174  					/* Copy final part of signals from winner state to output and long-term filter states */
 11175  					psDD = (bp /* &psDelDec */ + uintptr(Winner_ind)*1456)
 11176  					last_smple_idx = (*(*int32)(unsafe.Pointer(bp + 12128 /* smpl_buf_idx */)) + decisionDelay)
 11177  					for i = 0; i < decisionDelay; i++ {
 11178  						last_smple_idx = ((last_smple_idx - 1) & (32 - 1))
 11179  						*(*int8)(unsafe.Pointer(q + uintptr((i - decisionDelay)))) = (int8((*(*int32)(unsafe.Pointer((psDD + 128 /* &.Q_Q10 */) + uintptr(last_smple_idx)*4))) >> (10)))
 11180  						*(*int16)(unsafe.Pointer(pxq + uintptr((i-decisionDelay))*2)) = func() int16 {
 11181  							if (func() int32 {
 11182  								if (10) == 1 {
 11183  									return (((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11184  										if (16) == 1 {
 11185  											return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11186  										}
 11187  										return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11188  									}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11189  										if (16) == 1 {
 11190  											return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11191  										}
 11192  										return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11193  									}()))) & 1))
 11194  								}
 11195  								return ((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11196  									if (16) == 1 {
 11197  										return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11198  									}
 11199  									return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11200  								}()))) >> ((10) - 1)) + 1) >> 1)
 11201  							}()) > 0x7FFF {
 11202  								return int16(0x7FFF)
 11203  							}
 11204  							return func() int16 {
 11205  								if (func() int32 {
 11206  									if (10) == 1 {
 11207  										return (((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11208  											if (16) == 1 {
 11209  												return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11210  											}
 11211  											return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11212  										}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11213  											if (16) == 1 {
 11214  												return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11215  											}
 11216  											return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11217  										}()))) & 1))
 11218  									}
 11219  									return ((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11220  										if (16) == 1 {
 11221  											return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11222  										}
 11223  										return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11224  									}()))) >> ((10) - 1)) + 1) >> 1)
 11225  								}()) < (int32(libc.Int16FromInt32(0x8000))) {
 11226  									return libc.Int16FromInt32(0x8000)
 11227  								}
 11228  								return func() int16 {
 11229  									if (10) == 1 {
 11230  										return (int16(((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11231  											if (16) == 1 {
 11232  												return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11233  											}
 11234  											return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11235  										}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11236  											if (16) == 1 {
 11237  												return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11238  											}
 11239  											return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11240  										}()))) & 1)))
 11241  									}
 11242  									return (int16((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11243  										if (16) == 1 {
 11244  											return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11245  										}
 11246  										return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11247  									}()))) >> ((10) - 1)) + 1) >> 1))
 11248  								}()
 11249  							}()
 11250  						}()
 11251  						*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx-decisionDelay)+i))*4)) = *(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(last_smple_idx)*4))
 11252  					}
 11253  
 11254  					subfr = 0
 11255  				}
 11256  
 11257  				/* Rewhiten with new A coefs */
 11258  				start_idx = ((((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length - lag) - (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder) - (5 / 2))
 11259  
 11260  				libc.Xmemset(tls, bp+5824 /* &FiltState[0] */, 0, (uint64((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder) * uint64(unsafe.Sizeof(int32(0)))))
 11261  				SKP_Silk_MA_Prediction(tls, ((NSQ /* &.xq */) + uintptr((start_idx+(k*(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length)))*2),
 11262  					A_Q12, bp+5824 /* &FiltState[0] */, (bp + 5888 /* &sLTP[0] */ + uintptr(start_idx)*2), ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length - start_idx), (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder)
 11263  
 11264  				(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx = (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length
 11265  				(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag = 1
 11266  			}
 11267  		}
 11268  
 11269  		SKP_Silk_nsq_del_dec_scale_states(tls, NSQ, bp /* &psDelDec[0] */, x, bp+7808, /* &x_sc_Q10[0] */
 11270  			subfr_length, bp+5888 /* &sLTP[0] */, bp+8288 /* &sLTP_Q16[0] */, k, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision, *(*int32)(unsafe.Pointer(bp + 12128 /* smpl_buf_idx */)),
 11271  			LTP_scale_Q14, Gains_Q16, psEncCtrlC+108 /* &.pitchL */)
 11272  
 11273  		SKP_Silk_noise_shape_quantizer_del_dec(tls, NSQ, bp /* &psDelDec[0] */, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype, bp+7808 /* &x_sc_Q10[0] */, q, pxq, bp+8288, /* &sLTP_Q16[0] */
 11274  			A_Q12, B_Q14, AR_shp_Q13, lag, HarmShapeFIRPacked_Q14, *(*int32)(unsafe.Pointer(Tilt_Q14 + uintptr(k)*4)), *(*int32)(unsafe.Pointer(LF_shp_Q14 + uintptr(k)*4)), *(*int32)(unsafe.Pointer(Gains_Q16 + uintptr(k)*4)),
 11275  			Lambda_Q10, offset_Q10, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length, libc.PostIncInt32(&subfr, 1), (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FshapingLPCOrder, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder,
 11276  			(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fwarping_Q16, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision, bp+12128 /* &smpl_buf_idx */, decisionDelay)
 11277  
 11278  		x += 2 * uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length)
 11279  		q += uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length)
 11280  		pxq += 2 * (uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length))
 11281  	}
 11282  
 11283  	/* Find winner */
 11284  	RDmin_Q10 = (*NSQ_del_dec_struct)(unsafe.Pointer(bp /* &psDelDec */)).FRD_Q10
 11285  	Winner_ind = 0
 11286  	for k = 1; k < (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision; k++ {
 11287  		if (*NSQ_del_dec_struct)(unsafe.Pointer(bp /* &psDelDec */ +uintptr(k)*1456)).FRD_Q10 < RDmin_Q10 {
 11288  			RDmin_Q10 = (*NSQ_del_dec_struct)(unsafe.Pointer(bp /* &psDelDec */ + uintptr(k)*1456)).FRD_Q10
 11289  			Winner_ind = k
 11290  		}
 11291  	}
 11292  
 11293  	/* Copy final part of signals from winner state to output and long-term filter states */
 11294  	psDD = (bp /* &psDelDec */ + uintptr(Winner_ind)*1456)
 11295  	(*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FSeed = (*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeedInit
 11296  	last_smple_idx = (*(*int32)(unsafe.Pointer(bp + 12128 /* smpl_buf_idx */)) + decisionDelay)
 11297  	for i = 0; i < decisionDelay; i++ {
 11298  		last_smple_idx = ((last_smple_idx - 1) & (32 - 1))
 11299  		*(*int8)(unsafe.Pointer(q + uintptr((i - decisionDelay)))) = (int8((*(*int32)(unsafe.Pointer((psDD + 128 /* &.Q_Q10 */) + uintptr(last_smple_idx)*4))) >> (10)))
 11300  		*(*int16)(unsafe.Pointer(pxq + uintptr((i-decisionDelay))*2)) = func() int16 {
 11301  			if (func() int32 {
 11302  				if (10) == 1 {
 11303  					return (((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11304  						if (16) == 1 {
 11305  							return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11306  						}
 11307  						return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11308  					}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11309  						if (16) == 1 {
 11310  							return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11311  						}
 11312  						return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11313  					}()))) & 1))
 11314  				}
 11315  				return ((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11316  					if (16) == 1 {
 11317  						return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11318  					}
 11319  					return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11320  				}()))) >> ((10) - 1)) + 1) >> 1)
 11321  			}()) > 0x7FFF {
 11322  				return int16(0x7FFF)
 11323  			}
 11324  			return func() int16 {
 11325  				if (func() int32 {
 11326  					if (10) == 1 {
 11327  						return (((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11328  							if (16) == 1 {
 11329  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11330  							}
 11331  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11332  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11333  							if (16) == 1 {
 11334  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11335  							}
 11336  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11337  						}()))) & 1))
 11338  					}
 11339  					return ((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11340  						if (16) == 1 {
 11341  							return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11342  						}
 11343  						return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11344  					}()))) >> ((10) - 1)) + 1) >> 1)
 11345  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 11346  					return libc.Int16FromInt32(0x8000)
 11347  				}
 11348  				return func() int16 {
 11349  					if (10) == 1 {
 11350  						return (int16(((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11351  							if (16) == 1 {
 11352  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11353  							}
 11354  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11355  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11356  							if (16) == 1 {
 11357  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11358  							}
 11359  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11360  						}()))) & 1)))
 11361  					}
 11362  					return (int16((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11363  						if (16) == 1 {
 11364  							return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11365  						}
 11366  						return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11367  					}()))) >> ((10) - 1)) + 1) >> 1))
 11368  				}()
 11369  			}()
 11370  		}()
 11371  		*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx-decisionDelay)+i))*4)) = *(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(last_smple_idx)*4))
 11372  		*(*int32)(unsafe.Pointer(bp + 8288 /* &sLTP_Q16[0] */ + uintptr((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx-decisionDelay)+i))*4)) = *(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(last_smple_idx)*4))
 11373  	}
 11374  	libc.Xmemcpy(tls, NSQ+5760 /* &.sLPC_Q14 */, ((psDD + 832 /* &.sLPC_Q14 */) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length)*4), (uint64(32) * uint64(unsafe.Sizeof(int32(0)))))
 11375  	libc.Xmemcpy(tls, NSQ+6368 /* &.sAR2_Q14 */, psDD+768 /* &.sAR2_Q14 */, uint64(unsafe.Sizeof([16]int32{})))
 11376  
 11377  	/* Update states */
 11378  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12 = (*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12
 11379  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FlagPrev = *(*int32)(unsafe.Pointer((psEncCtrlC + 108 /* &.pitchL */) + 3*4))
 11380  
 11381  	/* Save quantized speech and noise shaping signals */
 11382  	libc.Xmemcpy(tls, NSQ /* &.xq */, ((NSQ /* &.xq */) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length)*2), (uint64((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length) * uint64(unsafe.Sizeof(int16(0)))))
 11383  	libc.Xmemcpy(tls, NSQ+1920 /* &.sLTP_shp_Q10 */, ((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length)*4), (uint64((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length) * uint64(unsafe.Sizeof(int32(0)))))
 11384  
 11385  }
 11386  
 11387  /******************************************/
 11388  /* Noise shape quantizer for one subframe */
 11389  /******************************************/
 11390  func SKP_Silk_noise_shape_quantizer_del_dec(tls *libc.TLS, NSQ uintptr, psDelDec uintptr, sigtype int32, x_Q10 uintptr, q uintptr, xq uintptr, sLTP_Q16 uintptr, a_Q12 uintptr, b_Q14 uintptr, AR_shp_Q13 uintptr, lag int32, HarmShapeFIRPacked_Q14 int32, Tilt_Q14 int32, LF_shp_Q14 int32, Gain_Q16 int32, Lambda_Q10 int32, offset_Q10 int32, length int32, subfr int32, shapingLPCOrder int32, predictLPCOrder int32, warping_Q16 int32, nStatesDelayedDecision int32, smpl_buf_idx uintptr, decisionDelay int32) { /* SKP_Silk_NSQ_del_dec.c:305:17: */
 11391  	bp := tls.Alloc(192)
 11392  	defer tls.Free(192)
 11393  
 11394  	var i int32
 11395  	var j int32
 11396  	var k int32
 11397  	var Winner_ind int32
 11398  	var RDmin_ind int32
 11399  	var RDmax_ind int32
 11400  	var last_smple_idx int32
 11401  	var Winner_rand_state int32
 11402  	var LTP_pred_Q14 int32
 11403  	var LPC_pred_Q10 int32
 11404  	var n_AR_Q10 int32
 11405  	var n_LTP_Q14 int32
 11406  	var n_LF_Q10 int32
 11407  	var r_Q10 int32
 11408  	var rr_Q20 int32
 11409  	var rd1_Q10 int32
 11410  	var rd2_Q10 int32
 11411  	var RDmin_Q10 int32
 11412  	var RDmax_Q10 int32
 11413  	var q1_Q10 int32
 11414  	var q2_Q10 int32
 11415  	var dither int32
 11416  	var exc_Q10 int32
 11417  	var LPC_exc_Q10 int32
 11418  	var xq_Q10 int32
 11419  	var tmp1 int32
 11420  	var tmp2 int32
 11421  	var sLF_AR_shp_Q10 int32
 11422  	var pred_lag_ptr uintptr
 11423  	var shp_lag_ptr uintptr
 11424  	var psLPC_Q14 uintptr
 11425  	// var psSampleState [4][2]NSQ_sample_struct at bp, 192
 11426  
 11427  	var psDD uintptr
 11428  	var psSS uintptr
 11429  
 11430  	shp_lag_ptr = ((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx-lag)+(3/2)))*4)
 11431  	pred_lag_ptr = (sLTP_Q16 + uintptr((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx-lag)+(5/2)))*4)
 11432  
 11433  	for i = 0; i < length; i++ {
 11434  		/* Perform common calculations used in all states */
 11435  
 11436  		/* Long-term prediction */
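        		// The pattern ((a >> 16) * b) + (((a & 0x0000FFFF) * b) >> 16) used below corresponds
        		// to the SILK fixed-point macros SKP_SMULWB / SKP_SMLAWB: a 32x16-bit multiply that
        		// returns (a * b) >> 16. The five unrolled taps form the LTP FIR used for voiced
        		// frames (sigtype == 0); for unvoiced frames the prediction is zero.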
 11437  		if sigtype == 0 {
 11438  			/* Unrolled loop */
 11439  			LTP_pred_Q14 = ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14))))) >> 16))
 11440  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 1*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 1*2))))) >> 16)))
 11441  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 2*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 2*2))))) >> 16)))
 11442  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 3*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 3*2))))) >> 16)))
 11443  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 4*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 4*2))))) >> 16)))
 11444  			pred_lag_ptr += 4
 11445  		} else {
 11446  			LTP_pred_Q14 = 0
 11447  		}
 11448  
 11449  		/* Long-term shaping */
 11450  		if lag > 0 {
 11451  			/* Symmetric, packed FIR coefficients */
 11452  			n_LTP_Q14 = (((((*(*int32)(unsafe.Pointer(shp_lag_ptr))) + (*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-2)*4)))) >> 16) * (int32(int16(HarmShapeFIRPacked_Q14)))) + (((((*(*int32)(unsafe.Pointer(shp_lag_ptr))) + (*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-2)*4)))) & 0x0000FFFF) * (int32(int16(HarmShapeFIRPacked_Q14)))) >> 16))
 11453  			n_LTP_Q14 = (((n_LTP_Q14) + (((*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-1)*4))) >> 16) * ((HarmShapeFIRPacked_Q14) >> 16))) + ((((*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * ((HarmShapeFIRPacked_Q14) >> 16)) >> 16))
 11454  			n_LTP_Q14 = ((n_LTP_Q14) << (6))
 11455  			shp_lag_ptr += 4
 11456  		} else {
 11457  			n_LTP_Q14 = 0
 11458  		}
 11459  
 11460  		for k = 0; k < nStatesDelayedDecision; k++ {
 11461  			/* Delayed decision state */
 11462  			psDD = (psDelDec + uintptr(k)*1456)
 11463  
 11464  			/* Sample state */
 11465  			psSS = (bp /* &psSampleState[0] */ + uintptr(k)*48)
 11466  
 11467  			/* Generate dither */
 11468  			(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed = (int32((uint32(907633515)) + ((uint32((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed)) * (uint32(196314165)))))
 11469  
 11470  			/* dither = rand_seed < 0 ? 0xFFFFFFFF : 0; */
 11471  			dither = (((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed) >> (31))
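        			// Seed is a 32-bit LCG; the arithmetic shift by 31 makes dither either 0 or -1,
        			// so the later `(x ^ dither) - dither` expressions negate a value exactly when
        			// dither == -1.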
 11472  
 11473  			/* Pointer used in short term prediction and shaping */
 11474  			psLPC_Q14 = ((psDD + 832 /* &.sLPC_Q14 */) + uintptr(((32-1)+i))*4)
 11475  			/* Short-term prediction */
 11476  			/* check that unrolling works */
 11477  			/* check that order is even */
 11478  			/* check that array starts at 4-byte aligned address */
 11479  			/* Partially unrolled */
 11480  			LPC_pred_Q10 = ((((*(*int32)(unsafe.Pointer(psLPC_Q14))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12))))) >> 16))
 11481  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-1)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 1*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 1*2))))) >> 16)))
 11482  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-2)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 2*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-2)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 2*2))))) >> 16)))
 11483  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-3)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 3*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-3)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 3*2))))) >> 16)))
 11484  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-4)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 4*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-4)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 4*2))))) >> 16)))
 11485  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-5)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 5*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-5)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 5*2))))) >> 16)))
 11486  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-6)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 6*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-6)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 6*2))))) >> 16)))
 11487  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-7)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 7*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-7)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 7*2))))) >> 16)))
 11488  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-8)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 8*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-8)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 8*2))))) >> 16)))
 11489  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-9)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 9*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-9)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 9*2))))) >> 16)))
 11490  			for j = 10; j < predictLPCOrder; j++ {
 11491  				LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + uintptr(-j)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + uintptr(j)*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + uintptr(-j)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + uintptr(j)*2))))) >> 16)))
 11492  			}
 11493  
 11494  			/* Noise shape feedback */
 11495  			/* check that order is even */
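        			// Warped noise-shape feedback: sAR2_Q14 holds the states of chained first-order
        			// allpass sections with coefficient warping_Q16; each section output is weighted by
        			// its AR_shp_Q13 coefficient and accumulated into n_AR_Q10 (still Q11 inside the
        			// loop, halved to Q10 afterwards).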
 11496  			/* Output of lowpass section */
 11497  			tmp2 = ((*(*int32)(unsafe.Pointer(psLPC_Q14))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */)))) >> 16) * (int32(int16(warping_Q16)))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */)))) & 0x0000FFFF) * (int32(int16(warping_Q16)))) >> 16)))
 11498  			/* Output of allpass section */
 11499  			tmp1 = ((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */)))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + 1*4)) - tmp2) >> 16) * (int32(int16(warping_Q16)))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + 1*4)) - tmp2) & 0x0000FFFF) * (int32(int16(warping_Q16)))) >> 16)))
 11500  			*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */))) = tmp2
 11501  			n_AR_Q10 = ((((tmp2) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13))))) + ((((tmp2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13))))) >> 16))
 11502  			/* Loop over allpass sections */
 11503  			for j = 2; j < shapingLPCOrder; j = j + (2) {
 11504  				/* Output of allpass section */
 11505  				tmp2 = ((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j-1))*4))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j+0))*4)) - tmp1) >> 16) * (int32(int16(warping_Q16)))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j+0))*4)) - tmp1) & 0x0000FFFF) * (int32(int16(warping_Q16)))) >> 16)))
 11506  				*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j-1))*4)) = tmp1
 11507  				n_AR_Q10 = ((n_AR_Q10) + ((((tmp1) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((j-1))*2))))) + ((((tmp1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((j-1))*2))))) >> 16)))
 11508  				/* Output of allpass section */
 11509  				tmp1 = ((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j+0))*4))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j+1))*4)) - tmp2) >> 16) * (int32(int16(warping_Q16)))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j+1))*4)) - tmp2) & 0x0000FFFF) * (int32(int16(warping_Q16)))) >> 16)))
 11510  				*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j+0))*4)) = tmp2
 11511  				n_AR_Q10 = ((n_AR_Q10) + ((((tmp2) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr(j)*2))))) + ((((tmp2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr(j)*2))))) >> 16)))
 11512  			}
 11513  			*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((shapingLPCOrder-1))*4)) = tmp1
 11514  			n_AR_Q10 = ((n_AR_Q10) + ((((tmp1) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((shapingLPCOrder-1))*2))))) + ((((tmp1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((shapingLPCOrder-1))*2))))) >> 16)))
 11515  
 11516  			n_AR_Q10 = ((n_AR_Q10) >> (1)) /* Q11 -> Q10 */
 11517  			n_AR_Q10 = ((n_AR_Q10) + (((((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12) >> 16) * (int32(int16(Tilt_Q14)))) + (((((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12) & 0x0000FFFF) * (int32(int16(Tilt_Q14)))) >> 16)))
 11518  
 11519  			n_LF_Q10 = (((((*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4))) >> 16) * (int32(int16(LF_shp_Q14)))) + ((((*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4))) & 0x0000FFFF) * (int32(int16(LF_shp_Q14)))) >> 16)) << (2))
 11520  			n_LF_Q10 = (((n_LF_Q10) + ((((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12) >> 16) * ((LF_shp_Q14) >> 16))) + (((((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12) & 0x0000FFFF) * ((LF_shp_Q14) >> 16)) >> 16))
 11521  
 11522  			/* Input minus prediction plus noise feedback                       */
 11523  			/* r = x[ i ] - LTP_pred - LPC_pred + n_AR + n_Tilt + n_LF + n_LTP  */
 11524  			tmp1 = ((LTP_pred_Q14) - (n_LTP_Q14))                                /* Add Q14 stuff */
 11525  			tmp1 = ((tmp1) >> (4))                                               /* convert to Q10 */
 11526  			tmp1 = ((tmp1) + (LPC_pred_Q10))                                     /* add Q10 stuff */
 11527  			tmp1 = ((tmp1) - (n_AR_Q10))                                         /* subtract Q10 stuff */
 11528  			tmp1 = ((tmp1) - (n_LF_Q10))                                         /* subtract Q10 stuff */
 11529  			r_Q10 = ((*(*int32)(unsafe.Pointer(x_Q10 + uintptr(i)*4))) - (tmp1)) /* residual error Q10 */
 11530  
 11531  			/* Flip sign depending on dither */
 11532  			r_Q10 = ((r_Q10 ^ dither) - dither)
 11533  			r_Q10 = ((r_Q10) - (offset_Q10))
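        			// The nested func() below expands a constant-bound limiter; since (-64 << 10) is the
        			// smaller bound only the second branch is live, clamping r_Q10 to [-65536, 65536],
        			// i.e. +/-64.0 in Q10, before the level search.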
 11534  			r_Q10 = func() int32 {
 11535  				if (int32(-64) << 10) > (int32(64) << 10) {
 11536  					return func() int32 {
 11537  						if (r_Q10) > (int32(-64) << 10) {
 11538  							return (int32(-64) << 10)
 11539  						}
 11540  						return func() int32 {
 11541  							if (r_Q10) < (int32(64) << 10) {
 11542  								return (int32(64) << 10)
 11543  							}
 11544  							return r_Q10
 11545  						}()
 11546  					}()
 11547  				}
 11548  				return func() int32 {
 11549  					if (r_Q10) > (int32(64) << 10) {
 11550  						return (int32(64) << 10)
 11551  					}
 11552  					return func() int32 {
 11553  						if (r_Q10) < (int32(-64) << 10) {
 11554  							return (int32(-64) << 10)
 11555  						}
 11556  						return r_Q10
 11557  					}()
 11558  				}()
 11559  			}()
 11560  
 11561  			/* Find two quantization level candidates and measure their rate-distortion */
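        			// Quantization levels are multiples of 1024 (1.0 in Q10). In the outer branches
        			// q1_Q10 is r_Q10 rounded to the nearest level and q2_Q10 the neighbouring level
        			// toward zero; in the middle branch the candidates are -1024 and 0. Each RD value
        			// adds the squared remaining error to a rate penalty of roughly Lambda_Q10 times the
        			// magnitude of the offset-adjusted level.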
 11562  			if r_Q10 < -1536 {
 11563  				q1_Q10 = ((func() int32 {
 11564  					if (10) == 1 {
 11565  						return (((r_Q10) >> 1) + ((r_Q10) & 1))
 11566  					}
 11567  					return ((((r_Q10) >> ((10) - 1)) + 1) >> 1)
 11568  				}()) << (10))
 11569  				r_Q10 = ((r_Q10) - (q1_Q10))
 11570  				rd1_Q10 = ((((-((q1_Q10) + (offset_Q10))) * (Lambda_Q10)) + ((int32(int16(r_Q10))) * (int32(int16(r_Q10))))) >> (10))
 11571  				rd2_Q10 = ((rd1_Q10) + (1024))
 11572  				rd2_Q10 = ((rd2_Q10) - ((Lambda_Q10) + ((r_Q10) << (1))))
 11573  				q2_Q10 = ((q1_Q10) + (1024))
 11574  			} else if r_Q10 > 512 {
 11575  				q1_Q10 = ((func() int32 {
 11576  					if (10) == 1 {
 11577  						return (((r_Q10) >> 1) + ((r_Q10) & 1))
 11578  					}
 11579  					return ((((r_Q10) >> ((10) - 1)) + 1) >> 1)
 11580  				}()) << (10))
 11581  				r_Q10 = ((r_Q10) - (q1_Q10))
 11582  				rd1_Q10 = (((((q1_Q10) + (offset_Q10)) * (Lambda_Q10)) + ((int32(int16(r_Q10))) * (int32(int16(r_Q10))))) >> (10))
 11583  				rd2_Q10 = ((rd1_Q10) + (1024))
 11584  				rd2_Q10 = ((rd2_Q10) - ((Lambda_Q10) - ((r_Q10) << (1))))
 11585  				q2_Q10 = ((q1_Q10) - (1024))
 11586  			} else { /* r_Q10 >= -1536 && r_Q10 <= 512 */
 11587  				rr_Q20 = ((int32(int16(offset_Q10))) * (int32(int16(Lambda_Q10))))
 11588  				rd2_Q10 = (((rr_Q20) + ((int32(int16(r_Q10))) * (int32(int16(r_Q10))))) >> (10))
 11589  				rd1_Q10 = ((rd2_Q10) + (1024))
 11590  				rd1_Q10 = ((rd1_Q10) + (((Lambda_Q10) + ((r_Q10) << (1))) - ((rr_Q20) >> (9))))
 11591  				q1_Q10 = -1024
 11592  				q2_Q10 = 0
 11593  			}
 11594  
 11595  			if rd1_Q10 < rd2_Q10 {
 11596  				(*NSQ_sample_struct)(unsafe.Pointer(psSS)).FRD_Q10 = (((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FRD_Q10) + (rd1_Q10))
 11597  				(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FRD_Q10 = (((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FRD_Q10) + (rd2_Q10))
 11598  				(*NSQ_sample_struct)(unsafe.Pointer(psSS)).FQ_Q10 = q1_Q10
 11599  				(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FQ_Q10 = q2_Q10
 11600  			} else {
 11601  				(*NSQ_sample_struct)(unsafe.Pointer(psSS)).FRD_Q10 = (((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FRD_Q10) + (rd2_Q10))
 11602  				(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FRD_Q10 = (((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FRD_Q10) + (rd1_Q10))
 11603  				(*NSQ_sample_struct)(unsafe.Pointer(psSS)).FQ_Q10 = q2_Q10
 11604  				(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FQ_Q10 = q1_Q10
 11605  			}
 11606  
 11607  			/* Update states for best quantization */
 11608  
 11609  			/* Quantized excitation */
 11610  			exc_Q10 = ((offset_Q10) + ((*NSQ_sample_struct)(unsafe.Pointer(psSS)).FQ_Q10))
 11611  			exc_Q10 = ((exc_Q10 ^ dither) - dither)
 11612  
 11613  			/* Add predictions */
 11614  			LPC_exc_Q10 = (exc_Q10 + (func() int32 {
 11615  				if (4) == 1 {
 11616  					return (((LTP_pred_Q14) >> 1) + ((LTP_pred_Q14) & 1))
 11617  				}
 11618  				return ((((LTP_pred_Q14) >> ((4) - 1)) + 1) >> 1)
 11619  			}()))
 11620  			xq_Q10 = ((LPC_exc_Q10) + (LPC_pred_Q10))
 11621  
 11622  			/* Update states */
 11623  			sLF_AR_shp_Q10 = ((xq_Q10) - (n_AR_Q10))
 11624  			(*NSQ_sample_struct)(unsafe.Pointer(psSS)).FsLTP_shp_Q10 = ((sLF_AR_shp_Q10) - (n_LF_Q10))
 11625  			(*NSQ_sample_struct)(unsafe.Pointer(psSS)).FLF_AR_Q12 = ((sLF_AR_shp_Q10) << (2))
 11626  			(*NSQ_sample_struct)(unsafe.Pointer(psSS)).Fxq_Q14 = ((xq_Q10) << (4))
 11627  			(*NSQ_sample_struct)(unsafe.Pointer(psSS)).FLPC_exc_Q16 = ((LPC_exc_Q10) << (6))
 11628  
 11629  			/* Update states for second best quantization */
 11630  
 11631  			/* Quantized excitation */
 11632  			exc_Q10 = ((offset_Q10) + ((*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FQ_Q10))
 11633  			exc_Q10 = ((exc_Q10 ^ dither) - dither)
 11634  
 11635  			/* Add predictions */
 11636  			LPC_exc_Q10 = (exc_Q10 + (func() int32 {
 11637  				if (4) == 1 {
 11638  					return (((LTP_pred_Q14) >> 1) + ((LTP_pred_Q14) & 1))
 11639  				}
 11640  				return ((((LTP_pred_Q14) >> ((4) - 1)) + 1) >> 1)
 11641  			}()))
 11642  			xq_Q10 = ((LPC_exc_Q10) + (LPC_pred_Q10))
 11643  
 11644  			/* Update states */
 11645  			sLF_AR_shp_Q10 = ((xq_Q10) - (n_AR_Q10))
 11646  			(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FsLTP_shp_Q10 = ((sLF_AR_shp_Q10) - (n_LF_Q10))
 11647  			(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FLF_AR_Q12 = ((sLF_AR_shp_Q10) << (2))
 11648  			(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).Fxq_Q14 = ((xq_Q10) << (4))
 11649  			(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FLPC_exc_Q16 = ((LPC_exc_Q10) << (6))
 11650  		}
 11651  
 11652  		*(*int32)(unsafe.Pointer(smpl_buf_idx)) = ((*(*int32)(unsafe.Pointer(smpl_buf_idx)) - 1) & (32 - 1)) /* Index to newest samples              */
 11653  		last_smple_idx = ((*(*int32)(unsafe.Pointer(smpl_buf_idx)) + decisionDelay) & (32 - 1))              /* Index to decisionDelay old samples   */
 11654  
 11655  		/* Find winner */
 11656  		RDmin_Q10 = (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */))).FRD_Q10
 11657  		Winner_ind = 0
 11658  		for k = 1; k < nStatesDelayedDecision; k++ {
 11659  			if (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48))).FRD_Q10 < RDmin_Q10 {
 11660  				RDmin_Q10 = (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48))).FRD_Q10
 11661  				Winner_ind = k
 11662  			}
 11663  		}
 11664  
 11665  		/* Increase RD values of expired states */
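        		// A state whose decisionDelay-old sample (identified by the random seed stored with it)
        		// differs from the winner's receives a huge RD penalty, so paths that disagree with the
        		// sample about to be committed are effectively pruned.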
 11666  		Winner_rand_state = *(*int32)(unsafe.Pointer((psDelDec + uintptr(Winner_ind)*1456 /* &.RandState */) + uintptr(last_smple_idx)*4))
 11667  		for k = 0; k < nStatesDelayedDecision; k++ {
 11668  			if *(*int32)(unsafe.Pointer((psDelDec + uintptr(k)*1456 /* &.RandState */) + uintptr(last_smple_idx)*4)) != Winner_rand_state {
 11669  				(*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48))).FRD_Q10 = (((*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48))).FRD_Q10) + (int32(0x7FFFFFFF) >> 4))
 11670  				(*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48) + 1*24)).FRD_Q10 = (((*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48) + 1*24)).FRD_Q10) + (int32(0x7FFFFFFF) >> 4))
 11671  
 11672  			}
 11673  		}
 11674  
 11675  		/* Find worst in first set and best in second set */
 11676  		RDmax_Q10 = (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */))).FRD_Q10
 11677  		RDmin_Q10 = (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */) + 1*24)).FRD_Q10
 11678  		RDmax_ind = 0
 11679  		RDmin_ind = 0
 11680  		for k = 1; k < nStatesDelayedDecision; k++ {
 11681  			/* find worst in first set */
 11682  			if (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48))).FRD_Q10 > RDmax_Q10 {
 11683  				RDmax_Q10 = (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48))).FRD_Q10
 11684  				RDmax_ind = k
 11685  			}
 11686  			/* find best in second set */
 11687  			if (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48) + 1*24)).FRD_Q10 < RDmin_Q10 {
 11688  				RDmin_Q10 = (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48) + 1*24)).FRD_Q10
 11689  				RDmin_ind = k
 11690  			}
 11691  		}
 11692  
 11693  		/* Replace a state if best from second set outperforms worst in first set */
 11694  		if RDmin_Q10 < RDmax_Q10 {
 11695  			SKP_Silk_copy_del_dec_state(tls, (psDelDec + uintptr(RDmax_ind)*1456), (psDelDec + uintptr(RDmin_ind)*1456), i)
 11696  			libc.Xmemcpy(tls, (bp /* &psSampleState */ + uintptr(RDmax_ind)*48), ((bp /* &psSampleState */ + uintptr(RDmin_ind)*48) + 1*24), uint64(unsafe.Sizeof(NSQ_sample_struct{})))
 11697  		}
 11698  
 11699  		/* Write samples from winner to output and long-term filter states */
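        		// The large nested expressions below expand a saturating fixed-point scale, roughly
        		// sat16( rshift_round( smulww( Xq_Q10[last_smple_idx], Gain_Q16[last_smple_idx] ), 10 ) ):
        		// the delayed Q10 sample times the stored subframe gain, brought back to a 16-bit sample.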
 11700  		psDD = (psDelDec + uintptr(Winner_ind)*1456)
 11701  		if (subfr > 0) || (i >= decisionDelay) {
 11702  			*(*int8)(unsafe.Pointer(q + uintptr((i - decisionDelay)))) = (int8((*(*int32)(unsafe.Pointer((psDD + 128 /* &.Q_Q10 */) + uintptr(last_smple_idx)*4))) >> (10)))
 11703  			*(*int16)(unsafe.Pointer(xq + uintptr((i-decisionDelay))*2)) = func() int16 {
 11704  				if (func() int32 {
 11705  					if (10) == 1 {
 11706  						return (((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11707  							if (16) == 1 {
 11708  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11709  							}
 11710  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11711  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11712  							if (16) == 1 {
 11713  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11714  							}
 11715  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11716  						}()))) & 1))
 11717  					}
 11718  					return ((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11719  						if (16) == 1 {
 11720  							return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11721  						}
 11722  						return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11723  					}()))) >> ((10) - 1)) + 1) >> 1)
 11724  				}()) > 0x7FFF {
 11725  					return int16(0x7FFF)
 11726  				}
 11727  				return func() int16 {
 11728  					if (func() int32 {
 11729  						if (10) == 1 {
 11730  							return (((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11731  								if (16) == 1 {
 11732  									return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11733  								}
 11734  								return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11735  							}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11736  								if (16) == 1 {
 11737  									return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11738  								}
 11739  								return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11740  							}()))) & 1))
 11741  						}
 11742  						return ((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11743  							if (16) == 1 {
 11744  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11745  							}
 11746  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11747  						}()))) >> ((10) - 1)) + 1) >> 1)
 11748  					}()) < (int32(libc.Int16FromInt32(0x8000))) {
 11749  						return libc.Int16FromInt32(0x8000)
 11750  					}
 11751  					return func() int16 {
 11752  						if (10) == 1 {
 11753  							return (int16(((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11754  								if (16) == 1 {
 11755  									return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11756  								}
 11757  								return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11758  							}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11759  								if (16) == 1 {
 11760  									return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11761  								}
 11762  								return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11763  							}()))) & 1)))
 11764  						}
 11765  						return (int16((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11766  							if (16) == 1 {
 11767  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11768  							}
 11769  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11770  						}()))) >> ((10) - 1)) + 1) >> 1))
 11771  					}()
 11772  				}()
 11773  			}()
 11774  			*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx-decisionDelay))*4)) = *(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(last_smple_idx)*4))
 11775  			*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx-decisionDelay))*4)) = *(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(last_smple_idx)*4))
 11776  		}
 11777  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx++
 11778  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx++
 11779  
 11780  		/* Update states */
 11781  		for k = 0; k < nStatesDelayedDecision; k++ {
 11782  			psDD = (psDelDec + uintptr(k)*1456)
 11783  			psSS = (bp /* &psSampleState */ + uintptr(k)*48)
 11784  			(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12 = (*NSQ_sample_struct)(unsafe.Pointer(psSS)).FLF_AR_Q12
 11785  			*(*int32)(unsafe.Pointer((psDD + 832 /* &.sLPC_Q14 */) + uintptr((32+i))*4)) = (*NSQ_sample_struct)(unsafe.Pointer(psSS)).Fxq_Q14
 11786  			*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4)) = (((*NSQ_sample_struct)(unsafe.Pointer(psSS)).Fxq_Q14) >> (4))
 11787  			*(*int32)(unsafe.Pointer((psDD + 128 /* &.Q_Q10 */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4)) = (*NSQ_sample_struct)(unsafe.Pointer(psSS)).FQ_Q10
 11788  			*(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4)) = (*NSQ_sample_struct)(unsafe.Pointer(psSS)).FLPC_exc_Q16
 11789  			*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4)) = (*NSQ_sample_struct)(unsafe.Pointer(psSS)).FsLTP_shp_Q10
 11790  			(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed = (((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed) + (((*NSQ_sample_struct)(unsafe.Pointer(psSS)).FQ_Q10) >> (10)))
 11791  			*(*int32)(unsafe.Pointer((psDD /* &.RandState */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4)) = (*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed
 11792  			(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FRD_Q10 = (*NSQ_sample_struct)(unsafe.Pointer(psSS)).FRD_Q10
 11793  			*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4)) = Gain_Q16
 11794  		}
 11795  	}
 11796  	/* Update LPC states */
 11797  	for k = 0; k < nStatesDelayedDecision; k++ {
 11798  		psDD = (psDelDec + uintptr(k)*1456)
 11799  		libc.Xmemcpy(tls, psDD+832 /* &.sLPC_Q14 */, ((psDD + 832 /* &.sLPC_Q14 */) + uintptr(length)*4), (uint64(32) * uint64(unsafe.Sizeof(int32(0)))))
 11800  	}
 11801  }
 11802  
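        // Note (inferred from the code): this helper normalizes the quantizer's working signals by
        // the current subframe gain. It computes inv_gain_Q16 of about 1/Gains_Q16[subfr], rescales
        // the rewhitened LTP state when needed, multiplies the shaping, prediction and
        // delayed-decision states by gain_adj_Q16 = inv_gain/prev_inv_gain whenever the gain changed
        // since the previous subframe, and finally scales the input into x_sc_Q10.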
 11803  func SKP_Silk_nsq_del_dec_scale_states(tls *libc.TLS, NSQ uintptr, psDelDec uintptr, x uintptr, x_sc_Q10 uintptr, subfr_length int32, sLTP uintptr, sLTP_Q16 uintptr, subfr int32, nStatesDelayedDecision int32, smpl_buf_idx int32, LTP_scale_Q14 int32, Gains_Q16 uintptr, pitchL uintptr) { /* SKP_Silk_NSQ_del_dec.c:603:17: */
 11804  	var i int32
 11805  	var k int32
 11806  	var lag int32
 11807  	var inv_gain_Q16 int32
 11808  	var gain_adj_Q16 int32
 11809  	var inv_gain_Q32 int32
 11810  	var psDD uintptr
 11811  
 11812  	inv_gain_Q16 = SKP_INVERSE32_varQ(tls, func() int32 {
 11813  		if (*(*int32)(unsafe.Pointer(Gains_Q16 + uintptr(subfr)*4))) > (1) {
 11814  			return *(*int32)(unsafe.Pointer(Gains_Q16 + uintptr(subfr)*4))
 11815  		}
 11816  		return 1
 11817  	}(), 32)
 11818  	inv_gain_Q16 = func() int32 {
 11819  		if (inv_gain_Q16) < (0x7FFF) {
 11820  			return inv_gain_Q16
 11821  		}
 11822  		return 0x7FFF
 11823  	}()
 11824  	lag = *(*int32)(unsafe.Pointer(pitchL + uintptr(subfr)*4))
 11825  
 11826  	/* After rewhitening the LTP state is un-scaled, so scale with inv_gain_Q16 */
 11827  	if (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag != 0 {
 11828  		inv_gain_Q32 = ((inv_gain_Q16) << (16))
 11829  		if subfr == 0 {
 11830  			/* Do LTP downscaling */
 11831  			inv_gain_Q32 = (((((inv_gain_Q32) >> 16) * (int32(int16(LTP_scale_Q14)))) + ((((inv_gain_Q32) & 0x0000FFFF) * (int32(int16(LTP_scale_Q14)))) >> 16)) << (2))
 11832  		}
 11833  		for i = (((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx - lag) - (5 / 2)); i < (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx; i++ {
 11834  
 11835  			*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)) = ((((inv_gain_Q32) >> 16) * (int32(*(*int16)(unsafe.Pointer(sLTP + uintptr(i)*2))))) + ((((inv_gain_Q32) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(sLTP + uintptr(i)*2))))) >> 16))
 11836  		}
 11837  	}
 11838  
 11839  	/* Adjust for changing gain */
 11840  	if inv_gain_Q16 != (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Fprev_inv_gain_Q16 {
 11841  		gain_adj_Q16 = SKP_DIV32_varQ(tls, inv_gain_Q16, (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Fprev_inv_gain_Q16, 16)
 11842  
 11843  		/* Scale long-term shaping state */
 11844  		for i = ((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx - (subfr_length * 4)); i < (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx; i++ {
 11845  			*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11846  				if (16) == 1 {
 11847  					return (((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4))) & 1))
 11848  				}
 11849  				return ((((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11850  			}())))
 11851  		}
 11852  
 11853  		/* Scale long-term prediction state */
 11854  		if (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag == 0 {
 11855  			for i = (((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx - lag) - (5 / 2)); i < (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx; i++ {
 11856  				*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11857  					if (16) == 1 {
 11858  						return (((*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4))) & 1))
 11859  					}
 11860  					return ((((*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11861  				}())))
 11862  			}
 11863  		}
 11864  
 11865  		for k = 0; k < nStatesDelayedDecision; k++ {
 11866  			psDD = (psDelDec + uintptr(k)*1456)
 11867  
 11868  			/* Scale scalar states */
 11869  			(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12 = (((((gain_adj_Q16) >> 16) * (int32(int16((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12)))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12)))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11870  				if (16) == 1 {
 11871  					return ((((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12) >> 1) + (((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12) & 1))
 11872  				}
 11873  				return (((((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12) >> ((16) - 1)) + 1) >> 1)
 11874  			}())))
 11875  
 11876  			/* Scale short-term prediction and shaping states */
 11877  			for i = 0; i < 32; i++ {
 11878  				*(*int32)(unsafe.Pointer((psDD + 832 /* &.sLPC_Q14 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 832 /* &.sLPC_Q14 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 832 /* &.sLPC_Q14 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11879  					if (16) == 1 {
 11880  						return (((*(*int32)(unsafe.Pointer((psDD + 832 /* &.sLPC_Q14 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 832 /* &.sLPC_Q14 */) + uintptr(i)*4))) & 1))
 11881  					}
 11882  					return ((((*(*int32)(unsafe.Pointer((psDD + 832 /* &.sLPC_Q14 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11883  				}())))
 11884  			}
 11885  			for i = 0; i < 16; i++ {
 11886  				*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11887  					if (16) == 1 {
 11888  						return (((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr(i)*4))) & 1))
 11889  					}
 11890  					return ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11891  				}())))
 11892  			}
 11893  			for i = 0; i < 32; i++ {
 11894  				*(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11895  					if (16) == 1 {
 11896  						return (((*(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(i)*4))) & 1))
 11897  					}
 11898  					return ((((*(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11899  				}())))
 11900  				*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11901  					if (16) == 1 {
 11902  						return (((*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(i)*4))) & 1))
 11903  					}
 11904  					return ((((*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11905  				}())))
 11906  			}
 11907  		}
 11908  	}
 11909  
 11910  	/* Scale input */
 11911  	for i = 0; i < subfr_length; i++ {
 11912  		*(*int32)(unsafe.Pointer(x_sc_Q10 + uintptr(i)*4)) = (((int32(*(*int16)(unsafe.Pointer(x + uintptr(i)*2)))) * (int32(int16(inv_gain_Q16)))) >> (6))
 11913  	}
 11914  
 11915  	/* save inv_gain */
 11916  
 11917  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Fprev_inv_gain_Q16 = inv_gain_Q16
 11918  }
 11919  
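        // Note: used by the quantizer above when a survivor path is replaced; copies the sample
        // history, shaping states and scalar state of one delayed-decision struct onto another,
        // plus 32 sLPC_Q14 entries starting at LPC_state_idx.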
 11920  func SKP_Silk_copy_del_dec_state(tls *libc.TLS, DD_dst uintptr, DD_src uintptr, LPC_state_idx int32) { /* SKP_Silk_NSQ_del_dec.c:686:17: */
 11921  	libc.Xmemcpy(tls, DD_dst /* &.RandState */, DD_src /* &.RandState */, uint64(unsafe.Sizeof([32]int32{})))
 11922  	libc.Xmemcpy(tls, DD_dst+128 /* &.Q_Q10 */, DD_src+128 /* &.Q_Q10 */, uint64(unsafe.Sizeof([32]int32{})))
 11923  	libc.Xmemcpy(tls, DD_dst+384 /* &.Pred_Q16 */, DD_src+384 /* &.Pred_Q16 */, uint64(unsafe.Sizeof([32]int32{})))
 11924  	libc.Xmemcpy(tls, DD_dst+512 /* &.Shape_Q10 */, DD_src+512 /* &.Shape_Q10 */, uint64(unsafe.Sizeof([32]int32{})))
 11925  	libc.Xmemcpy(tls, DD_dst+256 /* &.Xq_Q10 */, DD_src+256 /* &.Xq_Q10 */, uint64(unsafe.Sizeof([32]int32{})))
 11926  	libc.Xmemcpy(tls, DD_dst+768 /* &.sAR2_Q14 */, DD_src+768 /* &.sAR2_Q14 */, uint64(unsafe.Sizeof([16]int32{})))
 11927  	libc.Xmemcpy(tls, ((DD_dst + 832 /* &.sLPC_Q14 */) + uintptr(LPC_state_idx)*4), ((DD_src + 832 /* &.sLPC_Q14 */) + uintptr(LPC_state_idx)*4), (uint64(32) * uint64(unsafe.Sizeof(int32(0)))))
 11928  	(*NSQ_del_dec_struct)(unsafe.Pointer(DD_dst)).FLF_AR_Q12 = (*NSQ_del_dec_struct)(unsafe.Pointer(DD_src)).FLF_AR_Q12
 11929  	(*NSQ_del_dec_struct)(unsafe.Pointer(DD_dst)).FSeed = (*NSQ_del_dec_struct)(unsafe.Pointer(DD_src)).FSeed
 11930  	(*NSQ_del_dec_struct)(unsafe.Pointer(DD_dst)).FSeedInit = (*NSQ_del_dec_struct)(unsafe.Pointer(DD_src)).FSeedInit
 11931  	(*NSQ_del_dec_struct)(unsafe.Pointer(DD_dst)).FRD_Q10 = (*NSQ_del_dec_struct)(unsafe.Pointer(DD_src)).FRD_Q10
 11932  }
 11933  
 11934  /*************************************************************/
 11935  /*      FIXED POINT CORE PITCH ANALYSIS FUNCTION             */
 11936  /*************************************************************/
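        // Note (inferred from this translation unit): the search is staged. The input is resampled
        // to 8 kHz and then 4 kHz; stage 1 scans lags of 2-18 ms on the 4 kHz signal using
        // normalized, short-lag-biased correlations, keeps up to 4 + 2*complexity candidate lags,
        // and escapes early (returning 1 with zeroed outputs) when the correlation is too weak to
        // indicate voicing.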
 11937  func SKP_Silk_pitch_analysis_core(tls *libc.TLS, signal uintptr, pitch_out uintptr, lagIndex uintptr, contourIndex uintptr, LTPCorr_Q15 uintptr, prevLag int32, search_thres1_Q16 int32, search_thres2_Q15 int32, Fs_kHz int32, complexity int32, forLJC int32) int32 { /* SKP_Silk_pitch_analysis_core.c:65:9: */
 11938  	bp := tls.Alloc(20836)
 11939  	defer tls.Free(20836)
 11940  
 11941  	// var signal_8kHz [480]int16 at bp+1796, 960
 11942  
 11943  	// var signal_4kHz [240]int16 at bp+2812, 480
 11944  
 11945  	// var scratch_mem [2880]int32 at bp+3876, 11520
 11946  
 11947  	var input_signal_ptr uintptr
 11948  	// var filt_state [7]int32 at bp+1768, 28
 11949  
 11950  	var i int32
 11951  	var k int32
 11952  	var d int32
 11953  	var j int32
 11954  	// var C [4][221]int16 at bp, 1768
 11955  
 11956  	var target_ptr uintptr
 11957  	var basis_ptr uintptr
 11958  	var cross_corr int32
 11959  	var normalizer int32
 11960  	var energy int32
 11961  	var shift int32
 11962  	var energy_basis int32
 11963  	var energy_target int32
 11964  	// var d_srch [24]int32 at bp+3292, 96
 11965  
 11966  	// var d_comp [221]int16 at bp+3388, 442
 11967  
 11968  	var Cmax int32
 11969  	var length_d_srch int32
 11970  	var length_d_comp int32
 11971  	var sum int32
 11972  	var threshold int32
 11973  	var temp32 int32
 11974  	var CBimax int32
 11975  	var CBimax_new int32
 11976  	var CBimax_old int32
 11977  	var lag int32
 11978  	var start_lag int32
 11979  	var end_lag int32
 11980  	var lag_new int32
 11981  	// var CC [11]int32 at bp+3832, 44
 11982  
 11983  	var CCmax int32
 11984  	var CCmax_b int32
 11985  	var CCmax_new_b int32
 11986  	var CCmax_new int32
 11987  	// var energies_st3 [4][34][5]int32 at bp+18116, 2720
 11988  
 11989  	// var crosscorr_st3 [4][34][5]int32 at bp+15396, 2720
 11990  
 11991  	var lag_counter int32
 11992  	var frame_length int32
 11993  	var frame_length_8kHz int32
 11994  	var frame_length_4kHz int32
 11995  	var max_sum_sq_length int32
 11996  	var sf_length int32
 11997  	var sf_length_8kHz int32
 11998  	var min_lag int32
 11999  	var min_lag_8kHz int32
 12000  	var min_lag_4kHz int32
 12001  	var max_lag int32
 12002  	var max_lag_8kHz int32
 12003  	var max_lag_4kHz int32
 12004  	var contour_bias int32
 12005  	var diff int32
 12006  	var lz int32
 12007  	var lshift int32
 12008  	var cbk_offset int32
 12009  	var cbk_size int32
 12010  	var nb_cbks_stage2 int32
 12011  	var delta_lag_log2_sqr_Q7 int32
 12012  	var lag_log2_Q7 int32
 12013  	var prevLag_log2_Q7 int32
 12014  	var prev_lag_bias_Q15 int32
 12015  	var corr_thres_Q15 int32
 12016  
 12017  	/* Check for valid sampling frequency */
 12018  
 12019  	/* Check for valid complexity setting */
 12020  
 12021  	/* Setup frame lengths max / min lag for the sampling frequency */
 12022  	frame_length = (40 * Fs_kHz)
 12023  	frame_length_4kHz = (40 * 4)
 12024  	frame_length_8kHz = (40 * 8)
 12025  	sf_length = ((frame_length) >> (3))
 12026  	sf_length_8kHz = ((frame_length_8kHz) >> (3))
 12027  	min_lag = (2 * Fs_kHz)
 12028  	min_lag_4kHz = (2 * 4)
 12029  	min_lag_8kHz = (2 * 8)
 12030  	max_lag = (18 * Fs_kHz)
 12031  	max_lag_4kHz = (18 * 4)
 12032  	max_lag_8kHz = (18 * 8)
 12033  
 12034  	libc.Xmemset(tls, bp /* &C[0] */, 0, ((uint64(unsafe.Sizeof(int16(0))) * uint64(4)) * (uint64((int32((18 * 24)) >> 1) + 5))))
 12035  
 12036  	/* Resample from input sampled at Fs_kHz to 8 kHz */
 12037  	if Fs_kHz == 16 {
 12038  		libc.Xmemset(tls, bp+1768 /* &filt_state[0] */, 0, (uint64(2) * uint64(unsafe.Sizeof(int32(0)))))
 12039  		SKP_Silk_resampler_down2(tls, bp+1768 /* &filt_state[0] */, bp+1796 /* &signal_8kHz[0] */, signal, frame_length)
 12040  	} else if Fs_kHz == 12 {
 12041  		// var R23 [6]int32 at bp+2756, 24
 12042  
 12043  		libc.Xmemset(tls, bp+2756 /* &R23[0] */, 0, (uint64(6) * uint64(unsafe.Sizeof(int32(0)))))
 12044  		SKP_Silk_resampler_down2_3(tls, bp+2756 /* &R23[0] */, bp+1796 /* &signal_8kHz[0] */, signal, (40 * 12))
 12045  	} else if Fs_kHz == 24 {
 12046  		// var filt_state_fix [8]int32 at bp+2780, 32
 12047  
 12048  		libc.Xmemset(tls, bp+2780 /* &filt_state_fix[0] */, 0, (uint64(8) * uint64(unsafe.Sizeof(int32(0)))))
 12049  		SKP_Silk_resampler_down3(tls, bp+2780 /* &filt_state_fix[0] */, bp+1796 /* &signal_8kHz[0] */, signal, (24 * 40))
 12050  	} else {
 12051  
 12052  		libc.Xmemcpy(tls, bp+1796 /* &signal_8kHz[0] */, signal, (uint64(frame_length_8kHz) * uint64(unsafe.Sizeof(int16(0)))))
 12053  	}
 12054  	/* Decimate again to 4 kHz */
 12055  	libc.Xmemset(tls, bp+1768 /* &filt_state[0] */, 0, (uint64(2) * uint64(unsafe.Sizeof(int32(0))))) /* Set state to zero */
 12056  	SKP_Silk_resampler_down2(tls, bp+1768 /* &filt_state[0] */, bp+2812 /* &signal_4kHz[0] */, bp+1796 /* &signal_8kHz[0] */, frame_length_8kHz)
 12057  
 12058  	/* Low-pass filter */
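        	// In-place {1, 1} smoother: each 4 kHz sample becomes sat16(x[i] + x[i-1]); the loop walks
        	// from the end of the buffer so the not-yet-filtered neighbour is read.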
 12059  	for i = (frame_length_4kHz - 1); i > 0; i-- {
 12060  		*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr(i)*2)) = func() int16 {
 12061  			if ((int32(*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr(i)*2)))) + (int32(*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr((i-1))*2))))) > 0x7FFF {
 12062  				return int16(0x7FFF)
 12063  			}
 12064  			return func() int16 {
 12065  				if ((int32(*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr(i)*2)))) + (int32(*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr((i-1))*2))))) < (int32(libc.Int16FromInt32(0x8000))) {
 12066  					return libc.Int16FromInt32(0x8000)
 12067  				}
 12068  				return (int16((int32(*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr(i)*2)))) + (int32(*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr((i-1))*2))))))
 12069  			}()
 12070  		}()
 12071  	}
 12072  
 12073  	/*******************************************************************************
 12074  	 ** Scale 4 kHz signal down to prevent correlations measures from overflowing
 12075  	 ** find scaling as max scaling for each 8kHz(?) subframe
 12076  	 *******************************************************************************/
 12077  
 12078  	/* Inner product is calculated with different lengths, so scale for the worst case */
 12079  	max_sum_sq_length = SKP_max_32(tls, sf_length_8kHz, ((frame_length_4kHz) >> (1)))
 12080  	shift = SKP_FIX_P_Ana_find_scaling(tls, bp+2812 /* &signal_4kHz[0] */, frame_length_4kHz, max_sum_sq_length)
 12081  	if shift > 0 {
 12082  		for i = 0; i < frame_length_4kHz; i++ {
 12083  			*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr(i)*2)) = (int16((int32(*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr(i)*2)))) >> (shift)))
 12084  		}
 12085  	}
 12086  
 12087  	/******************************************************************************
 12088  	 * FIRST STAGE, operating in 4 kHz
 12089  	 ******************************************************************************/
 12090  	target_ptr = (bp + 2812 /* &signal_4kHz */ + uintptr(((frame_length_4kHz)>>(1)))*2)
 12091  	for k = 0; k < 2; k++ {
 12092  		/* Check that we are within range of the array */
 12093  
 12094  		basis_ptr = (target_ptr - uintptr(min_lag_4kHz)*2)
 12095  
 12096  		/* Check that we are within range of the array */
 12097  
 12098  		normalizer = 0
 12099  		cross_corr = 0
 12100  		/* Calculate first vector products before loop */
 12101  		cross_corr = SKP_Silk_inner_prod_aligned(tls, target_ptr, basis_ptr, sf_length_8kHz)
 12102  		normalizer = SKP_Silk_inner_prod_aligned(tls, basis_ptr, basis_ptr, sf_length_8kHz)
 12103  		normalizer = func() int32 {
 12104  			if ((uint32((normalizer) + ((int32(int16(sf_length_8kHz))) * (int32(int16(4000)))))) & 0x80000000) == uint32(0) {
 12105  				return func() int32 {
 12106  					if ((uint32((normalizer) & ((int32(int16(sf_length_8kHz))) * (int32(int16(4000)))))) & 0x80000000) != uint32(0) {
 12107  						return libc.Int32FromUint32(0x80000000)
 12108  					}
 12109  					return ((normalizer) + ((int32(int16(sf_length_8kHz))) * (int32(int16(4000)))))
 12110  				}()
 12111  			}
 12112  			return func() int32 {
 12113  				if ((uint32((normalizer) | ((int32(int16(sf_length_8kHz))) * (int32(int16(4000)))))) & 0x80000000) == uint32(0) {
 12114  					return 0x7FFFFFFF
 12115  				}
 12116  				return ((normalizer) + ((int32(int16(sf_length_8kHz))) * (int32(int16(4000)))))
 12117  			}()
 12118  		}()
 12119  
 12120  		temp32 = ((cross_corr) / (SKP_Silk_SQRT_APPROX(tls, normalizer) + 1))
 12121  		*(*int16)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*442) + uintptr(min_lag_4kHz)*2)) = func() int16 {
 12122  			if (temp32) > 0x7FFF {
 12123  				return int16(0x7FFF)
 12124  			}
 12125  			return func() int16 {
 12126  				if (temp32) < (int32(libc.Int16FromInt32(0x8000))) {
 12127  					return libc.Int16FromInt32(0x8000)
 12128  				}
 12129  				return int16(temp32)
 12130  			}()
 12131  		}() /* Q0 */
 12132  
 12133  		/* From now on normalizer is computed recursively */
 12134  		for d = (min_lag_4kHz + 1); d <= max_lag_4kHz; d++ {
 12135  			basis_ptr -= 2
 12136  
 12137  			/* Check that we are within range of the array */
 12138  
 12139  			cross_corr = SKP_Silk_inner_prod_aligned(tls, target_ptr, basis_ptr, sf_length_8kHz)
 12140  
 12141  			/* Add contribution of new sample and remove contribution from oldest sample */
 12142  			normalizer = normalizer + (((int32(*(*int16)(unsafe.Pointer(basis_ptr)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr))))) - ((int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(sf_length_8kHz)*2)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(sf_length_8kHz)*2))))))
 12143  
 12144  			temp32 = ((cross_corr) / (SKP_Silk_SQRT_APPROX(tls, normalizer) + 1))
 12145  			*(*int16)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*442) + uintptr(d)*2)) = func() int16 {
 12146  				if (temp32) > 0x7FFF {
 12147  					return int16(0x7FFF)
 12148  				}
 12149  				return func() int16 {
 12150  					if (temp32) < (int32(libc.Int16FromInt32(0x8000))) {
 12151  						return libc.Int16FromInt32(0x8000)
 12152  					}
 12153  					return int16(temp32)
 12154  				}()
 12155  			}() /* Q0 */
 12156  		}
 12157  		/* Update target pointer */
 12158  		target_ptr += 2 * (uintptr(sf_length_8kHz))
 12159  	}
 12160  
 12161  	/* Combine two subframes into single correlation measure and apply short-lag bias */
 12162  	for i = max_lag_4kHz; i >= min_lag_4kHz; i-- {
 12163  		sum = (int32(*(*int16)(unsafe.Pointer((bp /* &C[0] */) + uintptr(i)*2))) + int32(*(*int16)(unsafe.Pointer((bp /* &C[0] */ + 1*442) + uintptr(i)*2)))) /* Q0 */
 12164  
 12165  		sum = ((sum) >> (1)) /* Q-1 */
 12166  
 12167  		sum = ((sum) + ((((sum) >> 16) * (int32((int16((-i) << (4)))))) + ((((sum) & 0x0000FFFF) * (int32((int16((-i) << (4)))))) >> 16))) /* Q-1 */
 12168  
 12169  		*(*int16)(unsafe.Pointer((bp /* &C[0] */) + uintptr(i)*2)) = int16(sum) /* Q-1 */
 12170  	}
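	/* The last step of the loop above is the SMLAWB-style fixed-point idiom */
	/* (sum>>16)*b + ((sum&0xFFFF)*b)>>16, i.e. roughly sum += (sum*b)>>16,  */
	/* with b = -(i<<4): sum is scaled by about (1 - i/4096), so longer lags */
	/* get a slightly larger penalty. That is the short-lag bias.            */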
 12171  
 12172  	/* Sort */
 12173  	length_d_srch = (4 + (2 * complexity))
 12174  
 12175  	SKP_Silk_insertion_sort_decreasing_int16(tls, ((bp /* &C */) + uintptr(min_lag_4kHz)*2), bp+3292 /* &d_srch[0] */, ((max_lag_4kHz - min_lag_4kHz) + 1), length_d_srch)
 12176  
 12177  	/* Escape if correlation is very low already here */
 12178  	target_ptr = (bp + 2812 /* &signal_4kHz */ + uintptr(((frame_length_4kHz)>>(1)))*2)
 12179  	energy = SKP_Silk_inner_prod_aligned(tls, target_ptr, target_ptr, ((frame_length_4kHz) >> (1)))
 12180  	energy = func() int32 {
 12181  		if ((uint32((energy) + (1000))) & 0x80000000) != 0 {
 12182  			return 0x7FFFFFFF
 12183  		}
 12184  		return ((energy) + (1000))
 12185  	}() /* Q0 */
 12186  	Cmax = int32(*(*int16)(unsafe.Pointer((bp /* &C[0] */) + uintptr(min_lag_4kHz)*2))) /* Q-1 */
 12187  	threshold = ((int32(int16(Cmax))) * (int32(int16(Cmax))))                           /* Q-2 */
 12188  	/* Compare in Q-2 domain */
 12189  	if ((energy) >> (4 + 2)) > threshold {
 12190  		libc.Xmemset(tls, pitch_out, 0, (uint64(4) * uint64(unsafe.Sizeof(int32(0)))))
 12191  		*(*int32)(unsafe.Pointer(LTPCorr_Q15)) = 0
 12192  		*(*int32)(unsafe.Pointer(lagIndex)) = 0
 12193  		*(*int32)(unsafe.Pointer(contourIndex)) = 0
 12194  		return 1
 12195  	}
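	/* The early return above reports the frame as unvoiced (return value 1); */
	/* the normal exit at the end of this function returns 0 for voiced.      */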
 12196  
 12197  	threshold = ((((search_thres1_Q16) >> 16) * (int32(int16(Cmax)))) + ((((search_thres1_Q16) & 0x0000FFFF) * (int32(int16(Cmax)))) >> 16))
 12198  	for i = 0; i < length_d_srch; i++ {
 12199  		/* Convert to 8 kHz indices for the sorted correlation that exceeds the threshold */
 12200  		if int32(*(*int16)(unsafe.Pointer((bp /* &C[0] */) + uintptr((min_lag_4kHz+i))*2))) > threshold {
 12201  			*(*int32)(unsafe.Pointer(bp + 3292 /* &d_srch[0] */ + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer(bp + 3292 /* &d_srch[0] */ + uintptr(i)*4)) + min_lag_4kHz) << 1)
 12202  		} else {
 12203  			length_d_srch = i
 12204  			break
 12205  		}
 12206  	}
 12207  
 12208  	for i = (min_lag_8kHz - 5); i < (max_lag_8kHz + 5); i++ {
 12209  		*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr(i)*2)) = int16(0)
 12210  	}
 12211  	for i = 0; i < length_d_srch; i++ {
 12212  		*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr(*(*int32)(unsafe.Pointer(bp + 3292 /* &d_srch[0] */ + uintptr(i)*4)))*2)) = int16(1)
 12213  	}
 12214  
 12215  	/* Convolution */
 12216  	for i = (max_lag_8kHz + 3); i >= min_lag_8kHz; i-- {
 12217  		*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp */ + uintptr(i)*2)) += int16((int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr((i-1))*2))) + int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr((i-2))*2)))))
 12218  	}
 12219  
 12220  	length_d_srch = 0
 12221  	for i = min_lag_8kHz; i < (max_lag_8kHz + 1); i++ {
 12222  		if int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr((i+1))*2))) > 0 {
 12223  			*(*int32)(unsafe.Pointer(bp + 3292 /* &d_srch[0] */ + uintptr(length_d_srch)*4)) = i
 12224  			length_d_srch++
 12225  		}
 12226  	}
 12227  
 12228  	/* Convolution */
 12229  	for i = (max_lag_8kHz + 3); i >= min_lag_8kHz; i-- {
 12230  		*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp */ + uintptr(i)*2)) += int16(((int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr((i-1))*2))) + int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr((i-2))*2)))) + int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr((i-3))*2)))))
 12231  	}
 12232  
 12233  	length_d_comp = 0
 12234  	for i = min_lag_8kHz; i < (max_lag_8kHz + 4); i++ {
 12235  		if int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr(i)*2))) > 0 {
 12236  			*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr(length_d_comp)*2)) = (int16(i - 2))
 12237  			length_d_comp++
 12238  		}
 12239  	}
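	/* The two "convolution" passes above dilate the candidate set: each coarse */
	/* 4 kHz candidate also marks a few neighbouring 8 kHz lags in d_comp, so   */
	/* the second stage searches a small window around every candidate rather   */
	/* than a single lag.                                                        */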
 12240  
 12241  	/**********************************************************************************
 12242  	 ** SECOND STAGE, operating at 8 kHz, on lag sections with high correlation
 12243  	 *************************************************************************************/
 12244  
 12245  	/******************************************************************************
 12246  	 ** Scale signal down to keep correlation measures from overflowing
 12247  	 *******************************************************************************/
 12248  	/* find scaling as max scaling for each subframe */
 12249  	shift = SKP_FIX_P_Ana_find_scaling(tls, bp+1796 /* &signal_8kHz[0] */, frame_length_8kHz, sf_length_8kHz)
 12250  	if shift > 0 {
 12251  		for i = 0; i < frame_length_8kHz; i++ {
 12252  			*(*int16)(unsafe.Pointer(bp + 1796 /* &signal_8kHz[0] */ + uintptr(i)*2)) = (int16((int32(*(*int16)(unsafe.Pointer(bp + 1796 /* &signal_8kHz[0] */ + uintptr(i)*2)))) >> (shift)))
 12253  		}
 12254  	}
 12255  
 12256  	/*********************************************************************************
 12257  	 * Find energy of each subframe projected onto its history, for a range of delays
 12258  	 *********************************************************************************/
 12259  	libc.Xmemset(tls, bp /* &C[0] */, 0, ((uint64(4 * ((int32((18 * 24)) >> 1) + 5))) * uint64(unsafe.Sizeof(int16(0)))))
 12260  
 12261  	target_ptr = (bp + 1796 /* &signal_8kHz */ + uintptr(frame_length_4kHz)*2) /* point to middle of frame */
 12262  	for k = 0; k < 4; k++ {
 12263  
 12264  		/* Check that we are within range of the array */
 12265  
 12266  		energy_target = SKP_Silk_inner_prod_aligned(tls, target_ptr, target_ptr, sf_length_8kHz)
 12267  		// ToDo: Calculate 1 / energy_target here and save one division inside next for loop
 12268  		for j = 0; j < length_d_comp; j++ {
 12269  			d = int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr(j)*2)))
 12270  			basis_ptr = (target_ptr - uintptr(d)*2)
 12271  
 12272  			/* Check that we are within range of the array */
 12273  
 12274  			cross_corr = SKP_Silk_inner_prod_aligned(tls, target_ptr, basis_ptr, sf_length_8kHz)
 12275  			energy_basis = SKP_Silk_inner_prod_aligned(tls, basis_ptr, basis_ptr, sf_length_8kHz)
 12276  			if cross_corr > 0 {
 12277  				energy = func() int32 {
 12278  					if (energy_target) > (energy_basis) {
 12279  						return energy_target
 12280  					}
 12281  					return energy_basis
 12282  				}() /* Find max to make sure first division < 1.0 */
 12283  				lz = SKP_Silk_CLZ32(tls, cross_corr)
 12284  				lshift = func() int32 {
 12285  					if (0) > (15) {
 12286  						return func() int32 {
 12287  							if (lz - 1) > (0) {
 12288  								return 0
 12289  							}
 12290  							return func() int32 {
 12291  								if (lz - 1) < (15) {
 12292  									return 15
 12293  								}
 12294  								return (lz - 1)
 12295  							}()
 12296  						}()
 12297  					}
 12298  					return func() int32 {
 12299  						if (lz - 1) > (15) {
 12300  							return 15
 12301  						}
 12302  						return func() int32 {
 12303  							if (lz - 1) < (0) {
 12304  								return 0
 12305  							}
 12306  							return (lz - 1)
 12307  						}()
 12308  					}()
 12309  				}()
 12310  				temp32 = (((cross_corr) << (lshift)) / (((energy) >> (15 - lshift)) + 1)) /* Q15 */
 12311  
 12312  				temp32 = ((((cross_corr) >> 16) * (int32(int16(temp32)))) + ((((cross_corr) & 0x0000FFFF) * (int32(int16(temp32)))) >> 16)) /* Q(-1), cc * ( cc / max(b, t) ) */
 12313  				temp32 = func() int32 {
 12314  					if ((uint32((temp32) + (temp32))) & 0x80000000) == uint32(0) {
 12315  						return func() int32 {
 12316  							if ((uint32((temp32) & (temp32))) & 0x80000000) != uint32(0) {
 12317  								return libc.Int32FromUint32(0x80000000)
 12318  							}
 12319  							return ((temp32) + (temp32))
 12320  						}()
 12321  					}
 12322  					return func() int32 {
 12323  						if ((uint32((temp32) | (temp32))) & 0x80000000) == uint32(0) {
 12324  							return 0x7FFFFFFF
 12325  						}
 12326  						return ((temp32) + (temp32))
 12327  					}()
 12328  				}() /* Q(0) */
 12329  				lz = SKP_Silk_CLZ32(tls, temp32)
 12330  				lshift = func() int32 {
 12331  					if (0) > (15) {
 12332  						return func() int32 {
 12333  							if (lz - 1) > (0) {
 12334  								return 0
 12335  							}
 12336  							return func() int32 {
 12337  								if (lz - 1) < (15) {
 12338  									return 15
 12339  								}
 12340  								return (lz - 1)
 12341  							}()
 12342  						}()
 12343  					}
 12344  					return func() int32 {
 12345  						if (lz - 1) > (15) {
 12346  							return 15
 12347  						}
 12348  						return func() int32 {
 12349  							if (lz - 1) < (0) {
 12350  								return 0
 12351  							}
 12352  							return (lz - 1)
 12353  						}()
 12354  					}()
 12355  				}()
 12356  				energy = func() int32 {
 12357  					if (energy_target) < (energy_basis) {
 12358  						return energy_target
 12359  					}
 12360  					return energy_basis
 12361  				}()
 12362  				*(*int16)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*442) + uintptr(d)*2)) = int16((((temp32) << (lshift)) / (((energy) >> (15 - lshift)) + 1))) // Q15
 12363  			} else {
 12364  				*(*int16)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*442) + uintptr(d)*2)) = int16(0)
 12365  			}
 12366  		}
 12367  		target_ptr += 2 * (uintptr(sf_length_8kHz))
 12368  	}
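	/* Per lag, the fixed-point block above computes the squared normalized    */
	/* correlation cross_corr^2 / (energy_target * energy_basis) in Q15. The   */
	/* CLZ-based shifts preserve precision ahead of each division, and the     */
	/* "+ 1" in the denominators guards against division by zero.              */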
 12369  
 12370  	/* search over lag range and lag codebook */
 12371  	/* scale factor for lag codebook, as a function of center lag */
 12372  
 12373  	CCmax = libc.Int32FromUint32(0x80000000)
 12374  	CCmax_b = libc.Int32FromUint32(0x80000000)
 12375  
 12376  	CBimax = 0 /* To avoid returning undefined lag values */
 12377  	lag = -1   /* To check if lag with strong enough correlation has been found */
 12378  
 12379  	if prevLag > 0 {
 12380  		if Fs_kHz == 12 {
 12381  			prevLag = (((prevLag) << (1)) / (3))
 12382  		} else if Fs_kHz == 16 {
 12383  			prevLag = ((prevLag) >> (1))
 12384  		} else if Fs_kHz == 24 {
 12385  			prevLag = ((prevLag) / (3))
 12386  		}
 12387  		prevLag_log2_Q7 = SKP_Silk_lin2log(tls, prevLag)
 12388  	} else {
 12389  		prevLag_log2_Q7 = 0
 12390  	}
 12391  
 12392  	corr_thres_Q15 = (((int32(int16(search_thres2_Q15))) * (int32(int16(search_thres2_Q15)))) >> (13))
 12393  
 12394  	/* If the input is 8 kHz, use a larger codebook here because it is the last stage */
 12395  	if (Fs_kHz == 8) && (complexity > 0) {
 12396  		nb_cbks_stage2 = 11
 12397  	} else {
 12398  		nb_cbks_stage2 = 3
 12399  	}
 12400  
 12401  	for k = 0; k < length_d_srch; k++ {
 12402  		d = *(*int32)(unsafe.Pointer(bp + 3292 /* &d_srch[0] */ + uintptr(k)*4))
 12403  		for j = 0; j < nb_cbks_stage2; j++ {
 12404  			*(*int32)(unsafe.Pointer(bp + 3832 /* &CC[0] */ + uintptr(j)*4)) = 0
 12405  			for i = 0; i < 4; i++ {
 12406  				/* Try all codebooks */
 12407  				*(*int32)(unsafe.Pointer(bp + 3832 /* &CC[0] */ + uintptr(j)*4)) = (*(*int32)(unsafe.Pointer(bp + 3832 /* &CC[0] */ + uintptr(j)*4)) + int32(*(*int16)(unsafe.Pointer((bp /* &C[0] */ + uintptr(i)*442) + uintptr((d+int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage2)) + uintptr(i)*22) + uintptr(j)*2)))))*2))))
 12408  			}
 12409  		}
 12410  		/* Find best codebook */
 12411  		CCmax_new = libc.Int32FromUint32(0x80000000)
 12412  		CBimax_new = 0
 12413  		for i = 0; i < nb_cbks_stage2; i++ {
 12414  			if *(*int32)(unsafe.Pointer(bp + 3832 /* &CC[0] */ + uintptr(i)*4)) > CCmax_new {
 12415  				CCmax_new = *(*int32)(unsafe.Pointer(bp + 3832 /* &CC[0] */ + uintptr(i)*4))
 12416  				CBimax_new = i
 12417  			}
 12418  		}
 12419  
 12420  		/* Bias towards shorter lags */
 12421  		lag_log2_Q7 = SKP_Silk_lin2log(tls, d) /* Q7 */
 12422  
 12423  		if forLJC != 0 {
 12424  			CCmax_new_b = CCmax_new
 12425  		} else {
 12426  			CCmax_new_b = (CCmax_new - (((int32((int16(4 * 6554)))) * (int32(int16(lag_log2_Q7)))) >> (7))) /* Q15 */
 12427  		}
 12428  
 12429  		/* Bias towards previous lag */
 12430  
 12431  		if prevLag > 0 {
 12432  			delta_lag_log2_sqr_Q7 = (lag_log2_Q7 - prevLag_log2_Q7)
 12433  
 12434  			delta_lag_log2_sqr_Q7 = (((int32(int16(delta_lag_log2_sqr_Q7))) * (int32(int16(delta_lag_log2_sqr_Q7)))) >> (7))
 12435  			prev_lag_bias_Q15 = (((int32((int16(4 * 6554)))) * (int32(int16(*(*int32)(unsafe.Pointer(LTPCorr_Q15)))))) >> (15)) /* Q15 */
 12436  			prev_lag_bias_Q15 = (((prev_lag_bias_Q15) * (delta_lag_log2_sqr_Q7)) / (delta_lag_log2_sqr_Q7 + (int32(1) << 6)))
 12437  			CCmax_new_b = CCmax_new_b - (prev_lag_bias_Q15) /* Q15 */
 12438  		}
 12439  
 12440  		if ((CCmax_new_b > CCmax_b) && (CCmax_new > corr_thres_Q15)) && (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage2))) + uintptr(CBimax_new)*2))) <= min_lag_8kHz) {
 12441  			CCmax_b = CCmax_new_b
 12442  			CCmax = CCmax_new
 12443  			lag = d
 12444  			CBimax = CBimax_new
 12445  		}
 12446  	}
 12447  
 12448  	if lag == -1 {
 12449  		/* No suitable candidate found */
 12450  		libc.Xmemset(tls, pitch_out, 0, (uint64(4) * uint64(unsafe.Sizeof(int32(0)))))
 12451  		*(*int32)(unsafe.Pointer(LTPCorr_Q15)) = 0
 12452  		*(*int32)(unsafe.Pointer(lagIndex)) = 0
 12453  		*(*int32)(unsafe.Pointer(contourIndex)) = 0
 12454  		return 1
 12455  	}
 12456  
 12457  	if Fs_kHz > 8 {
 12458  
 12459  		/******************************************************************************
 12460  		 ** Scale input signal down to keep correlation measures from overflowing
 12461  		 *******************************************************************************/
 12462  		/* find scaling as max scaling for each subframe */
 12463  		shift = SKP_FIX_P_Ana_find_scaling(tls, signal, frame_length, sf_length)
 12464  		if shift > 0 {
 12465  			/* Move signal to scratch mem because the input signal should be unchanged */
 12466  			/* Reuse the 32 bit scratch mem vector, use a 16 bit pointer from now */
 12467  			input_signal_ptr = bp + 3876 /* scratch_mem */
 12468  			for i = 0; i < frame_length; i++ {
 12469  				*(*int16)(unsafe.Pointer(input_signal_ptr + uintptr(i)*2)) = (int16((int32(*(*int16)(unsafe.Pointer(signal + uintptr(i)*2)))) >> (shift)))
 12470  			}
 12471  		} else {
 12472  			input_signal_ptr = signal
 12473  		}
 12474  		/*********************************************************************************/
 12475  
 12476  		/* Search in original signal */
 12477  
 12478  		CBimax_old = CBimax
 12479  		/* Compensate for decimation */
 12480  
 12481  		if Fs_kHz == 12 {
 12482  			lag = (((int32(int16(lag))) * (int32(int16(3)))) >> (1))
 12483  		} else if Fs_kHz == 16 {
 12484  			lag = ((lag) << (1))
 12485  		} else {
 12486  			lag = ((int32(int16(lag))) * (int32(int16(3))))
 12487  		}
 12488  
 12489  		lag = func() int32 {
 12490  			if (min_lag) > (max_lag) {
 12491  				return func() int32 {
 12492  					if (lag) > (min_lag) {
 12493  						return min_lag
 12494  					}
 12495  					return func() int32 {
 12496  						if (lag) < (max_lag) {
 12497  							return max_lag
 12498  						}
 12499  						return lag
 12500  					}()
 12501  				}()
 12502  			}
 12503  			return func() int32 {
 12504  				if (lag) > (max_lag) {
 12505  					return max_lag
 12506  				}
 12507  				return func() int32 {
 12508  					if (lag) < (min_lag) {
 12509  						return min_lag
 12510  					}
 12511  					return lag
 12512  				}()
 12513  			}()
 12514  		}()
 12515  		start_lag = SKP_max_int(tls, (lag - 2), min_lag)
 12516  		end_lag = SKP_min_int(tls, (lag + 2), max_lag)
 12517  		lag_new = lag /* to avoid undefined lag */
 12518  		CBimax = 0    /* to avoid undefined lag */
 12519  
 12520  		*(*int32)(unsafe.Pointer(LTPCorr_Q15)) = SKP_Silk_SQRT_APPROX(tls, ((CCmax) << (13))) /* Output normalized correlation */
 12521  
 12522  		CCmax = libc.Int32FromUint32(0x80000000)
 12523  		/* pitch lags according to second stage */
 12524  		for k = 0; k < 4; k++ {
 12525  			*(*int32)(unsafe.Pointer(pitch_out + uintptr(k)*4)) = (lag + (2 * int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage2)) + uintptr(k)*22) + uintptr(CBimax_old)*2)))))
 12526  		}
 12527  		/* Calculate the correlations and energies needed in stage 3 */
 12528  		SKP_FIX_P_Ana_calc_corr_st3(tls, bp+15396 /* &crosscorr_st3[0] */, input_signal_ptr, start_lag, sf_length, complexity)
 12529  		SKP_FIX_P_Ana_calc_energy_st3(tls, bp+18116 /* &energies_st3[0] */, input_signal_ptr, start_lag, sf_length, complexity)
 12530  
 12531  		lag_counter = 0
 12532  
 12533  		contour_bias = ((52429) / (lag))
 12534  
 12535  		/* Set up codebook (cbk) parameters according to complexity setting */
 12536  		cbk_size = int32(SKP_Silk_cbk_sizes_stage3[complexity])
 12537  		cbk_offset = int32(SKP_Silk_cbk_offsets_stage3[complexity])
 12538  
 12539  		for d = start_lag; d <= end_lag; d++ {
 12540  			for j = cbk_offset; j < (cbk_offset + cbk_size); j++ {
 12541  				cross_corr = 0
 12542  				energy = 0
 12543  				for k = 0; k < 4; k++ {
 12544  
 12545  					energy = energy + ((*(*int32)(unsafe.Pointer(((bp + 18116 /* &energies_st3[0] */ + uintptr(k)*680) + uintptr(j)*20) + uintptr(lag_counter)*4))) >> (2)) /* use mean, to avoid overflow */
 12546  
 12547  					cross_corr = cross_corr + ((*(*int32)(unsafe.Pointer(((bp + 15396 /* &crosscorr_st3[0] */ + uintptr(k)*680) + uintptr(j)*20) + uintptr(lag_counter)*4))) >> (2)) /* use mean, to avoid overflow */
 12548  				}
 12549  				if cross_corr > 0 {
 12550  					/* Divide cross_corr / energy and get result in Q15 */
 12551  					lz = SKP_Silk_CLZ32(tls, cross_corr)
 12552  					/* Divide with result in Q13, cross_corr could be larger than energy */
 12553  					lshift = func() int32 {
 12554  						if (0) > (13) {
 12555  							return func() int32 {
 12556  								if (lz - 1) > (0) {
 12557  									return 0
 12558  								}
 12559  								return func() int32 {
 12560  									if (lz - 1) < (13) {
 12561  										return 13
 12562  									}
 12563  									return (lz - 1)
 12564  								}()
 12565  							}()
 12566  						}
 12567  						return func() int32 {
 12568  							if (lz - 1) > (13) {
 12569  								return 13
 12570  							}
 12571  							return func() int32 {
 12572  								if (lz - 1) < (0) {
 12573  									return 0
 12574  								}
 12575  								return (lz - 1)
 12576  							}()
 12577  						}()
 12578  					}()
 12579  					CCmax_new = (((cross_corr) << (lshift)) / (((energy) >> (13 - lshift)) + 1))
 12580  					CCmax_new = func() int32 {
 12581  						if (CCmax_new) > 0x7FFF {
 12582  							return 0x7FFF
 12583  						}
 12584  						return func() int32 {
 12585  							if (CCmax_new) < (int32(libc.Int16FromInt32(0x8000))) {
 12586  								return int32(libc.Int16FromInt32(0x8000))
 12587  							}
 12588  							return CCmax_new
 12589  						}()
 12590  					}()
 12591  					CCmax_new = ((((cross_corr) >> 16) * (int32(int16(CCmax_new)))) + ((((cross_corr) & 0x0000FFFF) * (int32(int16(CCmax_new)))) >> 16))
 12592  					/* Saturate */
 12593  					if CCmax_new > (int32((0x7FFFFFFF)) >> (3)) {
 12594  						CCmax_new = 0x7FFFFFFF
 12595  					} else {
 12596  						CCmax_new = ((CCmax_new) << (3))
 12597  					}
 12598  					/* Reduce depending on flatness of contour */
 12599  					diff = (j - (int32((34)) >> (1)))
 12600  					diff = ((diff) * (diff))
 12601  					diff = (0x7FFF - (((contour_bias) * (diff)) >> (5))) /* Q20 -> Q15 */
 12602  
 12603  					CCmax_new = (((((CCmax_new) >> 16) * (int32(int16(diff)))) + ((((CCmax_new) & 0x0000FFFF) * (int32(int16(diff)))) >> 16)) << (1))
 12604  				} else {
 12605  					CCmax_new = 0
 12606  				}
 12607  
 12608  				if (CCmax_new > CCmax) && ((d + int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage3))) + uintptr(j)*2)))) <= max_lag) {
 12609  					CCmax = CCmax_new
 12610  					lag_new = d
 12611  					CBimax = j
 12612  				}
 12613  			}
 12614  			lag_counter++
 12615  		}
 12616  
 12617  		for k = 0; k < 4; k++ {
 12618  			*(*int32)(unsafe.Pointer(pitch_out + uintptr(k)*4)) = (lag_new + int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage3)) + uintptr(k)*68) + uintptr(CBimax)*2))))
 12619  		}
 12620  		*(*int32)(unsafe.Pointer(lagIndex)) = (lag_new - min_lag)
 12621  		*(*int32)(unsafe.Pointer(contourIndex)) = CBimax
 12622  	} else {
 12623  		/* Save Lags and correlation */
 12624  		CCmax = func() int32 {
 12625  			if (CCmax) > (0) {
 12626  				return CCmax
 12627  			}
 12628  			return 0
 12629  		}()
 12630  		*(*int32)(unsafe.Pointer(LTPCorr_Q15)) = SKP_Silk_SQRT_APPROX(tls, ((CCmax) << (13))) /* Output normalized correlation */
 12631  		for k = 0; k < 4; k++ {
 12632  			*(*int32)(unsafe.Pointer(pitch_out + uintptr(k)*4)) = (lag + int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage2)) + uintptr(k)*22) + uintptr(CBimax)*2))))
 12633  		}
 12634  		*(*int32)(unsafe.Pointer(lagIndex)) = (lag - min_lag_8kHz)
 12635  		*(*int32)(unsafe.Pointer(contourIndex)) = CBimax
 12636  	}
 12637  
 12638  	/* return as voiced */
 12639  	return 0
 12640  }
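// normCorrSq is a floating-point sketch, not part of the generated ccgo output, of
// the measure that the second-stage fixed-point block in the function above
// approximates: the squared cross-correlation normalized by the target and basis
// energies. The name and signature are illustrative only.
func normCorrSq(crossCorr, energyTarget, energyBasis float64) float64 {
	if crossCorr <= 0 || energyTarget <= 0 || energyBasis <= 0 {
		return 0 // the fixed-point code also maps non-positive correlations to zero
	}
	return crossCorr * crossCorr / (energyTarget * energyBasis)
}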
 12641  
 12642  /*************************************************************************/
 12643  /* Calculates the correlations used in the stage 3 search. The lag range */
 12644  /* covers the whole lag codebook for all searched offset lags (lag +- 2). */
 12645  /*************************************************************************/
 12646  func SKP_FIX_P_Ana_calc_corr_st3(tls *libc.TLS, cross_corr_st3 uintptr, signal uintptr, start_lag int32, sf_length int32, complexity int32) { /* SKP_Silk_pitch_analysis_core.c:569:6: */
 12647  	bp := tls.Alloc(88)
 12648  	defer tls.Free(88)
 12649  
 12650  	var target_ptr uintptr
 12651  	var basis_ptr uintptr
 12652  	var cross_corr int32
 12653  	var i int32
 12654  	var j int32
 12655  	var k int32
 12656  	var lag_counter int32
 12657  	var cbk_offset int32
 12658  	var cbk_size int32
 12659  	var delta int32
 12660  	var idx int32
 12661  	// var scratch_mem [22]int32 at bp, 88
 12662  
 12663  	cbk_offset = int32(SKP_Silk_cbk_offsets_stage3[complexity])
 12664  	cbk_size = int32(SKP_Silk_cbk_sizes_stage3[complexity])
 12665  
 12666  	target_ptr = (signal + uintptr(((sf_length)<<(2)))*2) /* Pointer to middle of frame */
 12667  	for k = 0; k < 4; k++ {
 12668  		lag_counter = 0
 12669  
 12670  		/* Calculate the correlations for each subframe */
 12671  		for j = int32(*(*int16)(unsafe.Pointer(((uintptr(unsafe.Pointer(&SKP_Silk_Lag_range_stage3)) + uintptr(complexity)*16) + uintptr(k)*4)))); j <= int32(*(*int16)(unsafe.Pointer(((uintptr(unsafe.Pointer(&SKP_Silk_Lag_range_stage3)) + uintptr(complexity)*16) + uintptr(k)*4) + 1*2))); j++ {
 12672  			basis_ptr = (target_ptr - uintptr((start_lag+j))*2)
 12673  			cross_corr = SKP_Silk_inner_prod_aligned(tls, target_ptr, basis_ptr, sf_length)
 12674  
 12675  			*(*int32)(unsafe.Pointer(bp /* &scratch_mem[0] */ + uintptr(lag_counter)*4)) = cross_corr
 12676  			lag_counter++
 12677  		}
 12678  
 12679  		delta = int32(*(*int16)(unsafe.Pointer(((uintptr(unsafe.Pointer(&SKP_Silk_Lag_range_stage3)) + uintptr(complexity)*16) + uintptr(k)*4))))
 12680  		for i = cbk_offset; i < (cbk_offset + cbk_size); i++ {
 12681  			/* Fill out the 3 dim array that stores the correlations for */
 12682  			/* each code_book vector for each start lag */
 12683  			idx = (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage3)) + uintptr(k)*68) + uintptr(i)*2))) - delta)
 12684  			for j = 0; j < 5; j++ {
 12685  
 12686  				*(*int32)(unsafe.Pointer(((cross_corr_st3 + uintptr(k)*680) + uintptr(i)*20) + uintptr(j)*4)) = *(*int32)(unsafe.Pointer(bp /* &scratch_mem[0] */ + uintptr((idx+j))*4))
 12687  			}
 12688  		}
 12689  		target_ptr += 2 * (uintptr(sf_length))
 12690  	}
 12691  }
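// The flat pointer arithmetic above (byte strides 680, 20 and 4) corresponds to a
// [4][34][5]int32 array: one slot per subframe, per stage-3 codebook vector, per
// start-lag offset. An equivalent typed layout, shown for illustration only:
type stage3Grid = [4][34][5]int32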
 12692  
 12693  /********************************************************************/
 12694  /* Calculate the energies for all four subframes. The energies are */
 12695  /* calculated recursively.                                          */
 12696  /********************************************************************/
 12697  func SKP_FIX_P_Ana_calc_energy_st3(tls *libc.TLS, energies_st3 uintptr, signal uintptr, start_lag int32, sf_length int32, complexity int32) { /* SKP_Silk_pitch_analysis_core.c:621:6: */
 12698  	bp := tls.Alloc(88)
 12699  	defer tls.Free(88)
 12700  
 12701  	var target_ptr uintptr
 12702  	var basis_ptr uintptr
 12703  	var energy int32
 12704  	var k int32
 12705  	var i int32
 12706  	var j int32
 12707  	var lag_counter int32
 12708  	var cbk_offset int32
 12709  	var cbk_size int32
 12710  	var delta int32
 12711  	var idx int32
 12712  	// var scratch_mem [22]int32 at bp, 88
 12713  
 12714  	cbk_offset = int32(SKP_Silk_cbk_offsets_stage3[complexity])
 12715  	cbk_size = int32(SKP_Silk_cbk_sizes_stage3[complexity])
 12716  
 12717  	target_ptr = (signal + uintptr(((sf_length)<<(2)))*2)
 12718  	for k = 0; k < 4; k++ {
 12719  		lag_counter = 0
 12720  
 12721  		/* Calculate the energy for first lag */
 12722  		basis_ptr = (target_ptr - uintptr((start_lag+int32(*(*int16)(unsafe.Pointer(((uintptr(unsafe.Pointer(&SKP_Silk_Lag_range_stage3)) + uintptr(complexity)*16) + uintptr(k)*4))))))*2)
 12723  		energy = SKP_Silk_inner_prod_aligned(tls, basis_ptr, basis_ptr, sf_length)
 12724  
 12725  		*(*int32)(unsafe.Pointer(bp /* &scratch_mem[0] */ + uintptr(lag_counter)*4)) = energy
 12726  		lag_counter++
 12727  
 12728  		for i = 1; i < ((int32(*(*int16)(unsafe.Pointer(((uintptr(unsafe.Pointer(&SKP_Silk_Lag_range_stage3)) + uintptr(complexity)*16) + uintptr(k)*4) + 1*2))) - int32(*(*int16)(unsafe.Pointer(((uintptr(unsafe.Pointer(&SKP_Silk_Lag_range_stage3)) + uintptr(complexity)*16) + uintptr(k)*4))))) + 1); i++ {
 12729  			/* remove part outside new window */
 12730  			energy = energy - ((int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr((sf_length-i))*2)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr((sf_length-i))*2)))))
 12731  
 12732  			/* add part that comes into window */
 12733  			energy = func() int32 {
 12734  				if ((uint32((energy) + ((int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2))))))) & 0x80000000) == uint32(0) {
 12735  					return func() int32 {
 12736  						if ((uint32((energy) & ((int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2))))))) & 0x80000000) != uint32(0) {
 12737  							return libc.Int32FromUint32(0x80000000)
 12738  						}
 12739  						return ((energy) + ((int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2))))))
 12740  					}()
 12741  				}
 12742  				return func() int32 {
 12743  					if ((uint32((energy) | ((int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2))))))) & 0x80000000) == uint32(0) {
 12744  						return 0x7FFFFFFF
 12745  					}
 12746  					return ((energy) + ((int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2))))))
 12747  				}()
 12748  			}()
 12749  
 12750  			*(*int32)(unsafe.Pointer(bp /* &scratch_mem[0] */ + uintptr(lag_counter)*4)) = energy
 12751  			lag_counter++
 12752  		}
 12753  
 12754  		delta = int32(*(*int16)(unsafe.Pointer(((uintptr(unsafe.Pointer(&SKP_Silk_Lag_range_stage3)) + uintptr(complexity)*16) + uintptr(k)*4))))
 12755  		for i = cbk_offset; i < (cbk_offset + cbk_size); i++ {
 12756  			/* Fill out the 3 dim array that stores the correlations for    */
 12757  			/* each code_book vector for each start lag                        */
 12758  			idx = (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage3)) + uintptr(k)*68) + uintptr(i)*2))) - delta)
 12759  			for j = 0; j < 5; j++ {
 12760  
 12761  				*(*int32)(unsafe.Pointer(((energies_st3 + uintptr(k)*680) + uintptr(i)*20) + uintptr(j)*4)) = *(*int32)(unsafe.Pointer(bp /* &scratch_mem[0] */ + uintptr((idx+j))*4))
 12762  
 12763  			}
 12764  		}
 12765  		target_ptr += 2 * (uintptr(sf_length))
 12766  	}
 12767  }
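// slidingEnergies is a plain-Go sketch, not part of the generated code, of the
// recursive energy update above: the first lag gets a full inner product, and every
// following lag removes the sample leaving the window and adds the one entering it.
// It assumes nLags >= 1 and basisStart >= nLags-1 so the window never runs off the
// front of the slice; the generated code additionally saturates the addition.
func slidingEnergies(signal []int16, basisStart, sfLength, nLags int) []int32 {
	out := make([]int32, nLags)
	var energy int32
	for i := 0; i < sfLength; i++ {
		s := int32(signal[basisStart+i])
		energy += s * s
	}
	out[0] = energy
	for i := 1; i < nLags; i++ {
		leaving := int32(signal[basisStart+sfLength-i]) // drops out at the high end
		entering := int32(signal[basisStart-i])         // comes in at the low end
		energy += entering*entering - leaving*leaving
		out[i] = energy
	}
	return out
}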
 12768  
 12769  func SKP_FIX_P_Ana_find_scaling(tls *libc.TLS, signal uintptr, signal_length int32, sum_sqr_len int32) int32 { /* SKP_Silk_pitch_analysis_core.c:681:11: */
 12770  	var nbits int32
 12771  	var x_max int32
 12772  
 12773  	x_max = int32(SKP_Silk_int16_array_maxabs(tls, signal, signal_length))
 12774  
 12775  	if x_max < 0x7FFF {
 12776  		/* Number of bits needed for the sum of the squares */
 12777  		nbits = (32 - SKP_Silk_CLZ32(tls, ((int32(int16(x_max)))*(int32(int16(x_max))))))
 12778  	} else {
 12779  		/* Here we don't know whether x_max should have been SKP_int16_MAX + 1, so we assume the worst case */
 12780  		nbits = 30
 12781  	}
 12782  	nbits = nbits + (17 - SKP_Silk_CLZ16(tls, int16(sum_sqr_len)))
 12783  
 12784  	/* Without a guarantee of saturation, we need to keep the 31st bit free */
 12785  	if nbits < 31 {
 12786  		return 0
 12787  	} else {
 12788  		return (nbits - 30)
 12789  	}
 12790  	return int32(0)
 12791  }
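// The two helpers below re-express the scaling heuristic above in plain Go; they are
// illustrative sketches only and not part of the generated code. bitLen32Sketch plays
// the role of 32 - SKP_Silk_CLZ32(x).
func bitLen32Sketch(v uint32) int32 {
	var n int32
	for ; v != 0; v >>= 1 {
		n++
	}
	return n
}

// findScalingSketch bounds the bit width of a sum of sumSqrLen squared samples and
// returns the right-shift needed to keep that sum below 31 bits.
func findScalingSketch(maxAbs, sumSqrLen int32) int32 {
	var nbits int32
	if maxAbs < 0x7FFF {
		nbits = bitLen32Sketch(uint32(maxAbs * maxAbs))
	} else {
		nbits = 30 // worst case: the true maximum may have been 32768
	}
	nbits += bitLen32Sketch(uint32(sumSqrLen)) + 1 // matches 17 - CLZ16(sum_sqr_len)
	if nbits < 31 {
		return 0
	}
	return nbits - 30
}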
 12792  
 12793  /***********************************************************************
 12794  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
 12795  Redistribution and use in source and binary forms, with or without
 12796  modification, (subject to the limitations in the disclaimer below)
 12797  are permitted provided that the following conditions are met:
 12798  - Redistributions of source code must retain the above copyright notice,
 12799  this list of conditions and the following disclaimer.
 12800  - Redistributions in binary form must reproduce the above copyright
 12801  notice, this list of conditions and the following disclaimer in the
 12802  documentation and/or other materials provided with the distribution.
 12803  - Neither the name of Skype Limited, nor the names of specific
 12804  contributors, may be used to endorse or promote products derived from
 12805  this software without specific prior written permission.
 12806  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
 12807  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 12808  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
 12809  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 12810  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 12811  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 12812  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 12813  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 12814  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 12815  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 12816  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 12817  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 12818  ***********************************************************************/
 12819  
 12820  /************************************************************/
 12821  /* Definitions For Fix pitch estimator                      */
 12822  /************************************************************/
 12823  
 12824  /********************************************************/
 12825  /* Auto Generated File from generate_pitch_est_tables.m */
 12826  /********************************************************/
 12827  
 12828  var SKP_Silk_CB_lags_stage2 = [4][11]int16{
 12829  	{int16(0), int16(2), int16(-1), int16(-1), int16(-1), int16(0), int16(0), int16(1), int16(1), int16(0), int16(1)},
 12830  	{int16(0), int16(1), int16(0), int16(0), int16(0), int16(0), int16(0), int16(1), int16(0), int16(0), int16(0)},
 12831  	{int16(0), int16(0), int16(1), int16(0), int16(0), int16(0), int16(1), int16(0), int16(0), int16(0), int16(0)},
 12832  	{int16(0), int16(-1), int16(2), int16(1), int16(0), int16(1), int16(1), int16(0), int16(0), int16(-1), int16(-1)},
 12833  } /* SKP_Silk_pitch_est_tables.c:35:17 */
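// Rows of SKP_Silk_CB_lags_stage2 are subframes (4) and columns are codebook
// vectors (11): each entry is a per-subframe lag offset, in 8 kHz samples, that is
// added to the candidate lag during the second-stage search.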
 12834  
 12835  var SKP_Silk_CB_lags_stage3 = [4][34]int16{
 12836  	{int16(-9), int16(-7), int16(-6), int16(-5), int16(-5), int16(-4), int16(-4), int16(-3), int16(-3), int16(-2), int16(-2), int16(-2), int16(-1), int16(-1), int16(-1), int16(0), int16(0), int16(0), int16(1), int16(1), int16(0), int16(1), int16(2), int16(2), int16(2), int16(3), int16(3), int16(4), int16(4), int16(5), int16(6), int16(5), int16(6), int16(8)},
 12837  	{int16(-3), int16(-2), int16(-2), int16(-2), int16(-1), int16(-1), int16(-1), int16(-1), int16(-1), int16(0), int16(0), int16(-1), int16(0), int16(0), int16(0), int16(0), int16(0), int16(0), int16(1), int16(0), int16(0), int16(0), int16(1), int16(1), int16(0), int16(1), int16(1), int16(2), int16(1), int16(2), int16(2), int16(2), int16(2), int16(3)},
 12838  	{int16(3), int16(3), int16(2), int16(2), int16(2), int16(2), int16(1), int16(2), int16(1), int16(1), int16(0), int16(1), int16(1), int16(0), int16(0), int16(0), int16(1), int16(0), int16(0), int16(0), int16(0), int16(0), int16(0), int16(-1), int16(0), int16(0), int16(-1), int16(-1), int16(-1), int16(-1), int16(-1), int16(-2), int16(-2), int16(-2)},
 12839  	{int16(9), int16(8), int16(6), int16(5), int16(6), int16(5), int16(4), int16(4), int16(3), int16(3), int16(2), int16(2), int16(2), int16(1), int16(0), int16(1), int16(1), int16(0), int16(0), int16(0), int16(-1), int16(-1), int16(-1), int16(-2), int16(-2), int16(-2), int16(-3), int16(-3), int16(-4), int16(-4), int16(-5), int16(-5), int16(-6), int16(-7)},
 12840  } /* SKP_Silk_pitch_est_tables.c:43:17 */
 12841  
 12842  var SKP_Silk_Lag_range_stage3 = [3][4][2]int16{
 12843  	{
 12844  		/* Lags to search for low number of stage3 cbks */
 12845  		{int16(-2), int16(6)},
 12846  		{int16(-1), int16(5)},
 12847  		{int16(-1), int16(5)},
 12848  		{int16(-2), int16(7)},
 12849  	},
 12850  	/* Lags to search for middle number of stage3 cbks */
 12851  	{
 12852  		{int16(-4), int16(8)},
 12853  		{int16(-1), int16(6)},
 12854  		{int16(-1), int16(6)},
 12855  		{int16(-4), int16(9)},
 12856  	},
 12857  	/* Lags to search for max number of stage3 cbks */
 12858  	{
 12859  		{int16(-9), int16(12)},
 12860  		{int16(-3), int16(7)},
 12861  		{int16(-2), int16(7)},
 12862  		{int16(-7), int16(13)},
 12863  	},
 12864  } /* SKP_Silk_pitch_est_tables.c:51:17 */
 12865  
 12866  var SKP_Silk_cbk_sizes_stage3 = [3]int16{
 12867  	int16(16),
 12868  	int16(24),
 12869  	int16(34),
 12870  } /* SKP_Silk_pitch_est_tables.c:76:17 */
 12871  
 12872  var SKP_Silk_cbk_offsets_stage3 = [3]int16{
 12873  	(int16(int32((34 - 16)) >> 1)),
 12874  	(int16(int32((34 - 24)) >> 1)),
 12875  	int16(0),
 12876  } /* SKP_Silk_pitch_est_tables.c:83:17 */
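// The offsets above centre the smaller stage-3 codebooks inside the full 34-vector
// codebook: (34-16)/2 = 9 for the low-complexity search, (34-24)/2 = 5 for the
// middle setting, and 0 when all 34 vectors are searched.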
 12877  
 12878  /***********************************************************************
 12879  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
 12880  Redistribution and use in source and binary forms, with or without
 12881  modification, (subject to the limitations in the disclaimer below)
 12882  are permitted provided that the following conditions are met:
 12883  - Redistributions of source code must retain the above copyright notice,
 12884  this list of conditions and the following disclaimer.
 12885  - Redistributions in binary form must reproduce the above copyright
 12886  notice, this list of conditions and the following disclaimer in the
 12887  documentation and/or other materials provided with the distribution.
 12888  - Neither the name of Skype Limited, nor the names of specific
 12889  contributors, may be used to endorse or promote products derived from
 12890  this software without specific prior written permission.
 12891  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
 12892  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 12893  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
 12894  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 12895  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 12896  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 12897  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 12898  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 12899  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 12900  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 12901  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 12902  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 12903  ***********************************************************************/
 12904  
 12905  var HARM_ATT_Q15 = [2]int16{int16(32440), int16(31130)}              /* SKP_Silk_PLC.c:32:24 */ /* 0.99, 0.95 */
 12906  var PLC_RAND_ATTENUATE_V_Q15 = [2]int16{int16(31130), int16(26214)}  /* SKP_Silk_PLC.c:33:24 */ /* 0.95, 0.8 */
 12907  var PLC_RAND_ATTENUATE_UV_Q15 = [2]int16{int16(32440), int16(29491)} /* SKP_Silk_PLC.c:34:24 */ /* 0.99, 0.9 */
 12908  
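// The three tables above hold per-loss-count attenuation factors in Q15 (value/32768):
// HARM_ATT_Q15 ~ {0.99, 0.95}, PLC_RAND_ATTENUATE_V_Q15 ~ {0.95, 0.8} and
// PLC_RAND_ATTENUATE_UV_Q15 ~ {0.99, 0.9}. Index 0 is used for the first lost frame,
// index 1 for every further consecutive loss (see SKP_Silk_PLC_conceal).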
 12910  
 12911  func SKP_Silk_PLC_Reset(tls *libc.TLS, psDec uintptr) { /* SKP_Silk_PLC.c:36:6: */
 12912  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsPLC.FpitchL_Q8 = (((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length) >> (1))
 12913  }
 12914  
 12915  func SKP_Silk_PLC(tls *libc.TLS, psDec uintptr, psDecCtrl uintptr, signal uintptr, length int32, lost int32) { /* SKP_Silk_PLC.c:43:6: */
 12916  	/* PLC control function */
 12917  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz != (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsPLC.Ffs_kHz {
 12918  		SKP_Silk_PLC_Reset(tls, psDec)
 12919  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsPLC.Ffs_kHz = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz
 12920  	}
 12921  
 12922  	if lost != 0 {
 12923  		/****************************/
 12924  		/* Generate Signal          */
 12925  		/****************************/
 12926  		SKP_Silk_PLC_conceal(tls, psDec, psDecCtrl, signal, length)
 12927  
 12928  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt++
 12929  	} else {
 12930  		/****************************/
 12931  		/* Update state             */
 12932  		/****************************/
 12933  		SKP_Silk_PLC_update(tls, psDec, psDecCtrl, signal, length)
 12934  	}
 12935  }
 12936  
 12937  /**************************************************/
 12938  /* Update state of PLC                            */
 12939  /**************************************************/
 12940  func SKP_Silk_PLC_update(tls *libc.TLS, psDec uintptr, psDecCtrl uintptr, signal uintptr, length int32) { /* SKP_Silk_PLC.c:75:6: */
 12941  	var LTP_Gain_Q14 int32
 12942  	var temp_LTP_Gain_Q14 int32
 12943  	var i int32
 12944  	var j int32
 12945  	var psPLC uintptr
 12946  
 12947  	psPLC = (psDec + 13632 /* &.sPLC */)
 12948  
 12949  	/* Update parameters used in case of packet loss */
 12950  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_sigtype = (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype
 12951  	LTP_Gain_Q14 = 0
 12952  	if (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype == 0 {
 12953  		/* Find the parameters for the last subframe which contains a pitch pulse */
 12954  		for j = 0; (j * (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length) < *(*int32)(unsafe.Pointer((psDecCtrl /* &.pitchL */) + 3*4)); j++ {
 12955  			temp_LTP_Gain_Q14 = 0
 12956  			for i = 0; i < 5; i++ {
 12957  				temp_LTP_Gain_Q14 = temp_LTP_Gain_Q14 + (int32(*(*int16)(unsafe.Pointer((psDecCtrl + 100 /* &.LTPCoef_Q14 */) + uintptr(((((4-1)-j)*5)+i))*2))))
 12958  			}
 12959  			if temp_LTP_Gain_Q14 > LTP_Gain_Q14 {
 12960  				LTP_Gain_Q14 = temp_LTP_Gain_Q14
 12961  				libc.Xmemcpy(tls, psPLC+4 /* &.LTPCoef_Q14 */, ((psDecCtrl + 100 /* &.LTPCoef_Q14 */) + uintptr(((int32((int16((4 - 1) - j))))*(int32(int16(5)))))*2), (uint64(5) * uint64(unsafe.Sizeof(int16(0)))))
 12962  
 12963  				(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8 = ((*(*int32)(unsafe.Pointer((psDecCtrl /* &.pitchL */) + uintptr(((4-1)-j))*4))) << (8))
 12964  			}
 12965  		}
 12966  
 12967  		libc.Xmemset(tls, psPLC+4 /* &.LTPCoef_Q14 */, 0, (uint64(5) * uint64(unsafe.Sizeof(int16(0)))))
 12968  		*(*int16)(unsafe.Pointer((psPLC + 4 /* &.LTPCoef_Q14 */) + 2*2)) = int16(LTP_Gain_Q14)
 12969  
 12970  		/* Limit LT coefs */
 12971  		if LTP_Gain_Q14 < 11469 {
 12972  			var scale_Q10 int32
 12973  			var tmp int32
 12974  
 12975  			tmp = (int32((11469)) << (10))
 12976  			scale_Q10 = ((tmp) / (func() int32 {
 12977  				if (LTP_Gain_Q14) > (1) {
 12978  					return LTP_Gain_Q14
 12979  				}
 12980  				return 1
 12981  			}()))
 12982  			for i = 0; i < 5; i++ {
 12983  				*(*int16)(unsafe.Pointer((psPLC + 4 /* &.LTPCoef_Q14 */) + uintptr(i)*2)) = (int16(((int32(*(*int16)(unsafe.Pointer((psPLC + 4 /* &.LTPCoef_Q14 */) + uintptr(i)*2)))) * (int32(int16(scale_Q10)))) >> (10)))
 12984  			}
 12985  		} else if LTP_Gain_Q14 > 15565 {
 12986  			var scale_Q14 int32
 12987  			var tmp int32
 12988  
 12989  			tmp = (int32((15565)) << (14))
 12990  			scale_Q14 = ((tmp) / (func() int32 {
 12991  				if (LTP_Gain_Q14) > (1) {
 12992  					return LTP_Gain_Q14
 12993  				}
 12994  				return 1
 12995  			}()))
 12996  			for i = 0; i < 5; i++ {
 12997  				*(*int16)(unsafe.Pointer((psPLC + 4 /* &.LTPCoef_Q14 */) + uintptr(i)*2)) = (int16(((int32(*(*int16)(unsafe.Pointer((psPLC + 4 /* &.LTPCoef_Q14 */) + uintptr(i)*2)))) * (int32(int16(scale_Q14)))) >> (14)))
 12998  			}
 12999  		}
 13000  	} else {
 13001  		(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8 = (((int32(int16((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz))) * (int32(int16(18)))) << (8))
 13002  		libc.Xmemset(tls, psPLC+4 /* &.LTPCoef_Q14 */, 0, (uint64(5) * uint64(unsafe.Sizeof(int16(0)))))
 13003  	}
 13004  
 13005  	/* Save LPC coefficients */
 13006  	libc.Xmemcpy(tls, psPLC+14 /* &.prevLPC_Q12 */, ((psDecCtrl + 36 /* &.PredCoef_Q12 */) + 1*32), (uint64((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) * uint64(unsafe.Sizeof(int16(0)))))
 13007  	(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FprevLTP_scale_Q14 = int16((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FLTP_scale_Q14)
 13008  
 13009  	/* Save Gains */
 13010  	libc.Xmemcpy(tls, psPLC+72 /* &.prevGain_Q16 */, psDecCtrl+16 /* &.Gains_Q16 */, (uint64(4) * uint64(unsafe.Sizeof(int32(0)))))
 13011  }
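// clampLTPGainQ14Sketch shows, in plain Go, the effective limit that the
// "Limit LT coefs" block above applies to the gain kept in the centre tap:
// non-zero gains end up at roughly 0.7 .. 0.95 in Q14. This is an illustrative
// sketch only; the generated code rescales the coefficient vector with Q10/Q14
// fixed-point divisions rather than clamping a single value.
func clampLTPGainQ14Sketch(gainQ14 int32) int32 {
	const lo, hi = 11469, 15565 // ~0.7*16384 and ~0.95*16384
	if gainQ14 <= 0 {
		return 0 // a zero gain stays zero: the code rescales, it never adds energy
	}
	if gainQ14 < lo {
		return lo
	}
	if gainQ14 > hi {
		return hi
	}
	return gainQ14
}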
 13012  
 13013  func SKP_Silk_PLC_conceal(tls *libc.TLS, psDec uintptr, psDecCtrl uintptr, signal uintptr, length int32) { /* SKP_Silk_PLC.c:146:6: */
 13014  	bp := tls.Alloc(2932)
 13015  	defer tls.Free(2932)
 13016  
 13017  	var i int32
 13018  	var j int32
 13019  	var k int32
 13020  	var B_Q14 uintptr
 13021  	// var exc_buf [480]int16 at bp, 960
 13022  
 13023  	var exc_buf_ptr uintptr
 13024  	var rand_scale_Q14 int16
 13025  	// var A_Q12_tmp struct {_ [0]uint32;Fas_int16 [16]int16;} at bp+2900, 32
 13026  
 13027  	var rand_seed int32
 13028  	var harm_Gain_Q15 int32
 13029  	var rand_Gain_Q15 int32
 13030  	var lag int32
 13031  	var idx int32
 13032  	var sLTP_buf_idx int32
 13033  	// var shift1 int32 at bp+964, 4
 13034  
 13035  	// var shift2 int32 at bp+972, 4
 13036  
 13037  	// var energy1 int32 at bp+960, 4
 13038  
 13039  	// var energy2 int32 at bp+968, 4
 13040  
 13041  	var rand_ptr uintptr
 13042  	var pred_lag_ptr uintptr
 13043  	// var sig_Q10 [480]int32 at bp+980, 1920
 13044  
 13045  	var sig_Q10_ptr uintptr
 13046  	var LPC_exc_Q10 int32
 13047  	var LPC_pred_Q10 int32
 13048  	var LTP_pred_Q14 int32
 13049  	var psPLC uintptr
 13050  	psPLC = (psDec + 13632 /* &.sPLC */)
 13051  
 13052  	/* Update LTP buffer */
 13053  	libc.Xmemcpy(tls, psDec+1048 /* &.sLTP_Q16 */, ((psDec + 1048 /* &.sLTP_Q16 */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length)*4), (uint64((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length) * uint64(unsafe.Sizeof(int32(0)))))
 13054  
 13055  	/* LPC concealment. Apply BWE to previous LPC */
 13056  	SKP_Silk_bwexpander(tls, psPLC+14 /* &.prevLPC_Q12 */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order, 64880)
 13057  
 13058  	/* Find random noise component */
 13059  	/* Scale previous excitation signal */
 13060  	exc_buf_ptr = bp /* &exc_buf[0] */
 13061  	for k = (int32(4) >> 1); k < 4; k++ {
 13062  		for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length; i++ {
 13063  			*(*int16)(unsafe.Pointer(exc_buf_ptr + uintptr(i)*2)) = (int16((((((*(*int32)(unsafe.Pointer((psDec + 5432 /* &.exc_Q10 */) + uintptr((i+(k*(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)))*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDec + 5432 /* &.exc_Q10 */) + uintptr((i+(k*(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)))*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + uintptr(k)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDec + 5432 /* &.exc_Q10 */) + uintptr((i+(k*(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)))*4))) * (func() int32 {
 13064  				if (16) == 1 {
 13065  					return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + uintptr(k)*4))) & 1))
 13066  				}
 13067  				return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + uintptr(k)*4))) >> ((16) - 1)) + 1) >> 1)
 13068  			}()))) >> (10)))
 13069  		}
 13070  		exc_buf_ptr += 2 * (uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length))
 13071  	}
 13072  	/* Find the subframe with lowest energy of the last two and use that as random noise generator */
 13073  	SKP_Silk_sum_sqr_shift(tls, bp+960 /* &energy1 */, bp+964 /* &shift1 */, bp /* &exc_buf[0] */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)
 13074  	SKP_Silk_sum_sqr_shift(tls, bp+968 /* &energy2 */, bp+972 /* &shift2 */, (bp /* &exc_buf */ + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)*2), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)
 13075  
 13076  	if ((*(*int32)(unsafe.Pointer(bp + 960 /* energy1 */))) >> (*(*int32)(unsafe.Pointer(bp + 972 /* shift2 */)))) < ((*(*int32)(unsafe.Pointer(bp + 968 /* energy2 */))) >> (*(*int32)(unsafe.Pointer(bp + 964 /* shift1 */)))) {
 13077  		/* First sub-frame has lowest energy */
 13078  		rand_ptr = ((psDec + 5432 /* &.exc_Q10 */) + uintptr(SKP_max_int(tls, 0, ((3*(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)-128)))*4)
 13079  	} else {
 13080  		/* Second sub-frame has lowest energy */
 13081  		rand_ptr = ((psDec + 5432 /* &.exc_Q10 */) + uintptr(SKP_max_int(tls, 0, ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length-128)))*4)
 13082  	}
 13083  
 13084  	/* Setup Gain to random noise component */
 13085  	B_Q14 = psPLC + 4 /* &.LTPCoef_Q14 */
 13086  	rand_scale_Q14 = (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FrandScale_Q14
 13087  
 13088  	/* Setup attenuation gains */
 13089  	harm_Gain_Q15 = int32(HARM_ATT_Q15[SKP_min_int(tls, (2-1), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt)])
 13090  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_sigtype == 0 {
 13091  		rand_Gain_Q15 = int32(PLC_RAND_ATTENUATE_V_Q15[SKP_min_int(tls, (2-1), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt)])
 13092  	} else {
 13093  		rand_Gain_Q15 = int32(PLC_RAND_ATTENUATE_UV_Q15[SKP_min_int(tls, (2-1), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt)])
 13094  	}
 13095  
 13096  	/* First Lost frame */
 13097  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt == 0 {
 13098  		rand_scale_Q14 = (int16(int32(1) << 14))
 13099  
 13100  		/* Reduce random noise Gain for voiced frames */
 13101  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_sigtype == 0 {
 13102  			for i = 0; i < 5; i++ {
 13103  				rand_scale_Q14 = int16(int32(rand_scale_Q14) - (int32(*(*int16)(unsafe.Pointer(B_Q14 + uintptr(i)*2)))))
 13104  			}
 13105  			rand_scale_Q14 = SKP_max_16(tls, int16(3277), rand_scale_Q14) /* 0.2 */
 13106  			rand_scale_Q14 = (int16(((int32(rand_scale_Q14)) * (int32((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FprevLTP_scale_Q14))) >> (14)))
 13107  		}
 13108  
 13109  		/* Reduce random noise for unvoiced frames with high LPC gain */
 13110  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_sigtype == 1 {
 13111  			// var invGain_Q30 int32 at bp+976, 4
 13112  
 13113  			var down_scale_Q30 int32
 13114  
 13115  			SKP_Silk_LPC_inverse_pred_gain(tls, bp+976 /* &invGain_Q30 */, psPLC+14 /* &.prevLPC_Q12 */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order)
 13116  
 13117  			down_scale_Q30 = SKP_min_32(tls, (int32((int32(1) << 30)) >> (3)), *(*int32)(unsafe.Pointer(bp + 976 /* invGain_Q30 */)))
 13118  			down_scale_Q30 = SKP_max_32(tls, (int32((int32(1) << 30)) >> (8)), down_scale_Q30)
 13119  			down_scale_Q30 = ((down_scale_Q30) << (3))
 13120  
 13121  			rand_Gain_Q15 = (((((down_scale_Q30) >> 16) * (int32(int16(rand_Gain_Q15)))) + ((((down_scale_Q30) & 0x0000FFFF) * (int32(int16(rand_Gain_Q15)))) >> 16)) >> (14))
 13122  		}
 13123  	}
 13124  
 13125  	rand_seed = (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Frand_seed
 13126  	lag = func() int32 {
 13127  		if (8) == 1 {
 13128  			return ((((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) >> 1) + (((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) & 1))
 13129  		}
 13130  		return (((((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) >> ((8) - 1)) + 1) >> 1)
 13131  	}()
 13132  	sLTP_buf_idx = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length
 13133  
 13134  	/***************************/
 13135  	/* LTP synthesis filtering */
 13136  	/***************************/
 13137  	sig_Q10_ptr = bp + 980 /* &sig_Q10[0] */
 13138  	for k = 0; k < 4; k++ {
 13139  		/* Setup pointer */
 13140  		pred_lag_ptr = ((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-lag)+(5/2)))*4)
 13141  		for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length; i++ {
 13142  			rand_seed = (int32((uint32(907633515)) + ((uint32(rand_seed)) * (uint32(196314165)))))
 13143  			idx = (((rand_seed) >> (25)) & (128 - 1))
 13144  
 13145  			/* Unrolled loop */
 13146  			LTP_pred_Q14 = ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14))))) >> 16))
 13147  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 1*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 1*2))))) >> 16)))
 13148  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 2*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 2*2))))) >> 16)))
 13149  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 3*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 3*2))))) >> 16)))
 13150  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 4*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 4*2))))) >> 16)))
 13151  			pred_lag_ptr += 4
 13152  
 13153  			/* Generate LPC residual */
 13154  			LPC_exc_Q10 = (((((*(*int32)(unsafe.Pointer(rand_ptr + uintptr(idx)*4))) >> 16) * (int32(rand_scale_Q14))) + ((((*(*int32)(unsafe.Pointer(rand_ptr + uintptr(idx)*4))) & 0x0000FFFF) * (int32(rand_scale_Q14))) >> 16)) << (2)) /* Random noise part */
 13155  			LPC_exc_Q10 = ((LPC_exc_Q10) + (func() int32 {
 13156  				if (4) == 1 {
 13157  					return (((LTP_pred_Q14) >> 1) + ((LTP_pred_Q14) & 1))
 13158  				}
 13159  				return ((((LTP_pred_Q14) >> ((4) - 1)) + 1) >> 1)
 13160  			}())) /* Harmonic part */
 13161  
 13162  			/* Update states */
 13163  			*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(sLTP_buf_idx)*4)) = ((LPC_exc_Q10) << (6))
 13164  			sLTP_buf_idx++
 13165  
 13166  			/* Save LPC residual */
 13167  			*(*int32)(unsafe.Pointer(sig_Q10_ptr + uintptr(i)*4)) = LPC_exc_Q10
 13168  		}
 13169  		sig_Q10_ptr += 4 * (uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length))
 13170  		/* Gradually reduce LTP gain */
 13171  		for j = 0; j < 5; j++ {
 13172  			*(*int16)(unsafe.Pointer(B_Q14 + uintptr(j)*2)) = (int16(((int32(int16(harm_Gain_Q15))) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + uintptr(j)*2))))) >> (15)))
 13173  		}
 13174  		/* Gradually reduce excitation gain */
 13175  		rand_scale_Q14 = (int16(((int32(rand_scale_Q14)) * (int32(int16(rand_Gain_Q15)))) >> (15)))
 13176  
 13177  		/* Slowly increase pitch lag */
 13178  		*(*int32)(unsafe.Pointer(psPLC /* &.pitchL_Q8 */)) += (((((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) >> 16) * (int32(int16(655)))) + (((((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) & 0x0000FFFF) * (int32(int16(655)))) >> 16))
 13179  		(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8 = SKP_min_32(tls, (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8, (((int32(int16(18))) * (int32(int16((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz)))) << (8)))
 13180  		lag = func() int32 {
 13181  			if (8) == 1 {
 13182  				return ((((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) >> 1) + (((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) & 1))
 13183  			}
 13184  			return (((((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) >> ((8) - 1)) + 1) >> 1)
 13185  		}()
 13186  	}
 13187  
 13188  	/***************************/
 13189  	/* LPC synthesis filtering */
 13190  	/***************************/
 13191  	sig_Q10_ptr = bp + 980 /* &sig_Q10[0] */
 13192  	/* Preload LPC coefficients into an array on the stack. Gives a small performance gain */
 13193  	libc.Xmemcpy(tls, bp+2900 /* &A_Q12_tmp */ /* &.as_int16 */, psPLC+14 /* &.prevLPC_Q12 */, (uint64((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) * uint64(unsafe.Sizeof(int16(0)))))
 13194  	/* check that unrolling works */
 13195  	for k = 0; k < 4; k++ {
 13196  		for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length; i++ {
 13197  			/* partly unrolled */
 13198  			LPC_pred_Q10 = ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-1))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */)))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-1))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */)))))) >> 16))
 13199  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-2))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 1*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-2))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 1*2))))) >> 16)))
 13200  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-3))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 2*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-3))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 2*2))))) >> 16)))
 13201  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-4))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 3*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-4))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 3*2))))) >> 16)))
 13202  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-5))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 4*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-5))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 4*2))))) >> 16)))
 13203  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-6))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 5*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-6))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 5*2))))) >> 16)))
 13204  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-7))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 6*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-7))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 6*2))))) >> 16)))
 13205  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-8))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 7*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-8))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 7*2))))) >> 16)))
 13206  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-9))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 8*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-9))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 8*2))))) >> 16)))
 13207  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-10))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 9*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-10))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 9*2))))) >> 16)))
 13208  
 13209  			for j = 10; j < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order; j++ {
 13210  				LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr((((16+i)-j)-1))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + uintptr(j)*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr((((16+i)-j)-1))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + uintptr(j)*2))))) >> 16)))
 13211  			}
 13212  			/* Add prediction to LPC residual */
 13213  			*(*int32)(unsafe.Pointer(sig_Q10_ptr + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer(sig_Q10_ptr + uintptr(i)*4))) + (LPC_pred_Q10))
 13214  
 13215  			/* Update states */
 13216  			*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr((16+i))*4)) = ((*(*int32)(unsafe.Pointer(sig_Q10_ptr + uintptr(i)*4))) << (4))
 13217  		}
 13218  		sig_Q10_ptr += 4 * (uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length))
 13219  		/* Update LPC filter state */
 13220  		libc.Xmemcpy(tls, psDec+4888 /* &.sLPC_Q14 */, ((psDec + 4888 /* &.sLPC_Q14 */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)*4), (uint64(16) * uint64(unsafe.Sizeof(int32(0)))))
 13221  	}
 13222  
 13223  	/* Scale with Gain */
 13224  	for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length; i++ {
 13225  		*(*int16)(unsafe.Pointer(signal + uintptr(i)*2)) = func() int16 {
 13226  			if (func() int32 {
 13227  				if (10) == 1 {
 13228  					return (((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13229  						if (16) == 1 {
 13230  							return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13231  						}
 13232  						return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13233  					}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13234  						if (16) == 1 {
 13235  							return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13236  						}
 13237  						return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13238  					}()))) & 1))
 13239  				}
 13240  				return ((((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13241  					if (16) == 1 {
 13242  						return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13243  					}
 13244  					return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13245  				}()))) >> ((10) - 1)) + 1) >> 1)
 13246  			}()) > 0x7FFF {
 13247  				return int16(0x7FFF)
 13248  			}
 13249  			return func() int16 {
 13250  				if (func() int32 {
 13251  					if (10) == 1 {
 13252  						return (((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13253  							if (16) == 1 {
 13254  								return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13255  							}
 13256  							return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13257  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13258  							if (16) == 1 {
 13259  								return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13260  							}
 13261  							return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13262  						}()))) & 1))
 13263  					}
 13264  					return ((((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13265  						if (16) == 1 {
 13266  							return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13267  						}
 13268  						return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13269  					}()))) >> ((10) - 1)) + 1) >> 1)
 13270  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 13271  					return libc.Int16FromInt32(0x8000)
 13272  				}
 13273  				return func() int16 {
 13274  					if (10) == 1 {
 13275  						return (int16(((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13276  							if (16) == 1 {
 13277  								return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13278  							}
 13279  							return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13280  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13281  							if (16) == 1 {
 13282  								return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13283  							}
 13284  							return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13285  						}()))) & 1)))
 13286  					}
 13287  					return (int16((((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13288  						if (16) == 1 {
 13289  							return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13290  						}
 13291  						return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13292  					}()))) >> ((10) - 1)) + 1) >> 1))
 13293  				}()
 13294  			}()
 13295  		}()
 13296  	}
 13297  
 13298  	/**************************************/
 13299  	/* Update states                      */
 13300  	/**************************************/
 13301  	(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Frand_seed = rand_seed
 13302  	(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FrandScale_Q14 = rand_scale_Q14
 13303  	for i = 0; i < 4; i++ {
 13304  		*(*int32)(unsafe.Pointer((psDecCtrl /* &.pitchL */) + uintptr(i)*4)) = lag
 13305  	}
 13306  }
 13307  
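// The long inline expressions in the concealment loops above are ccgo's
// expansion of SILK's fixed-point macros. As an illustrative sketch only (the
// helper names below are hypothetical and are not referenced by the generated
// code), the two idioms that recur throughout this file are a 32x16-bit
// multiply keeping the upper 32 bits of the Q16 product (SKP_SMULWB) and a
// right shift with rounding to nearest (SKP_RSHIFT_ROUND):

func smulwbSketch(a32 int32, b16 int16) int32 {
	// (a32 * b16) >> 16 without a 64-bit multiply: high half of a32 times b16,
	// plus the carried contribution of the low half.
	return (a32>>16)*int32(b16) + ((a32&0x0000FFFF)*int32(b16))>>16
}

func rshiftRoundSketch(x int32, shift uint) int32 {
	// Right shift with rounding; shift is assumed >= 1, and shift == 1 is
	// special-cased exactly as in the inlined func() literals above.
	if shift == 1 {
		return (x >> 1) + (x & 1)
	}
	return ((x >> (shift - 1)) + 1) >> 1
}
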
 13308  /* Glues concealed frames with new good received frames            */
 13309  func SKP_Silk_PLC_glue_frames(tls *libc.TLS, psDec uintptr, psDecCtrl uintptr, signal uintptr, length int32) { /* SKP_Silk_PLC.c:333:6: */
 13310  	bp := tls.Alloc(8)
 13311  	defer tls.Free(8)
 13312  
 13313  	var i int32
 13314  	// var energy_shift int32 at bp+4, 4
 13315  
 13316  	// var energy int32 at bp, 4
 13317  
 13318  	var psPLC uintptr
 13319  	psPLC = (psDec + 13632 /* &.sPLC */)
 13320  
 13321  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt != 0 {
 13322  		/* Calculate energy in concealed residual */
 13323  		SKP_Silk_sum_sqr_shift(tls, (psPLC + 60 /* &.conc_energy */), (psPLC + 64 /* &.conc_energy_shift */), signal, length)
 13324  
 13325  		(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Flast_frame_lost = 1
 13326  	} else {
 13327  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsPLC.Flast_frame_lost != 0 {
 13328  			/* Calculate residual in decoded signal if last frame was lost */
 13329  			SKP_Silk_sum_sqr_shift(tls, bp /* &energy */, bp+4 /* &energy_shift */, signal, length)
 13330  
 13331  			/* Normalize energies */
 13332  			if *(*int32)(unsafe.Pointer(bp + 4 /* energy_shift */)) > (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy_shift {
 13333  				(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy = (((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy) >> (*(*int32)(unsafe.Pointer(bp + 4 /* energy_shift */)) - (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy_shift))
 13334  			} else if *(*int32)(unsafe.Pointer(bp + 4 /* energy_shift */)) < (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy_shift {
 13335  				*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) >> ((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy_shift - *(*int32)(unsafe.Pointer(bp + 4 /* energy_shift */))))
 13336  			}
 13337  
 13338  			/* Fade in the energy difference */
 13339  			if *(*int32)(unsafe.Pointer(bp /* energy */)) > (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy {
 13340  				var frac_Q24 int32
 13341  				var LZ int32
 13342  				var gain_Q12 int32
 13343  				var slope_Q12 int32
 13344  
 13345  				LZ = SKP_Silk_CLZ32(tls, (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy)
 13346  				LZ = (LZ - 1)
 13347  				(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy = (((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy) << (LZ))
 13348  				*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) >> (SKP_max_32(tls, (24 - LZ), 0)))
 13349  
 13350  				frac_Q24 = (((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy) / (func() int32 {
 13351  					if (*(*int32)(unsafe.Pointer(bp /* energy */))) > (1) {
 13352  						return *(*int32)(unsafe.Pointer(bp /* energy */))
 13353  					}
 13354  					return 1
 13355  				}()))
 13356  
 13357  				gain_Q12 = SKP_Silk_SQRT_APPROX(tls, frac_Q24)
 13358  				slope_Q12 = (((int32(1) << 12) - gain_Q12) / (length))
 13359  
 13360  				for i = 0; i < length; i++ {
 13361  					*(*int16)(unsafe.Pointer(signal + uintptr(i)*2)) = (int16(((gain_Q12) * (int32(*(*int16)(unsafe.Pointer(signal + uintptr(i)*2))))) >> (12)))
 13362  					gain_Q12 = gain_Q12 + (slope_Q12)
 13363  					gain_Q12 = func() int32 {
 13364  						if (gain_Q12) < (int32(1) << 12) {
 13365  							return gain_Q12
 13366  						}
 13367  						return (int32(1) << 12)
 13368  					}()
 13369  				}
 13370  			}
 13371  		}
 13372  		(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Flast_frame_lost = 0
 13373  
 13374  	}
 13375  }
 13376  
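// In floating point, the fade applied above when the first good frame arrives
// louder than the concealed one amounts to roughly the following (illustrative
// sketch only; the generated code works in Q12/Q24 fixed point and uses
// SKP_Silk_SQRT_APPROX):
//
//	gain := math.Sqrt(concEnergy / energy) // < 1.0 when energy > concEnergy
//	slope := (1.0 - gain) / float64(length)
//	for i := 0; i < length; i++ {
//		signal[i] = int16(gain * float64(signal[i]))
//		gain = math.Min(gain+slope, 1.0)
//	}
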
 13377  func SKP_Silk_warped_LPC_analysis_filter_FIX(tls *libc.TLS, state uintptr, res uintptr, coef_Q13 uintptr, input uintptr, lambda_Q16 int16, length int32, order int32) { /* SKP_Silk_prefilter_FIX.c:42:6: */
 13378  	var n int32
 13379  	var i int32
 13380  	var acc_Q11 int32
 13381  	var tmp1 int32
 13382  	var tmp2 int32
 13383  
 13384  	/* Order must be even */
 13385  
 13386  	for n = 0; n < length; n++ {
 13387  		/* Output of lowpass section */
 13388  		tmp2 = ((*(*int32)(unsafe.Pointer(state))) + ((((*(*int32)(unsafe.Pointer(state + 1*4))) >> 16) * (int32(lambda_Q16))) + ((((*(*int32)(unsafe.Pointer(state + 1*4))) & 0x0000FFFF) * (int32(lambda_Q16))) >> 16)))
 13389  		*(*int32)(unsafe.Pointer(state)) = ((int32(*(*int16)(unsafe.Pointer(input + uintptr(n)*2)))) << (14))
 13390  		/* Output of allpass section */
 13391  		tmp1 = ((*(*int32)(unsafe.Pointer(state + 1*4))) + ((((*(*int32)(unsafe.Pointer(state + 2*4)) - tmp2) >> 16) * (int32(lambda_Q16))) + ((((*(*int32)(unsafe.Pointer(state + 2*4)) - tmp2) & 0x0000FFFF) * (int32(lambda_Q16))) >> 16)))
 13392  		*(*int32)(unsafe.Pointer(state + 1*4)) = tmp2
 13393  		acc_Q11 = ((((tmp2) >> 16) * (int32(*(*int16)(unsafe.Pointer(coef_Q13))))) + ((((tmp2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(coef_Q13))))) >> 16))
 13394  		/* Loop over allpass sections */
 13395  		for i = 2; i < order; i = i + (2) {
 13396  			/* Output of allpass section */
 13397  			tmp2 = ((*(*int32)(unsafe.Pointer(state + uintptr(i)*4))) + ((((*(*int32)(unsafe.Pointer(state + uintptr((i+1))*4)) - tmp1) >> 16) * (int32(lambda_Q16))) + ((((*(*int32)(unsafe.Pointer(state + uintptr((i+1))*4)) - tmp1) & 0x0000FFFF) * (int32(lambda_Q16))) >> 16)))
 13398  			*(*int32)(unsafe.Pointer(state + uintptr(i)*4)) = tmp1
 13399  			acc_Q11 = ((acc_Q11) + ((((tmp1) >> 16) * (int32(*(*int16)(unsafe.Pointer(coef_Q13 + uintptr((i-1))*2))))) + ((((tmp1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(coef_Q13 + uintptr((i-1))*2))))) >> 16)))
 13400  			/* Output of allpass section */
 13401  			tmp1 = ((*(*int32)(unsafe.Pointer(state + uintptr((i+1))*4))) + ((((*(*int32)(unsafe.Pointer(state + uintptr((i+2))*4)) - tmp2) >> 16) * (int32(lambda_Q16))) + ((((*(*int32)(unsafe.Pointer(state + uintptr((i+2))*4)) - tmp2) & 0x0000FFFF) * (int32(lambda_Q16))) >> 16)))
 13402  			*(*int32)(unsafe.Pointer(state + uintptr((i+1))*4)) = tmp2
 13403  			acc_Q11 = ((acc_Q11) + ((((tmp2) >> 16) * (int32(*(*int16)(unsafe.Pointer(coef_Q13 + uintptr(i)*2))))) + ((((tmp2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(coef_Q13 + uintptr(i)*2))))) >> 16)))
 13404  		}
 13405  		*(*int32)(unsafe.Pointer(state + uintptr(order)*4)) = tmp1
 13406  		acc_Q11 = ((acc_Q11) + ((((tmp1) >> 16) * (int32(*(*int16)(unsafe.Pointer(coef_Q13 + uintptr((order-1))*2))))) + ((((tmp1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(coef_Q13 + uintptr((order-1))*2))))) >> 16)))
 13407  		*(*int16)(unsafe.Pointer(res + uintptr(n)*2)) = func() int16 {
 13408  			if (int32(*(*int16)(unsafe.Pointer(input + uintptr(n)*2))) - (func() int32 {
 13409  				if (11) == 1 {
 13410  					return (((acc_Q11) >> 1) + ((acc_Q11) & 1))
 13411  				}
 13412  				return ((((acc_Q11) >> ((11) - 1)) + 1) >> 1)
 13413  			}())) > 0x7FFF {
 13414  				return int16(0x7FFF)
 13415  			}
 13416  			return func() int16 {
 13417  				if (int32(*(*int16)(unsafe.Pointer(input + uintptr(n)*2))) - (func() int32 {
 13418  					if (11) == 1 {
 13419  						return (((acc_Q11) >> 1) + ((acc_Q11) & 1))
 13420  					}
 13421  					return ((((acc_Q11) >> ((11) - 1)) + 1) >> 1)
 13422  				}())) < (int32(libc.Int16FromInt32(0x8000))) {
 13423  					return libc.Int16FromInt32(0x8000)
 13424  				}
 13425  				return (int16(int32(*(*int16)(unsafe.Pointer(input + uintptr(n)*2))) - (func() int32 {
 13426  					if (11) == 1 {
 13427  						return (((acc_Q11) >> 1) + ((acc_Q11) & 1))
 13428  					}
 13429  					return ((((acc_Q11) >> ((11) - 1)) + 1) >> 1)
 13430  				}())))
 13431  			}()
 13432  		}()
 13433  	}
 13434  }
 13435  
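// Each pair of statements in the inner loop above is one first-order allpass
// (warped delay) section. Written out plainly (illustrative sketch; the
// generated code keeps lambda in Q16 and expands the multiply inline):
//
//	out := state[i] + lambda*(state[i+1]-in) // allpass output of this section
//	state[i] = in                            // advance the warped delay line
//	acc += coef[i-1] * in                    // FIR tap on the warped delay line
//
// where in is the output of the previous section and out feeds the next one.
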
 13436  func SKP_Silk_prefilter_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr, xw uintptr, x uintptr) { /* SKP_Silk_prefilter_FIX.c:83:6: */
 13437  	bp := tls.Alloc(756)
 13438  	defer tls.Free(756)
 13439  
 13440  	var P uintptr = (psEnc + 19592 /* &.sPrefilt */)
 13441  	var j int32
 13442  	var k int32
 13443  	var lag int32
 13444  	var tmp_32 int32
 13445  	var AR1_shp_Q13 uintptr
 13446  	var px uintptr
 13447  	var pxw uintptr
 13448  	var HarmShapeGain_Q12 int32
 13449  	var Tilt_Q14 int32
 13450  	var HarmShapeFIRPacked_Q12 int32
 13451  	var LF_shp_Q14 int32
 13452  	// var x_filt_Q12 [120]int32 at bp+276, 480
 13453  
 13454  	// var st_res [136]int16 at bp, 272
 13455  
 13456  	// var B_Q12 [2]int16 at bp+272, 4
 13457  
 13458  	/* Setup pointers */
 13459  	px = x
 13460  	pxw = xw
 13461  	lag = (*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FlagPrev
 13462  	for k = 0; k < 4; k++ {
 13463  		/* Update variables that change per subframe */
 13464  		if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
 13465  			lag = *(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 108 /* &.pitchL */) + uintptr(k)*4))
 13466  		}
 13467  
 13468  		/* Noise shape parameters */
 13469  		HarmShapeGain_Q12 = ((((*(*int32)(unsafe.Pointer((psEncCtrl + 572 /* &.HarmShapeGain_Q14 */) + uintptr(k)*4))) >> 16) * (int32((int16(16384 - *(*int32)(unsafe.Pointer((psEncCtrl + 540 /* &.HarmBoost_Q14 */) + uintptr(k)*4))))))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 572 /* &.HarmShapeGain_Q14 */) + uintptr(k)*4))) & 0x0000FFFF) * (int32((int16(16384 - *(*int32)(unsafe.Pointer((psEncCtrl + 540 /* &.HarmBoost_Q14 */) + uintptr(k)*4))))))) >> 16))
 13470  
 13471  		HarmShapeFIRPacked_Q12 = ((HarmShapeGain_Q12) >> (2))
 13472  		HarmShapeFIRPacked_Q12 = HarmShapeFIRPacked_Q12 | (((HarmShapeGain_Q12) >> (1)) << (16))
 13473  		Tilt_Q14 = *(*int32)(unsafe.Pointer((psEncCtrl + 556 /* &.Tilt_Q14 */) + uintptr(k)*4))
 13474  		LF_shp_Q14 = *(*int32)(unsafe.Pointer((psEncCtrl + 508 /* &.LF_shp_Q14 */) + uintptr(k)*4))
 13475  		AR1_shp_Q13 = ((psEncCtrl + 252 /* &.AR1_Q13 */) + uintptr((k*16))*2)
 13476  
 13477  		/* Short term FIR filtering*/
 13478  		SKP_Silk_warped_LPC_analysis_filter_FIX(tls, P+1024 /* &.sAR_shp */, bp /* &st_res[0] */, AR1_shp_Q13, px,
 13479  			int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fwarping_Q16), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 13480  
 13481  		/* reduce (mainly) low frequencies during harmonic emphasis */
 13482  		*(*int16)(unsafe.Pointer(bp + 272 /* &B_Q12[0] */)) = func() int16 {
 13483  			if (2) == 1 {
 13484  				return (int16(((*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4))) & 1)))
 13485  			}
 13486  			return (int16((((*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4))) >> ((2) - 1)) + 1) >> 1))
 13487  		}()
 13488  		tmp_32 = ((SKP_FIX_CONST(tls, 0.05, 26)) + ((int32(int16(*(*int32)(unsafe.Pointer((psEncCtrl + 540 /* &.HarmBoost_Q14 */) + uintptr(k)*4))))) * (int32(int16(HarmShapeGain_Q12)))))                                                                                       /* Q26 */
 13489  		tmp_32 = ((tmp_32) + ((int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14))) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 12))))))                                                                                                      /* Q26 */
 13490  		tmp_32 = ((((tmp_32) >> 16) * (int32(int16(-*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4)))))) + ((((tmp_32) & 0x0000FFFF) * (int32(int16(-*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4)))))) >> 16)) /* Q24 */
 13491  		tmp_32 = func() int32 {
 13492  			if (12) == 1 {
 13493  				return (((tmp_32) >> 1) + ((tmp_32) & 1))
 13494  			}
 13495  			return ((((tmp_32) >> ((12) - 1)) + 1) >> 1)
 13496  		}() /* Q12 */
 13497  		*(*int16)(unsafe.Pointer(bp + 272 /* &B_Q12[0] */ + 1*2)) = func() int16 {
 13498  			if (tmp_32) > 0x7FFF {
 13499  				return int16(0x7FFF)
 13500  			}
 13501  			return func() int16 {
 13502  				if (tmp_32) < (int32(libc.Int16FromInt32(0x8000))) {
 13503  					return libc.Int16FromInt32(0x8000)
 13504  				}
 13505  				return int16(tmp_32)
 13506  			}()
 13507  		}()
 13508  
 13509  		*(*int32)(unsafe.Pointer(bp + 276 /* &x_filt_Q12[0] */)) = (((int32(*(*int16)(unsafe.Pointer(bp /* &st_res[0] */)))) * (int32(*(*int16)(unsafe.Pointer(bp + 272 /* &B_Q12[0] */))))) + ((int32(int16((*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsHarmHP))) * (int32(*(*int16)(unsafe.Pointer(bp + 272 /* &B_Q12[0] */ + 1*2))))))
 13510  		for j = 1; j < (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length; j++ {
 13511  			*(*int32)(unsafe.Pointer(bp + 276 /* &x_filt_Q12[0] */ + uintptr(j)*4)) = (((int32(*(*int16)(unsafe.Pointer(bp /* &st_res[0] */ + uintptr(j)*2)))) * (int32(*(*int16)(unsafe.Pointer(bp + 272 /* &B_Q12[0] */))))) + ((int32(*(*int16)(unsafe.Pointer(bp /* &st_res[0] */ + uintptr((j-1))*2)))) * (int32(*(*int16)(unsafe.Pointer(bp + 272 /* &B_Q12[0] */ + 1*2))))))
 13512  		}
 13513  		(*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsHarmHP = int32(*(*int16)(unsafe.Pointer(bp /* &st_res[0] */ + uintptr(((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length-1))*2)))
 13514  
 13515  		SKP_Silk_prefilt_FIX(tls, P, bp+276 /* &x_filt_Q12[0] */, pxw, HarmShapeFIRPacked_Q12, Tilt_Q14,
 13516  			LF_shp_Q14, lag, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length)
 13517  
 13518  		px += 2 * (uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length))
 13519  		pxw += 2 * (uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length))
 13520  	}
 13521  
 13522  	(*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FlagPrev = *(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 108 /* &.pitchL */) + 3*4))
 13523  }
 13524  
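// HarmShapeFIRPacked_Q12 above packs a 3-tap harmonic shaping filter into a
// single int32: the low 16 bits carry HarmShapeGain/4 and the high 16 bits
// carry HarmShapeGain/2. SKP_Silk_prefilt_FIX below unpacks it again, so the
// long-term shaping is effectively a [0.25, 0.5, 0.25]*HarmShapeGain filter
// centred on the pitch lag (illustrative note, not part of the generated code).
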
 13525  /* SKP_Silk_prefilter. Prefilter for finding Quantizer input signal                           */
 13526  func SKP_Silk_prefilt_FIX(tls *libc.TLS, P uintptr, st_res_Q12 uintptr, xw uintptr, HarmShapeFIRPacked_Q12 int32, Tilt_Q14 int32, LF_shp_Q14 int32, lag int32, length int32) { /* SKP_Silk_prefilter_FIX.c:150:17: */
 13527  	var i int32
 13528  	var idx int32
 13529  	var LTP_shp_buf_idx int32
 13530  	var n_LTP_Q12 int32
 13531  	var n_Tilt_Q10 int32
 13532  	var n_LF_Q10 int32
 13533  	var sLF_MA_shp_Q12 int32
 13534  	var sLF_AR_shp_Q12 int32
 13535  	var LTP_shp_buf uintptr
 13536  
 13537  	/* To speed things up, use temp variables instead of accessing the struct */
 13538  	LTP_shp_buf = P /* &.sLTP_shp */
 13539  	LTP_shp_buf_idx = (*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsLTP_shp_buf_idx
 13540  	sLF_AR_shp_Q12 = (*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsLF_AR_shp_Q12
 13541  	sLF_MA_shp_Q12 = (*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsLF_MA_shp_Q12
 13542  
 13543  	for i = 0; i < length; i++ {
 13544  		if lag > 0 {
 13545  			/* unrolled loop */
 13546  
 13547  			idx = (lag + LTP_shp_buf_idx)
 13548  			n_LTP_Q12 = ((int32(*(*int16)(unsafe.Pointer(LTP_shp_buf + uintptr((((idx-(3/2))-1)&(512-1)))*2)))) * (int32(int16(HarmShapeFIRPacked_Q12))))
 13549  			n_LTP_Q12 = ((n_LTP_Q12) + ((int32(*(*int16)(unsafe.Pointer(LTP_shp_buf + uintptr(((idx-(3/2))&(512-1)))*2)))) * ((HarmShapeFIRPacked_Q12) >> 16)))
 13550  			n_LTP_Q12 = ((n_LTP_Q12) + ((int32(*(*int16)(unsafe.Pointer(LTP_shp_buf + uintptr((((idx-(3/2))+1)&(512-1)))*2)))) * (int32(int16(HarmShapeFIRPacked_Q12)))))
 13551  		} else {
 13552  			n_LTP_Q12 = 0
 13553  		}
 13554  
 13555  		n_Tilt_Q10 = ((((sLF_AR_shp_Q12) >> 16) * (int32(int16(Tilt_Q14)))) + ((((sLF_AR_shp_Q12) & 0x0000FFFF) * (int32(int16(Tilt_Q14)))) >> 16))
 13556  		n_LF_Q10 = (((((sLF_AR_shp_Q12) >> 16) * ((LF_shp_Q14) >> 16)) + ((((sLF_AR_shp_Q12) & 0x0000FFFF) * ((LF_shp_Q14) >> 16)) >> 16)) + ((((sLF_MA_shp_Q12) >> 16) * (int32(int16(LF_shp_Q14)))) + ((((sLF_MA_shp_Q12) & 0x0000FFFF) * (int32(int16(LF_shp_Q14)))) >> 16)))
 13557  
 13558  		sLF_AR_shp_Q12 = ((*(*int32)(unsafe.Pointer(st_res_Q12 + uintptr(i)*4))) - ((n_Tilt_Q10) << (2)))
 13559  		sLF_MA_shp_Q12 = ((sLF_AR_shp_Q12) - ((n_LF_Q10) << (2)))
 13560  
 13561  		LTP_shp_buf_idx = ((LTP_shp_buf_idx - 1) & (512 - 1))
 13562  		*(*int16)(unsafe.Pointer(LTP_shp_buf + uintptr(LTP_shp_buf_idx)*2)) = func() int16 {
 13563  			if (func() int32 {
 13564  				if (12) == 1 {
 13565  					return (((sLF_MA_shp_Q12) >> 1) + ((sLF_MA_shp_Q12) & 1))
 13566  				}
 13567  				return ((((sLF_MA_shp_Q12) >> ((12) - 1)) + 1) >> 1)
 13568  			}()) > 0x7FFF {
 13569  				return int16(0x7FFF)
 13570  			}
 13571  			return func() int16 {
 13572  				if (func() int32 {
 13573  					if (12) == 1 {
 13574  						return (((sLF_MA_shp_Q12) >> 1) + ((sLF_MA_shp_Q12) & 1))
 13575  					}
 13576  					return ((((sLF_MA_shp_Q12) >> ((12) - 1)) + 1) >> 1)
 13577  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 13578  					return libc.Int16FromInt32(0x8000)
 13579  				}
 13580  				return func() int16 {
 13581  					if (12) == 1 {
 13582  						return (int16(((sLF_MA_shp_Q12) >> 1) + ((sLF_MA_shp_Q12) & 1)))
 13583  					}
 13584  					return (int16((((sLF_MA_shp_Q12) >> ((12) - 1)) + 1) >> 1))
 13585  				}()
 13586  			}()
 13587  		}()
 13588  
 13589  		*(*int16)(unsafe.Pointer(xw + uintptr(i)*2)) = func() int16 {
 13590  			if (func() int32 {
 13591  				if (12) == 1 {
 13592  					return ((((sLF_MA_shp_Q12) - (n_LTP_Q12)) >> 1) + (((sLF_MA_shp_Q12) - (n_LTP_Q12)) & 1))
 13593  				}
 13594  				return (((((sLF_MA_shp_Q12) - (n_LTP_Q12)) >> ((12) - 1)) + 1) >> 1)
 13595  			}()) > 0x7FFF {
 13596  				return int16(0x7FFF)
 13597  			}
 13598  			return func() int16 {
 13599  				if (func() int32 {
 13600  					if (12) == 1 {
 13601  						return ((((sLF_MA_shp_Q12) - (n_LTP_Q12)) >> 1) + (((sLF_MA_shp_Q12) - (n_LTP_Q12)) & 1))
 13602  					}
 13603  					return (((((sLF_MA_shp_Q12) - (n_LTP_Q12)) >> ((12) - 1)) + 1) >> 1)
 13604  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 13605  					return libc.Int16FromInt32(0x8000)
 13606  				}
 13607  				return func() int16 {
 13608  					if (12) == 1 {
 13609  						return (int16((((sLF_MA_shp_Q12) - (n_LTP_Q12)) >> 1) + (((sLF_MA_shp_Q12) - (n_LTP_Q12)) & 1)))
 13610  					}
 13611  					return (int16(((((sLF_MA_shp_Q12) - (n_LTP_Q12)) >> ((12) - 1)) + 1) >> 1))
 13612  				}()
 13613  			}()
 13614  		}()
 13615  	}
 13616  
 13617  	/* Copy temp variable back to state */
 13618  	(*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsLF_AR_shp_Q12 = sLF_AR_shp_Q12
 13619  	(*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsLF_MA_shp_Q12 = sLF_MA_shp_Q12
 13620  	(*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsLTP_shp_buf_idx = LTP_shp_buf_idx
 13621  }
 13622  
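// The triple-nested func() literals used for every int16 store in the filter
// routines above are ccgo's expansion of SKP_SAT16(...) wrapped around a Q12
// rounding shift. A hypothetical plain-Go equivalent of the saturation step
// (not referenced by the generated code):

func sat16Sketch(x int32) int16 {
	// Clamp a 32-bit intermediate to the int16 range.
	if x > 0x7FFF {
		return 0x7FFF
	}
	if x < -0x8000 {
		return -0x8000
	}
	return int16(x)
}
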
 13623  /***********************************************************************
 13624  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
 13625  Redistribution and use in source and binary forms, with or without
 13626  modification, (subject to the limitations in the disclaimer below)
 13627  are permitted provided that the following conditions are met:
 13628  - Redistributions of source code must retain the above copyright notice,
 13629  this list of conditions and the following disclaimer.
 13630  - Redistributions in binary form must reproduce the above copyright
 13631  notice, this list of conditions and the following disclaimer in the
 13632  documentation and/or other materials provided with the distribution.
 13633  - Neither the name of Skype Limited, nor the names of specific
 13634  contributors, may be used to endorse or promote products derived from
 13635  this software without specific prior written permission.
 13636  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
 13637  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 13638  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
 13639  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 13640  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 13641  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 13642  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 13643  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 13644  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 13645  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 13646  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 13647  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 13648  ***********************************************************************/
 13649  
 13650  /*******************/
 13651  /* Pitch estimator */
 13652  /*******************/
 13653  
 13654  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
 13655  
 13656  /* Bandwidth expansion for whitening filter in pitch analysis */
 13657  
 13658  /* Threshold used by pitch estimator for early escape */
 13659  
 13660  /*********************/
 13661  /* Linear prediction */
 13662  /*********************/
 13663  
 13664  /* LPC analysis defines: regularization and bandwidth expansion */
 13665  
 13666  /* LTP analysis defines */
 13667  
 13668  /* LTP quantization settings */
 13669  
 13670  /***********************/
 13671  /* High pass filtering */
 13672  /***********************/
 13673  
 13674  /* Smoothing parameters for low end of pitch frequency range estimation */
 13675  
 13676  /* Min and max values for low end of pitch frequency range estimation */
 13677  
 13678  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
 13679  
 13680  /***********/
 13681  /* Various */
 13682  /***********/
 13683  
 13684  /* Required speech activity for counting frame as active */
 13685  
 13686  /* Speech Activity LBRR enable threshold (needs tuning) */
 13687  
 13688  /*************************/
 13689  /* Perceptual parameters */
 13690  /*************************/
 13691  
 13692  /* reduction in coding SNR during low speech activity */
 13693  
 13694  /* factor for reducing quantization noise during voiced speech */
 13695  
 13696  /* factor for reducing quantization noise for unvoiced sparse signals */
 13697  
 13698  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
 13699  
 13700  /* warping control */
 13701  
 13702  /* fraction added to first autocorrelation value */
 13703  
 13704  /* noise shaping filter chirp factor */
 13705  
 13706  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
 13707  
 13708  /* gain reduction for fricatives */
 13709  
 13710  /* extra harmonic boosting (signal shaping) at low bitrates */
 13711  
 13712  /* extra harmonic boosting (signal shaping) for noisy input signals */
 13713  
 13714  /* harmonic noise shaping */
 13715  
 13716  /* extra harmonic noise shaping for high bitrates or noisy input */
 13717  
 13718  /* parameter for shaping noise towards higher frequencies */
 13719  
 13720  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
 13721  
 13722  /* parameter for applying a high-pass tilt to the input signal */
 13723  
 13724  /* parameter for extra high-pass tilt to the input signal at high rates */
 13725  
 13726  /* parameter for reducing noise at the very low frequencies */
 13727  
 13728  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
 13729  
 13730  /* noise floor to put a lower limit on the quantization step size */
 13731  
 13732  /* noise floor relative to active speech gain level */
 13733  
 13734  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
 13735  
 13736  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
 13737  
 13738  /* parameters defining the R/D tradeoff in the residual quantizer */
 13739  
 13740  /* Processing of gains */
 13741  func SKP_Silk_process_gains_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr) { /* SKP_Silk_process_gains_FIX.c:32:6: */
 13742  	var psShapeSt uintptr = (psEnc + 19576 /* &.sShape */)
 13743  	var k int32
 13744  	var s_Q16 int32
 13745  	var InvMaxSqrVal_Q16 int32
 13746  	var gain int32
 13747  	var gain_squared int32
 13748  	var ResNrg int32
 13749  	var ResNrgPart int32
 13750  	var quant_offset_Q10 int32
 13751  
 13752  	/* Gain reduction when LTP coding gain is high */
 13753  	if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
 13754  		/*s = -0.5f * SKP_sigmoid( 0.25f * ( psEncCtrl->LTPredCodGain - 12.0f ) ); */
 13755  		s_Q16 = -SKP_Silk_sigm_Q15(tls, func() int32 {
 13756  			if (4) == 1 {
 13757  				return ((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7 - SKP_FIX_CONST(tls, 12.0, 7)) >> 1) + (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7 - SKP_FIX_CONST(tls, 12.0, 7)) & 1))
 13758  			}
 13759  			return (((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7 - SKP_FIX_CONST(tls, 12.0, 7)) >> ((4) - 1)) + 1) >> 1)
 13760  		}())
 13761  		for k = 0; k < 4; k++ {
 13762  			*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) >> 16) * (int32(int16(s_Q16)))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(s_Q16)))) >> 16)))
 13763  		}
 13764  	}
 13765  
 13766  	/* Limit the quantized signal */
 13767  	InvMaxSqrVal_Q16 = ((SKP_Silk_log2lin(tls, ((((SKP_FIX_CONST(tls, 70.0, 7) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.33, 16))))) + ((((SKP_FIX_CONST(tls, 70.0, 7) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.33, 16))))) >> 16)))) / ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length))
 13768  
 13769  	for k = 0; k < 4; k++ {
 13770  		/* Soft limit on the ratio of residual energy to squared gains */
 13771  		ResNrg = *(*int32)(unsafe.Pointer((psEncCtrl + 640 /* &.ResNrg */) + uintptr(k)*4))
 13772  		ResNrgPart = (((((ResNrg) >> 16) * (int32(int16(InvMaxSqrVal_Q16)))) + ((((ResNrg) & 0x0000FFFF) * (int32(int16(InvMaxSqrVal_Q16)))) >> 16)) + ((ResNrg) * (func() int32 {
 13773  			if (16) == 1 {
 13774  				return (((InvMaxSqrVal_Q16) >> 1) + ((InvMaxSqrVal_Q16) & 1))
 13775  			}
 13776  			return ((((InvMaxSqrVal_Q16) >> ((16) - 1)) + 1) >> 1)
 13777  		}())))
 13778  		if *(*int32)(unsafe.Pointer((psEncCtrl + 656 /* &.ResNrgQ */) + uintptr(k)*4)) > 0 {
 13779  			if *(*int32)(unsafe.Pointer((psEncCtrl + 656 /* &.ResNrgQ */) + uintptr(k)*4)) < 32 {
 13780  				ResNrgPart = func() int32 {
 13781  					if (*(*int32)(unsafe.Pointer((psEncCtrl + 656 /* &.ResNrgQ */) + uintptr(k)*4))) == 1 {
 13782  						return (((ResNrgPart) >> 1) + ((ResNrgPart) & 1))
 13783  					}
 13784  					return ((((ResNrgPart) >> ((*(*int32)(unsafe.Pointer((psEncCtrl + 656 /* &.ResNrgQ */) + uintptr(k)*4))) - 1)) + 1) >> 1)
 13785  				}()
 13786  			} else {
 13787  				ResNrgPart = 0
 13788  			}
 13789  		} else if *(*int32)(unsafe.Pointer((psEncCtrl + 656 /* &.ResNrgQ */) + uintptr(k)*4)) != 0 {
 13790  			if ResNrgPart > (int32((0x7FFFFFFF)) >> (-*(*int32)(unsafe.Pointer((psEncCtrl + 656 /* &.ResNrgQ */) + uintptr(k)*4)))) {
 13791  				ResNrgPart = 0x7FFFFFFF
 13792  			} else {
 13793  				ResNrgPart = ((ResNrgPart) << (-*(*int32)(unsafe.Pointer((psEncCtrl + 656 /* &.ResNrgQ */) + uintptr(k)*4))))
 13794  			}
 13795  		}
 13796  		gain = *(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))
 13797  		gain_squared = func() int32 {
 13798  			if ((uint32((ResNrgPart) + (int32(((int64_t(gain)) * (int64_t(gain))) >> (32))))) & 0x80000000) == uint32(0) {
 13799  				return func() int32 {
 13800  					if ((uint32((ResNrgPart) & (int32(((int64_t(gain)) * (int64_t(gain))) >> (32))))) & 0x80000000) != uint32(0) {
 13801  						return libc.Int32FromUint32(0x80000000)
 13802  					}
 13803  					return ((ResNrgPart) + (int32(((int64_t(gain)) * (int64_t(gain))) >> (32))))
 13804  				}()
 13805  			}
 13806  			return func() int32 {
 13807  				if ((uint32((ResNrgPart) | (int32(((int64_t(gain)) * (int64_t(gain))) >> (32))))) & 0x80000000) == uint32(0) {
 13808  					return 0x7FFFFFFF
 13809  				}
 13810  				return ((ResNrgPart) + (int32(((int64_t(gain)) * (int64_t(gain))) >> (32))))
 13811  			}()
 13812  		}()
 13813  		if gain_squared < 0x7FFF {
 13814  			/* recalculate with higher precision */
 13815  			gain_squared = ((((ResNrgPart) << (16)) + ((((gain) >> 16) * (int32(int16(gain)))) + ((((gain) & 0x0000FFFF) * (int32(int16(gain)))) >> 16))) + ((gain) * (func() int32 {
 13816  				if (16) == 1 {
 13817  					return (((gain) >> 1) + ((gain) & 1))
 13818  				}
 13819  				return ((((gain) >> ((16) - 1)) + 1) >> 1)
 13820  			}())))
 13821  
 13822  			gain = SKP_Silk_SQRT_APPROX(tls, gain_squared) /* Q8   */
 13823  			*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = ((func() int32 {
 13824  				if (int32((libc.Int32FromUint32(0x80000000))) >> (8)) > (int32((0x7FFFFFFF)) >> (8)) {
 13825  					return func() int32 {
 13826  						if (gain) > (int32((libc.Int32FromUint32(0x80000000))) >> (8)) {
 13827  							return (int32((libc.Int32FromUint32(0x80000000))) >> (8))
 13828  						}
 13829  						return func() int32 {
 13830  							if (gain) < (int32((0x7FFFFFFF)) >> (8)) {
 13831  								return (int32((0x7FFFFFFF)) >> (8))
 13832  							}
 13833  							return gain
 13834  						}()
 13835  					}()
 13836  				}
 13837  				return func() int32 {
 13838  					if (gain) > (int32((0x7FFFFFFF)) >> (8)) {
 13839  						return (int32((0x7FFFFFFF)) >> (8))
 13840  					}
 13841  					return func() int32 {
 13842  						if (gain) < (int32((libc.Int32FromUint32(0x80000000))) >> (8)) {
 13843  							return (int32((libc.Int32FromUint32(0x80000000))) >> (8))
 13844  						}
 13845  						return gain
 13846  					}()
 13847  				}()
 13848  			}()) << (8)) /* Q16  */
 13849  		} else {
 13850  			gain = SKP_Silk_SQRT_APPROX(tls, gain_squared) /* Q0   */
 13851  			*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = ((func() int32 {
 13852  				if (int32((libc.Int32FromUint32(0x80000000))) >> (16)) > (int32((0x7FFFFFFF)) >> (16)) {
 13853  					return func() int32 {
 13854  						if (gain) > (int32((libc.Int32FromUint32(0x80000000))) >> (16)) {
 13855  							return (int32((libc.Int32FromUint32(0x80000000))) >> (16))
 13856  						}
 13857  						return func() int32 {
 13858  							if (gain) < (int32((0x7FFFFFFF)) >> (16)) {
 13859  								return (int32((0x7FFFFFFF)) >> (16))
 13860  							}
 13861  							return gain
 13862  						}()
 13863  					}()
 13864  				}
 13865  				return func() int32 {
 13866  					if (gain) > (int32((0x7FFFFFFF)) >> (16)) {
 13867  						return (int32((0x7FFFFFFF)) >> (16))
 13868  					}
 13869  					return func() int32 {
 13870  						if (gain) < (int32((libc.Int32FromUint32(0x80000000))) >> (16)) {
 13871  							return (int32((libc.Int32FromUint32(0x80000000))) >> (16))
 13872  						}
 13873  						return gain
 13874  					}()
 13875  				}()
 13876  			}()) << (16)) /* Q16  */
 13877  		}
 13878  	}
 13879  
 13880  	/* Noise shaping quantization */
 13881  	SKP_Silk_gains_quant(tls, psEncCtrl /* &.sCmn */ +72 /* &.GainsIndices */, psEncCtrl+128, /* &.Gains_Q16 */
 13882  		(psShapeSt /* &.LastGainIndex */), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf)
 13883  	/* Set quantizer offset for voiced signals. Larger offset when LTP coding gain is low or tilt is high (i.e. low-pass) */
 13884  	if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
 13885  		if ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7 + (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15) >> (8))) > SKP_FIX_CONST(tls, 1.0, 7) {
 13886  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FQuantOffsetType = 0
 13887  		} else {
 13888  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FQuantOffsetType = 1
 13889  		}
 13890  	}
 13891  
 13892  	/* Quantizer boundary adjustment */
 13893  	quant_offset_Q10 = int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Quantization_Offsets_Q10)) + uintptr((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype)*4) + uintptr((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FQuantOffsetType)*2)))
 13894  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLambda_Q10 = (((((SKP_FIX_CONST(tls, 1.2, 10) +
 13895  		((int32(int16(SKP_FIX_CONST(tls, float64(-0.05), 10)))) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnStatesDelayedDecision))))) +
 13896  		((((SKP_FIX_CONST(tls, float64(-0.3), 18)) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) + ((((SKP_FIX_CONST(tls, float64(-0.3), 18)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) >> 16))) +
 13897  		((((SKP_FIX_CONST(tls, float64(-0.2), 12)) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14)))) + ((((SKP_FIX_CONST(tls, float64(-0.2), 12)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14)))) >> 16))) +
 13898  		((((SKP_FIX_CONST(tls, float64(-0.1), 12)) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14)))) + ((((SKP_FIX_CONST(tls, float64(-0.1), 12)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14)))) >> 16))) +
 13899  		((((SKP_FIX_CONST(tls, 1.5, 16)) >> 16) * (int32(int16(quant_offset_Q10)))) + ((((SKP_FIX_CONST(tls, 1.5, 16)) & 0x0000FFFF) * (int32(int16(quant_offset_Q10)))) >> 16)))
 13900  
 13901  }
 13902  
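// The per-subframe gain limiting above is, in rough floating-point terms
// (illustrative sketch only; the generated code saturates in Q16 and uses
// SKP_Silk_SQRT_APPROX):
//
//	gain = math.Sqrt(gain*gain + resNrg*invMaxSqrVal)
//
// i.e. each gain is raised just enough that the ratio of residual energy to
// squared gain stays below the limit derived from the target SNR headroom.
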
 13903  /* Limit, stabilize, convert and quantize NLSFs.    */
 13904  func SKP_Silk_process_NLSFs_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr, pNLSF_Q15 uintptr) { /* SKP_Silk_process_NLSFs_FIX.c:31:6: */
 13905  	bp := tls.Alloc(192)
 13906  	defer tls.Free(192)
 13907  
 13908  	var doInterpolate int32
 13909  	// var pNLSFW_Q6 [16]int32 at bp, 64
 13910  
 13911  	var NLSF_mu_Q15 int32
 13912  	var NLSF_mu_fluc_red_Q16 int32
 13913  	var i_sqr_Q15 int32
 13914  	var psNLSF_CB uintptr
 13915  
 13916  	/* Used only for NLSF interpolation */
 13917  	// var pNLSF0_temp_Q15 [16]int32 at bp+64, 64
 13918  
 13919  	// var pNLSFW0_temp_Q6 [16]int32 at bp+128, 64
 13920  
 13921  	var i int32
 13922  
 13923  	/***********************/
 13924  	/* Calculate mu values */
 13925  	/***********************/
 13926  	if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
 13927  		/* NLSF_mu           = 0.002f - 0.001f * psEnc->speech_activity; */
 13928  		/* NLSF_mu_fluc_red  = 0.1f   - 0.05f  * psEnc->speech_activity; */
 13929  		NLSF_mu_Q15 = ((66) + (((int32((-8388)) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) + ((((-8388) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) >> 16)))
 13930  		NLSF_mu_fluc_red_Q16 = ((6554) + (((int32((-838848)) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) + ((((-838848) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) >> 16)))
 13931  	} else {
 13932  		/* NLSF_mu           = 0.005f - 0.004f * psEnc->speech_activity; */
 13933  		/* NLSF_mu_fluc_red  = 0.2f   - 0.1f   * psEnc->speech_activity - 0.1f * psEncCtrl->sparseness; */
 13934  		NLSF_mu_Q15 = ((164) + (((int32((-33554)) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) + ((((-33554) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) >> 16)))
 13935  		NLSF_mu_fluc_red_Q16 = ((13107) + (((int32((-1677696)) >> 16) * (int32((int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8 + (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) + ((((-1677696) & 0x0000FFFF) * (int32((int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8 + (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) >> 16)))
 13936  	}
 13937  
 13938  	NLSF_mu_Q15 = func() int32 {
 13939  		if (NLSF_mu_Q15) > (1) {
 13940  			return NLSF_mu_Q15
 13941  		}
 13942  		return 1
 13943  	}()
 13944  
 13945  	/* Calculate NLSF weights */
 13946  
 13947  	SKP_Silk_NLSF_VQ_weights_laroia(tls, bp /* &pNLSFW_Q6[0] */, pNLSF_Q15, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
 13948  
 13949  	/* Update NLSF weights for interpolated NLSFs */
 13950  	doInterpolate = (libc.Bool32(((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseInterpolatedNLSFs == 1) && ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FNLSFInterpCoef_Q2 < (int32(1) << 2))))
 13951  	if doInterpolate != 0 {
 13952  
 13953  		/* Calculate the interpolated NLSF vector for the first half */
 13954  		SKP_Silk_interpolate(tls, bp+64 /* &pNLSF0_temp_Q15[0] */, psEnc+20708 /* &.sPred */ +12 /* &.prev_NLSFq_Q15 */, pNLSF_Q15,
 13955  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FNLSFInterpCoef_Q2, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
 13956  
 13957  		/* Calculate first half NLSF weights for the interpolated NLSFs */
 13958  
 13959  		SKP_Silk_NLSF_VQ_weights_laroia(tls, bp+128 /* &pNLSFW0_temp_Q6[0] */, bp+64 /* &pNLSF0_temp_Q15[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
 13960  
 13961  		/* Update NLSF weights with contribution from first half */
 13962  		i_sqr_Q15 = (((int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FNLSFInterpCoef_Q2))) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FNLSFInterpCoef_Q2)))) << (11))
 13963  		for i = 0; i < (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder; i++ {
 13964  			*(*int32)(unsafe.Pointer(bp /* &pNLSFW_Q6[0] */ + uintptr(i)*4)) = (((*(*int32)(unsafe.Pointer(bp /* &pNLSFW_Q6[0] */ + uintptr(i)*4))) >> (1)) + ((((*(*int32)(unsafe.Pointer(bp + 128 /* &pNLSFW0_temp_Q6[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(i_sqr_Q15)))) + ((((*(*int32)(unsafe.Pointer(bp + 128 /* &pNLSFW0_temp_Q6[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(i_sqr_Q15)))) >> 16)))
 13965  
 13966  		}
 13967  	}
 13968  
 13969  	/* Set pointer to the NLSF codebook for the current signal type and LPC order */
 13970  	psNLSF_CB = *(*uintptr)(unsafe.Pointer((psEnc /* &.sCmn */ + 16248 /* &.psNLSF_CB */) + uintptr((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype)*8))
 13971  
 13972  	/* Quantize NLSF parameters given the trained NLSF codebooks */
 13973  
 13974  	SKP_Silk_NLSF_MSVQ_encode_FIX(tls, psEncCtrl /* &.sCmn */ +28 /* &.NLSFIndices */, pNLSF_Q15, psNLSF_CB,
 13975  		psEnc+20708 /* &.sPred */ +12 /* &.prev_NLSFq_Q15 */, bp /* &pNLSFW_Q6[0] */, NLSF_mu_Q15, NLSF_mu_fluc_red_Q16,
 13976  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FNLSF_MSVQ_Survivors, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffirst_frame_after_reset)
 13977  
 13978  	/* Convert quantized NLSFs back to LPC coefficients */
 13979  	SKP_Silk_NLSF2A_stable(tls, ((psEncCtrl + 144 /* &.PredCoef_Q12 */) + 1*32), pNLSF_Q15, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
 13980  
 13981  	if doInterpolate != 0 {
 13982  		/* Calculate the interpolated, quantized LSF vector for the first half */
 13983  		SKP_Silk_interpolate(tls, bp+64 /* &pNLSF0_temp_Q15[0] */, psEnc+20708 /* &.sPred */ +12 /* &.prev_NLSFq_Q15 */, pNLSF_Q15,
 13984  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FNLSFInterpCoef_Q2, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
 13985  
 13986  		/* Convert back to LPC coefficients */
 13987  		SKP_Silk_NLSF2A_stable(tls, (psEncCtrl + 144 /* &.PredCoef_Q12 */), bp+64 /* &pNLSF0_temp_Q15[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
 13988  
 13989  	} else {
 13990  		/* Copy LPC coefficients for first half from second half */
 13991  		libc.Xmemcpy(tls, (psEncCtrl + 144 /* &.PredCoef_Q12 */), ((psEncCtrl + 144 /* &.PredCoef_Q12 */) + 1*32), (uint64((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder) * uint64(unsafe.Sizeof(int16(0)))))
 13992  	}
 13993  }
 13994  
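// The weight update in the interpolation branch above blends the two frame
// halves; in float terms it is roughly (illustrative sketch):
//
//	w[i] = 0.5*w[i] + 0.5*interpCoef*interpCoef*w0[i]
//
// so the interpolated first-half NLSFs influence the quantization weights in
// proportion to the square of the interpolation factor.
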
 13995  func SKP_Silk_quant_LTP_gains_FIX(tls *libc.TLS, B_Q14 uintptr, cbk_index uintptr, periodicity_index uintptr, W_Q18 uintptr, mu_Q8 int32, lowComplexity int32) { /* SKP_Silk_quant_LTP_gains_FIX.c:30:6: */
 13996  	bp := tls.Alloc(20)
 13997  	defer tls.Free(20)
 13998  
 13999  	var j int32
 14000  	var k int32
 14001  	// var temp_idx [4]int32 at bp, 16
 14002  
 14003  	var cbk_size int32
 14004  	var cl_ptr uintptr
 14005  	var cbk_ptr_Q14 uintptr
 14006  	var b_Q14_ptr uintptr
 14007  	var W_Q18_ptr uintptr
 14008  	// var rate_dist_subfr int32 at bp+16, 4
 14009  
 14010  	var rate_dist int32
 14011  	var min_rate_dist int32
 14012  
 14013  	/***************************************************/
 14014  	/* iterate over different codebooks with different */
 14015  	/* rates/distortions, and choose best */
 14016  	/***************************************************/
 14017  	min_rate_dist = 0x7FFFFFFF
 14018  	for k = 0; k < 3; k++ {
 14019  		cl_ptr = SKP_Silk_LTP_gain_BITS_Q6_ptrs[k]
 14020  		cbk_ptr_Q14 = SKP_Silk_LTP_vq_ptrs_Q14[k]
 14021  		cbk_size = SKP_Silk_LTP_vq_sizes[k]
 14022  
 14023  		/* Setup pointer to first subframe */
 14024  		W_Q18_ptr = W_Q18
 14025  		b_Q14_ptr = B_Q14
 14026  
 14027  		rate_dist = 0
 14028  		for j = 0; j < 4; j++ {
 14029  
 14030  			SKP_Silk_VQ_WMat_EC_FIX(tls,
 14031  				(bp /* &temp_idx */ + uintptr(j)*4), /* O    index of best codebook vector                           */
 14032  				bp+16,                               /* &rate_dist_subfr */ /* O    best weighted quantization error + mu * rate            */
 14033  				b_Q14_ptr,                           /* I    input vector to be quantized                            */
 14034  				W_Q18_ptr,                           /* I    weighting matrix                                        */
 14035  				cbk_ptr_Q14,                         /* I    codebook                                                */
 14036  				cl_ptr,                              /* I    code length for each codebook vector                    */
 14037  				mu_Q8,                               /* I    tradeoff between weighted error and rate                */
 14038  				cbk_size)
 14039  
 14040  			rate_dist = func() int32 {
 14041  				if ((uint32((rate_dist) + (*(*int32)(unsafe.Pointer(bp + 16 /* rate_dist_subfr */))))) & 0x80000000) != 0 {
 14042  					return 0x7FFFFFFF
 14043  				}
 14044  				return ((rate_dist) + (*(*int32)(unsafe.Pointer(bp + 16 /* rate_dist_subfr */))))
 14045  			}()
 14046  
 14047  			b_Q14_ptr += 2 * (uintptr(5))
 14048  			W_Q18_ptr += 4 * (uintptr(5 * 5))
 14049  		}
 14050  
 14051  		/* Avoid never finding a codebook */
 14052  		rate_dist = func() int32 {
 14053  			if (0x7FFFFFFF - 1) < (rate_dist) {
 14054  				return (0x7FFFFFFF - 1)
 14055  			}
 14056  			return rate_dist
 14057  		}()
 14058  
 14059  		if rate_dist < min_rate_dist {
 14060  			min_rate_dist = rate_dist
 14061  			libc.Xmemcpy(tls, cbk_index, bp /* &temp_idx[0] */, (uint64(4) * uint64(unsafe.Sizeof(int32(0)))))
 14062  			*(*int32)(unsafe.Pointer(periodicity_index)) = k
 14063  		}
 14064  
 14065  		/* Break early in low-complexity mode if rate distortion is below threshold */
 14066  		if (lowComplexity != 0) && (rate_dist < SKP_Silk_LTP_gain_middle_avg_RD_Q14) {
 14067  			break
 14068  		}
 14069  	}
 14070  
 14071  	cbk_ptr_Q14 = SKP_Silk_LTP_vq_ptrs_Q14[*(*int32)(unsafe.Pointer(periodicity_index))]
 14072  	for j = 0; j < 4; j++ {
 14073  		for k = 0; k < 5; k++ {
 14074  			*(*int16)(unsafe.Pointer(B_Q14 + uintptr(((j*5)+k))*2)) = *(*int16)(unsafe.Pointer(cbk_ptr_Q14 + uintptr(((k)+((*(*int32)(unsafe.Pointer(cbk_index + uintptr(j)*4)))*(5))))*2))
 14075  		}
 14076  	}
 14077  }
 14078  
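// Illustrative sketch, not generated code: the loop structure of the
// rate-distortion search in SKP_Silk_quant_LTP_gains_FIX above, written with
// plain Go slices. quantizeSubframe stands in for SKP_Silk_VQ_WMat_EC_FIX and
// is a hypothetical callback; only the saturating accumulation and the
// "keep the cheapest codebook" bookkeeping mirror the generated function.
func searchLTPCodebooks(numCodebooks, numSubframes int,
	quantizeSubframe func(cb, subfr int) (index, rateDist int32)) (bestCB int, bestIdx []int32) {
	const maxInt32 = int32(0x7FFFFFFF)
	minRateDist := maxInt32
	bestIdx = make([]int32, numSubframes)
	for cb := 0; cb < numCodebooks; cb++ {
		idx := make([]int32, numSubframes)
		var rateDist int32
		for j := 0; j < numSubframes; j++ {
			var rd int32
			idx[j], rd = quantizeSubframe(cb, j)
			// Saturating add, as in the generated code: a wrap into the sign
			// bit clamps the accumulated cost to the maximum.
			if sum := rateDist + rd; uint32(sum)&0x80000000 != 0 {
				rateDist = maxInt32
			} else {
				rateDist = sum
			}
		}
		if rateDist < minRateDist {
			minRateDist = rateDist
			bestCB = cb
			copy(bestIdx, idx)
		}
	}
	return bestCB, bestIdx
}
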
 14079  /* Range encoder for one symbol */
 14080  func SKP_Silk_range_encoder(tls *libc.TLS, psRC uintptr, data int32, prob uintptr) { /* SKP_Silk_range_coder.c:31:6: */
 14081  	var low_Q16 uint32
 14082  	var high_Q16 uint32
 14083  	var base_tmp uint32
 14084  	var range_Q32 uint32
 14085  
 14086  	/* Copy structure data */
 14087  	var base_Q32 uint32 = (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Fbase_Q32
 14088  	var range_Q16 uint32 = (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Frange_Q16
 14089  	var bufferIx int32 = (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx
 14090  	var buffer uintptr = psRC + 20 /* &.buffer */
 14091  
 14092  	if (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror != 0 {
 14093  		return
 14094  	}
 14095  
 14096  	/* Update interval */
 14097  	low_Q16 = uint32(*(*uint16)(unsafe.Pointer(prob + uintptr(data)*2)))
 14098  	high_Q16 = uint32(*(*uint16)(unsafe.Pointer(prob + uintptr((data+1))*2)))
 14099  	base_tmp = base_Q32 /* save current base, to test for carry */
 14100  	base_Q32 = base_Q32 + ((range_Q16) * (low_Q16))
 14101  	range_Q32 = ((range_Q16) * (high_Q16 - low_Q16))
 14102  
 14103  	/* Check for carry */
 14104  	if base_Q32 < base_tmp {
 14105  		/* Propagate carry in buffer */
 14106  		var bufferIx_tmp int32 = bufferIx
 14107  		for (int32(libc.PreIncUint8(&*(*uint8)(unsafe.Pointer(buffer + uintptr(libc.PreDecInt32(&bufferIx_tmp, 1)))), 1))) == 0 {
 14108  		}
 14109  	}
 14110  
 14111  	/* Check normalization */
 14112  	if (range_Q32 & 0xFF000000) != 0 {
 14113  		/* No normalization */
 14114  		range_Q16 = ((range_Q32) >> (16))
 14115  	} else {
 14116  		if (range_Q32 & 0xFFFF0000) != 0 {
 14117  			/* Normalization of 8 bits shift */
 14118  			range_Q16 = ((range_Q32) >> (8))
 14119  		} else {
 14120  			/* Normalization of 16 bits shift */
 14121  			range_Q16 = range_Q32
 14122  			/* Make sure not to write beyond buffer */
 14123  			if bufferIx >= (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14124  				(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -1
 14125  				return
 14126  			}
 14127  			/* Write one byte to buffer */
 14128  			*(*uint8)(unsafe.Pointer(buffer + uintptr(libc.PostIncInt32(&bufferIx, 1)))) = (uint8((base_Q32) >> (24)))
 14129  			base_Q32 = ((base_Q32) << (8))
 14130  		}
 14131  		/* Make sure not to write beyond buffer */
 14132  		if bufferIx >= (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14133  			(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -1
 14134  			return
 14135  		}
 14136  		/* Write one byte to buffer */
 14137  		*(*uint8)(unsafe.Pointer(buffer + uintptr(libc.PostIncInt32(&bufferIx, 1)))) = (uint8((base_Q32) >> (24)))
 14138  		base_Q32 = ((base_Q32) << (8))
 14139  	}
 14140  
 14141  	/* Copy structure data back */
 14142  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Fbase_Q32 = base_Q32
 14143  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Frange_Q16 = range_Q16
 14144  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx = bufferIx
 14145  }
 14146  
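// Illustrative sketch, not generated code: the interval update performed by
// SKP_Silk_range_encoder above, with the ccgo pointer arithmetic replaced by
// plain Go values. prob holds cumulative symbol frequencies in Q16, so the
// new interval is [base + range*prob[data], base + range*prob[data+1]).
// Buffer-bound checks are omitted; rangeEncodeSymbol is a hypothetical helper
// showing the technique, not a drop-in replacement for the state struct used
// by the generated code.
func rangeEncodeSymbol(baseQ32, rangeQ16 *uint32, buf []byte, bufIx *int, prob []uint16, data int) {
	low := uint32(prob[data])
	high := uint32(prob[data+1])
	oldBase := *baseQ32
	*baseQ32 += *rangeQ16 * low
	rangeQ32 := *rangeQ16 * (high - low)

	// Carry: the 32-bit base wrapped around, so propagate +1 through the
	// bytes already written to the buffer.
	if *baseQ32 < oldBase {
		for i := *bufIx - 1; i >= 0; i-- {
			buf[i]++
			if buf[i] != 0 {
				break
			}
		}
	}

	// Renormalise so rangeQ16 keeps roughly 16 bits of precision, emitting
	// one byte of base per 8 bits shifted out (none, one or two bytes).
	switch {
	case rangeQ32&0xFF000000 != 0:
		*rangeQ16 = rangeQ32 >> 16
	case rangeQ32&0xFFFF0000 != 0:
		*rangeQ16 = rangeQ32 >> 8
		buf[*bufIx] = byte(*baseQ32 >> 24)
		*bufIx++
		*baseQ32 <<= 8
	default:
		*rangeQ16 = rangeQ32
		buf[*bufIx] = byte(*baseQ32 >> 24)
		*bufIx++
		*baseQ32 <<= 8
		buf[*bufIx] = byte(*baseQ32 >> 24)
		*bufIx++
		*baseQ32 <<= 8
	}
}
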
 14147  /* Range encoder for multiple symbols */
 14148  func SKP_Silk_range_encoder_multi(tls *libc.TLS, psRC uintptr, data uintptr, prob uintptr, nSymbols int32) { /* SKP_Silk_range_coder.c:101:6: */
 14149  	var k int32
 14150  	for k = 0; k < nSymbols; k++ {
 14151  		SKP_Silk_range_encoder(tls, psRC, *(*int32)(unsafe.Pointer(data + uintptr(k)*4)), *(*uintptr)(unsafe.Pointer(prob + uintptr(k)*8)))
 14152  	}
 14153  }
 14154  
 14155  /* Range decoder for one symbol */
 14156  func SKP_Silk_range_decoder(tls *libc.TLS, data uintptr, psRC uintptr, prob uintptr, probIx int32) { /* SKP_Silk_range_coder.c:115:6: */
 14157  	var low_Q16 uint32
 14158  	var high_Q16 uint32
 14159  	var base_tmp uint32
 14160  	var range_Q32 uint32
 14161  
 14162  	/* Copy structure data */
 14163  	var base_Q32 uint32 = (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Fbase_Q32
 14164  	var range_Q16 uint32 = (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Frange_Q16
 14165  	var bufferIx int32 = (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx
 14166  	var buffer uintptr = ((psRC + 20 /* &.buffer */) + 4)
 14167  
 14168  	if (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror != 0 {
 14169  		/* Set output to zero */
 14170  		*(*int32)(unsafe.Pointer(data)) = 0
 14171  		return
 14172  	}
 14173  
 14174  	high_Q16 = uint32(*(*uint16)(unsafe.Pointer(prob + uintptr(probIx)*2)))
 14175  	base_tmp = ((range_Q16) * (high_Q16))
 14176  	if base_tmp > base_Q32 {
 14177  		for 1 != 0 {
 14178  			low_Q16 = uint32(*(*uint16)(unsafe.Pointer(prob + uintptr(libc.PreDecInt32(&probIx, 1))*2)))
 14179  			base_tmp = ((range_Q16) * (low_Q16))
 14180  			if base_tmp <= base_Q32 {
 14181  				break
 14182  			}
 14183  			high_Q16 = low_Q16
 14184  			/* Test for out of range */
 14185  			if high_Q16 == uint32(0) {
 14186  				(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -2
 14187  				/* Set output to zero */
 14188  				*(*int32)(unsafe.Pointer(data)) = 0
 14189  				return
 14190  			}
 14191  		}
 14192  	} else {
 14193  		for 1 != 0 {
 14194  			low_Q16 = high_Q16
 14195  			high_Q16 = uint32(*(*uint16)(unsafe.Pointer(prob + uintptr(libc.PreIncInt32(&probIx, 1))*2)))
 14196  			base_tmp = ((range_Q16) * (high_Q16))
 14197  			if base_tmp > base_Q32 {
 14198  				probIx--
 14199  				break
 14200  			}
 14201  			/* Test for out of range */
 14202  			if high_Q16 == uint32(0xFFFF) {
 14203  				(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -2
 14204  				/* Set output to zero */
 14205  				*(*int32)(unsafe.Pointer(data)) = 0
 14206  				return
 14207  			}
 14208  		}
 14209  	}
 14210  	*(*int32)(unsafe.Pointer(data)) = probIx
 14211  	base_Q32 = base_Q32 - ((range_Q16) * (low_Q16))
 14212  	range_Q32 = ((range_Q16) * (high_Q16 - low_Q16))
 14213  
 14214  	/* Check normalization */
 14215  	if (range_Q32 & 0xFF000000) != 0 {
 14216  		/* No normalization */
 14217  		range_Q16 = ((range_Q32) >> (16))
 14218  	} else {
 14219  		if (range_Q32 & 0xFFFF0000) != 0 {
 14220  			/* Normalization of 8 bits shift */
 14221  			range_Q16 = ((range_Q32) >> (8))
 14222  			/* Check for errors */
 14223  			if ((base_Q32) >> (24)) != 0 {
 14224  				(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -3
 14225  				/* Set output to zero */
 14226  				*(*int32)(unsafe.Pointer(data)) = 0
 14227  				return
 14228  			}
 14229  		} else {
 14230  			/* Normalization of 16 bits shift */
 14231  			range_Q16 = range_Q32
 14232  			/* Check for errors */
 14233  			if ((base_Q32) >> (16)) != 0 {
 14234  				(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -3
 14235  				/* Set output to zero */
 14236  				*(*int32)(unsafe.Pointer(data)) = 0
 14237  				return
 14238  			}
 14239  			/* Update base */
 14240  			base_Q32 = ((base_Q32) << (8))
 14241  			/* Make sure not to read beyond buffer */
 14242  			if bufferIx < (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14243  				/* Read one byte from buffer */
 14244  				base_Q32 = base_Q32 | (uint32(*(*uint8)(unsafe.Pointer(buffer + uintptr(libc.PostIncInt32(&bufferIx, 1))))))
 14245  			}
 14246  		}
 14247  		/* Update base */
 14248  		base_Q32 = ((base_Q32) << (8))
 14249  		/* Make sure not to read beyond buffer */
 14250  		if bufferIx < (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14251  			/* Read one byte from buffer */
 14252  			base_Q32 = base_Q32 | (uint32(*(*uint8)(unsafe.Pointer(buffer + uintptr(libc.PostIncInt32(&bufferIx, 1))))))
 14253  		}
 14254  	}
 14255  
 14256  	/* Check for zero interval length */
 14257  	if range_Q16 == uint32(0) {
 14258  		(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -4
 14259  		/* Set output to zero */
 14260  		*(*int32)(unsafe.Pointer(data)) = 0
 14261  		return
 14262  	}
 14263  
 14264  	/* Copy structure data back */
 14265  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Fbase_Q32 = base_Q32
 14266  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Frange_Q16 = range_Q16
 14267  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx = bufferIx
 14268  }
 14269  
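// Illustrative note, not generated code: starting from the caller's probIx
// guess, the two loops in SKP_Silk_range_decoder above walk the cumulative
// Q16 frequency table until they find the symbol i whose interval contains
// the decoder state, i.e.
//
//	range_Q16*prob[i] <= base_Q32 < range_Q16*prob[i+1]
//
// after which the interval's lower bound is subtracted from base_Q32 and the
// same byte-wise renormalisation as in the encoder is applied.
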
 14270  /* Range decoder for multiple symbols */
 14271  func SKP_Silk_range_decoder_multi(tls *libc.TLS, data uintptr, psRC uintptr, prob uintptr, probStartIx uintptr, nSymbols int32) { /* SKP_Silk_range_coder.c:234:6: */
 14272  	var k int32
 14273  	for k = 0; k < nSymbols; k++ {
 14274  		SKP_Silk_range_decoder(tls, (data + uintptr(k)*4), psRC, *(*uintptr)(unsafe.Pointer(prob + uintptr(k)*8)), *(*int32)(unsafe.Pointer(probStartIx + uintptr(k)*4)))
 14275  	}
 14276  }
 14277  
 14278  /* Initialize range encoder */
 14279  func SKP_Silk_range_enc_init(tls *libc.TLS, psRC uintptr) { /* SKP_Silk_range_coder.c:249:6: */
 14280  	/* Initialize structure */
 14281  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength = 1024
 14282  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Frange_Q16 = uint32(0x0000FFFF)
 14283  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx = 0
 14284  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Fbase_Q32 = uint32(0)
 14285  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = 0
 14286  }
 14287  
 14288  /* Initialize range decoder */
 14289  func SKP_Silk_range_dec_init(tls *libc.TLS, psRC uintptr, buffer uintptr, bufferLength int32) { /* SKP_Silk_range_coder.c:262:6: */
 14290  	/* check input */
 14291  	if (bufferLength > 1024) || (bufferLength < 0) {
 14292  		(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -8
 14293  		return
 14294  	}
 14295  	/* Initialize structure */
 14296  	/* Copy to internal buffer */
 14297  	libc.Xmemcpy(tls, psRC+20 /* &.buffer */, buffer, (uint64(bufferLength) * uint64(unsafe.Sizeof(uint8(0)))))
 14298  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength = bufferLength
 14299  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx = 0
 14300  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Fbase_Q32 = (((((uint32(*(*uint8)(unsafe.Pointer(buffer)))) << (24)) | ((uint32(*(*uint8)(unsafe.Pointer(buffer + 1)))) << (16))) | ((uint32(*(*uint8)(unsafe.Pointer(buffer + 2)))) << (8))) | uint32(*(*uint8)(unsafe.Pointer(buffer + 3))))
 14301  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Frange_Q16 = uint32(0x0000FFFF)
 14302  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = 0
 14303  }
 14304  
 14305  /* Determine length of bitstream */
 14306  func SKP_Silk_range_coder_get_length(tls *libc.TLS, psRC uintptr, nBytes uintptr) int32 { /* SKP_Silk_range_coder.c:288:9: */
 14307  	var nBits int32
 14308  
 14309  	/* Number of bits in stream */
 14310  	nBits = (((((*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx) << (3)) + SKP_Silk_CLZ32(tls, (int32((*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Frange_Q16-uint32(1))))) - 14)
 14311  
 14312  	*(*int32)(unsafe.Pointer(nBytes)) = ((nBits + 7) >> (3))
 14313  
 14314  	/* Return number of bits in bitstream */
 14315  	return nBits
 14316  }
 14317  
 14318  /* Write shortest uniquely decodable stream to buffer, and determine its length */
 14319  func SKP_Silk_range_enc_wrap_up(tls *libc.TLS, psRC uintptr) { /* SKP_Silk_range_coder.c:305:6: */
 14320  	bp := tls.Alloc(4)
 14321  	defer tls.Free(4)
 14322  
 14323  	var bufferIx_tmp int32
 14324  	var bits_to_store int32
 14325  	var bits_in_stream int32
 14326  	// var nBytes int32 at bp, 4
 14327  
 14328  	var mask int32
 14329  	var base_Q24 uint32
 14330  
 14331  	/* Lower limit of interval, shifted 8 bits to the right */
 14332  	base_Q24 = (((*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Fbase_Q32) >> (8))
 14333  
 14334  	bits_in_stream = SKP_Silk_range_coder_get_length(tls, psRC, bp /* &nBytes */)
 14335  
 14336  	/* Number of additional bits (1..9) required to be stored to stream */
 14337  	bits_to_store = (bits_in_stream - (((*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx) << (3)))
 14338  	/* Round up to required resolution */
 14339  	base_Q24 = base_Q24 + (uint32(int32((0x00800000)) >> (bits_to_store - 1)))
 14340  	base_Q24 = base_Q24 & (uint32((0xFFFFFFFF)) << (24 - bits_to_store))
 14341  
 14342  	/* Check for carry */
 14343  	if (base_Q24 & uint32(0x01000000)) != 0 {
 14344  		/* Propagate carry in buffer */
 14345  		bufferIx_tmp = (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx
 14346  		for (int32(libc.PreIncUint8(&(*(*uint8)(unsafe.Pointer((psRC + 20 /* &.buffer */) + uintptr(libc.PreDecInt32(&bufferIx_tmp, 1))))), 1))) == 0 {
 14347  		}
 14348  	}
 14349  
 14350  	/* Store to stream, making sure not to write beyond buffer */
 14351  	if (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx < (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14352  		*(*uint8)(unsafe.Pointer((psRC + 20 /* &.buffer */) + uintptr(libc.PostIncInt32(&(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx, 1)))) = (uint8((base_Q24) >> (16)))
 14353  		if bits_to_store > 8 {
 14354  			if (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx < (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14355  				*(*uint8)(unsafe.Pointer((psRC + 20 /* &.buffer */) + uintptr(libc.PostIncInt32(&(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx, 1)))) = (uint8((base_Q24) >> (8)))
 14356  			}
 14357  		}
 14358  	}
 14359  
 14360  	/* Fill up any remaining bits in the last byte with 1s */
 14361  	if (bits_in_stream & 7) != 0 {
 14362  		mask = (int32((0xFF)) >> (bits_in_stream & 7))
 14363  		if (*(*int32)(unsafe.Pointer(bp /* nBytes */)) - 1) < (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14364  			*(*uint8)(unsafe.Pointer((psRC + 20 /* &.buffer */) + uintptr((*(*int32)(unsafe.Pointer(bp /* nBytes */)) - 1)))) |= uint8((mask))
 14365  		}
 14366  	}
 14367  }
 14368  
 14369  /* Check that any remaining bits in the last byte are set to 1 */
 14370  func SKP_Silk_range_coder_check_after_decoding(tls *libc.TLS, psRC uintptr) { /* SKP_Silk_range_coder.c:350:6: */
 14371  	bp := tls.Alloc(4)
 14372  	defer tls.Free(4)
 14373  
 14374  	var bits_in_stream int32
 14375  	// var nBytes int32 at bp, 4
 14376  
 14377  	var mask int32
 14378  
 14379  	bits_in_stream = SKP_Silk_range_coder_get_length(tls, psRC, bp /* &nBytes */)
 14380  
 14381  	/* Make sure not to read beyond buffer */
 14382  	if (*(*int32)(unsafe.Pointer(bp /* nBytes */)) - 1) >= (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14383  		(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -5
 14384  		return
 14385  	}
 14386  
 14387  	/* Test any remaining bits in last byte */
 14388  	if (bits_in_stream & 7) != 0 {
 14389  		mask = (int32((0xFF)) >> (bits_in_stream & 7))
 14390  		if (int32(*(*uint8)(unsafe.Pointer((psRC + 20 /* &.buffer */) + uintptr((*(*int32)(unsafe.Pointer(bp /* nBytes */)) - 1))))) & mask) != mask {
 14391  			(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -5
 14392  			return
 14393  		}
 14394  	}
 14395  }
 14396  
 14397  /* Add noise to matrix diagonal */
 14398  func SKP_Silk_regularize_correlations_FIX(tls *libc.TLS, XX uintptr, xx uintptr, noise int32, D int32) { /* SKP_Silk_regularize_correlations_FIX.c:31:6: */
 14399  	var i int32
 14400  	for i = 0; i < D; i++ {
 14401  		*(*int32)(unsafe.Pointer(((XX) + uintptr((((i)*(D))+(i)))*4))) = ((*(*int32)(unsafe.Pointer(((XX) + uintptr((((i)*(D))+(i)))*4)))) + (noise))
 14402  	}
 14403  	*(*int32)(unsafe.Pointer(xx)) += (noise)
 14404  }
 14405  
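// Illustrative sketch, not generated code: the function above adds `noise`
// to every diagonal entry of the D x D correlation matrix stored row-major
// at XX, and to the scalar energy xx (a simple diagonal-loading
// regularisation). With Go slices the same operation reads:
func regularizeCorrelations(XX []int32, xx *int32, noise int32, D int) {
	for i := 0; i < D; i++ {
		XX[i*D+i] += noise // diagonal element (i, i) of the row-major matrix
	}
	*xx += noise
}
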
 14406  /* Greatest common divisor */
 14407  func gcd(tls *libc.TLS, a int32, b int32) int32 { /* SKP_Silk_resampler.c:66:18: */
 14408  	var tmp int32
 14409  	for b > 0 {
 14410  		tmp = (a - (b * ((a) / (b))))
 14411  		a = b
 14412  		b = tmp
 14413  	}
 14414  	return a
 14415  }
 14416  
 14417  /* Initialize/reset the resampler state for a given pair of input/output sampling rates */
 14418  func SKP_Silk_resampler_init(tls *libc.TLS, S uintptr, Fs_Hz_in int32, Fs_Hz_out int32) int32 { /* SKP_Silk_resampler.c:81:9: */
 14419  	var cycleLen int32
 14420  	var cyclesPerBatch int32
 14421  	var up2 int32 = 0
 14422  	var down2 int32 = 0
 14423  
 14424  	/* Clear state */
 14425  	libc.Xmemset(tls, S, 0, uint64(unsafe.Sizeof(SKP_Silk_resampler_state_struct{})))
 14426  
 14427  	/* Input checking */
 14428  	if (((Fs_Hz_in < 8000) || (Fs_Hz_in > 192000)) || (Fs_Hz_out < 8000)) || (Fs_Hz_out > 192000) {
 14429  
 14430  		return -1
 14431  	}
 14432  
 14433  	/* Determine pre-downsampling and post-upsampling */
 14434  	if Fs_Hz_in > 96000 {
 14435  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers = 2
 14436  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fdown_pre_function = *(*uintptr)(unsafe.Pointer(&struct {
 14437  			f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14438  		}{SKP_Silk_resampler_private_down4}))
 14439  	} else if Fs_Hz_in > 48000 {
 14440  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers = 1
 14441  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fdown_pre_function = *(*uintptr)(unsafe.Pointer(&struct {
 14442  			f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14443  		}{SKP_Silk_resampler_down2}))
 14444  	} else {
 14445  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers = 0
 14446  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fdown_pre_function = uintptr(0)
 14447  	}
 14448  
 14449  	if Fs_Hz_out > 96000 {
 14450  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers = 2
 14451  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fup_post_function = *(*uintptr)(unsafe.Pointer(&struct {
 14452  			f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14453  		}{SKP_Silk_resampler_private_up4}))
 14454  	} else if Fs_Hz_out > 48000 {
 14455  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers = 1
 14456  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fup_post_function = *(*uintptr)(unsafe.Pointer(&struct {
 14457  			f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14458  		}{SKP_Silk_resampler_up2}))
 14459  	} else {
 14460  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers = 0
 14461  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fup_post_function = uintptr(0)
 14462  	}
 14463  
 14464  	if ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers + (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers) > 0 {
 14465  		/* Ratio of output/input samples */
 14466  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fratio_Q16 = ((((Fs_Hz_out) << (13)) / (Fs_Hz_in)) << (3))
 14467  		/* Make sure the ratio is rounded up */
 14468  		for ((((((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fratio_Q16) >> 16) * (int32(int16(Fs_Hz_in)))) + (((((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fratio_Q16) & 0x0000FFFF) * (int32(int16(Fs_Hz_in)))) >> 16)) + (((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fratio_Q16) * (func() int32 {
 14469  			if (16) == 1 {
 14470  				return (((Fs_Hz_in) >> 1) + ((Fs_Hz_in) & 1))
 14471  			}
 14472  			return ((((Fs_Hz_in) >> ((16) - 1)) + 1) >> 1)
 14473  		}()))) < Fs_Hz_out {
 14474  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fratio_Q16++
 14475  		}
 14476  
 14477  		/* Batch size is 10 ms */
 14478  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSizePrePost = ((Fs_Hz_in) / (100))
 14479  
 14480  		/* Convert sampling rates to those used after pre-downsampling and before post-upsampling */
 14481  		Fs_Hz_in = ((Fs_Hz_in) >> ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers))
 14482  		Fs_Hz_out = ((Fs_Hz_out) >> ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers))
 14483  	}
 14484  
 14485  	/* Number of samples processed per batch */
 14486  	/* First, try 10 ms frames */
 14487  	(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize = ((Fs_Hz_in) / (100))
 14488  	if ((((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize) * (100)) != Fs_Hz_in) || ((Fs_Hz_in % 100) != 0) {
 14489  		/* No integer number of input or output samples with 10 ms frames, use greatest common divisor */
 14490  		cycleLen = ((Fs_Hz_in) / (gcd(tls, Fs_Hz_in, Fs_Hz_out)))
 14491  		cyclesPerBatch = ((480) / (cycleLen))
 14492  		if cyclesPerBatch == 0 {
 14493  			/* cycleLen too big, let's just use the maximum batch size. Some distortion will result. */
 14494  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize = 480
 14495  
 14496  		} else {
 14497  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize = ((cyclesPerBatch) * (cycleLen))
 14498  		}
 14499  	}
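
	/* Worked example (illustrative): resampling 44100 Hz to 16000 Hz gives   */
	/* gcd(44100, 16000) = 100, so one fractional cycle spans 44100/100 = 441 */
	/* input samples, cyclesPerBatch = 480/441 = 1, and batchSize = 441.      */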
 14500  
 14501  	/* Find resampler with the right sampling ratio */
 14502  	if Fs_Hz_out > Fs_Hz_in {
 14503  		/* Upsample */
 14504  		if Fs_Hz_out == ((Fs_Hz_in) * (2)) { /* Fs_out : Fs_in = 2 : 1 */
 14505  			/* Special case: directly use 2x upsampler */
 14506  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14507  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14508  			}{SKP_Silk_resampler_private_up2_HQ_wrapper}))
 14509  		} else {
 14510  			/* Default resampler */
 14511  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14512  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14513  			}{SKP_Silk_resampler_private_IIR_FIR}))
 14514  			up2 = 1
 14515  			if Fs_Hz_in > 24000 {
 14516  				/* Low-quality all-pass upsampler */
 14517  				(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fup2_function = *(*uintptr)(unsafe.Pointer(&struct {
 14518  					f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14519  				}{SKP_Silk_resampler_up2}))
 14520  			} else {
 14521  				/* High-quality all-pass upsampler */
 14522  				(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fup2_function = *(*uintptr)(unsafe.Pointer(&struct {
 14523  					f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14524  				}{SKP_Silk_resampler_private_up2_HQ}))
 14525  			}
 14526  		}
 14527  	} else if Fs_Hz_out < Fs_Hz_in {
 14528  		/* Downsample */
 14529  		if ((Fs_Hz_out) * (4)) == ((Fs_Hz_in) * (3)) { /* Fs_out : Fs_in = 3 : 4 */
 14530  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs = 3
 14531  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_3_4_COEFS))
 14532  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14533  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14534  			}{SKP_Silk_resampler_private_down_FIR}))
 14535  		} else if ((Fs_Hz_out) * (3)) == ((Fs_Hz_in) * (2)) { /* Fs_out : Fs_in = 2 : 3 */
 14536  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs = 2
 14537  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_2_3_COEFS))
 14538  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14539  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14540  			}{SKP_Silk_resampler_private_down_FIR}))
 14541  		} else if ((Fs_Hz_out) * (2)) == Fs_Hz_in { /* Fs_out : Fs_in = 1 : 2 */
 14542  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs = 1
 14543  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_1_2_COEFS))
 14544  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14545  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14546  			}{SKP_Silk_resampler_private_down_FIR}))
 14547  		} else if ((Fs_Hz_out) * (8)) == ((Fs_Hz_in) * (3)) { /* Fs_out : Fs_in = 3 : 8 */
 14548  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs = 3
 14549  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_3_8_COEFS))
 14550  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14551  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14552  			}{SKP_Silk_resampler_private_down_FIR}))
 14553  		} else if ((Fs_Hz_out) * (3)) == Fs_Hz_in { /* Fs_out : Fs_in = 1 : 3 */
 14554  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs = 1
 14555  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_1_3_COEFS))
 14556  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14557  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14558  			}{SKP_Silk_resampler_private_down_FIR}))
 14559  		} else if ((Fs_Hz_out) * (4)) == Fs_Hz_in { /* Fs_out : Fs_in = 1 : 4 */
 14560  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs = 1
 14561  			down2 = 1
 14562  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_1_2_COEFS))
 14563  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14564  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14565  			}{SKP_Silk_resampler_private_down_FIR}))
 14566  		} else if ((Fs_Hz_out) * (6)) == Fs_Hz_in { /* Fs_out : Fs_in = 1 : 6 */
 14567  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs = 1
 14568  			down2 = 1
 14569  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_1_3_COEFS))
 14570  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14571  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14572  			}{SKP_Silk_resampler_private_down_FIR}))
 14573  		} else if ((Fs_Hz_out) * (441)) == ((Fs_Hz_in) * (80)) { /* Fs_out : Fs_in = 80 : 441 */
 14574  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_80_441_ARMA4_COEFS))
 14575  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14576  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14577  			}{SKP_Silk_resampler_private_IIR_FIR}))
 14578  		} else if ((Fs_Hz_out) * (441)) == ((Fs_Hz_in) * (120)) { /* Fs_out : Fs_in = 120 : 441 */
 14579  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_120_441_ARMA4_COEFS))
 14580  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14581  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14582  			}{SKP_Silk_resampler_private_IIR_FIR}))
 14583  		} else if ((Fs_Hz_out) * (441)) == ((Fs_Hz_in) * (160)) { /* Fs_out : Fs_in = 160 : 441 */
 14584  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_160_441_ARMA4_COEFS))
 14585  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14586  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14587  			}{SKP_Silk_resampler_private_IIR_FIR}))
 14588  		} else if ((Fs_Hz_out) * (441)) == ((Fs_Hz_in) * (240)) { /* Fs_out : Fs_in = 240 : 441 */
 14589  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_240_441_ARMA4_COEFS))
 14590  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14591  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14592  			}{SKP_Silk_resampler_private_IIR_FIR}))
 14593  		} else if ((Fs_Hz_out) * (441)) == ((Fs_Hz_in) * (320)) { /* Fs_out : Fs_in = 320 : 441 */
 14594  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_320_441_ARMA4_COEFS))
 14595  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14596  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14597  			}{SKP_Silk_resampler_private_IIR_FIR}))
 14598  		} else {
 14599  			/* Default resampler */
 14600  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14601  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14602  			}{SKP_Silk_resampler_private_IIR_FIR}))
 14603  			up2 = 1
 14604  			if Fs_Hz_in > 24000 {
 14605  				/* Low-quality all-pass upsampler */
 14606  				(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fup2_function = *(*uintptr)(unsafe.Pointer(&struct {
 14607  					f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14608  				}{SKP_Silk_resampler_up2}))
 14609  			} else {
 14610  				/* High-quality all-pass upsampler */
 14611  				(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fup2_function = *(*uintptr)(unsafe.Pointer(&struct {
 14612  					f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14613  				}{SKP_Silk_resampler_private_up2_HQ}))
 14614  			}
 14615  		}
 14616  	} else {
 14617  		/* Input and output sampling rates are equal: copy */
 14618  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14619  			f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14620  		}{SKP_Silk_resampler_private_copy}))
 14621  	}
 14622  
 14623  	(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x = (up2 | down2)
 14624  
 14625  	/* Ratio of input/output samples */
 14626  	(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FinvRatio_Q16 = ((((Fs_Hz_in) << ((14 + up2) - down2)) / (Fs_Hz_out)) << (2))
 14627  	/* Make sure the ratio is rounded up */
 14628  	for ((((((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FinvRatio_Q16) >> 16) * (int32((int16((Fs_Hz_out) << (down2)))))) + (((((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FinvRatio_Q16) & 0x0000FFFF) * (int32((int16((Fs_Hz_out) << (down2)))))) >> 16)) + (((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FinvRatio_Q16) * (func() int32 {
 14629  		if (16) == 1 {
 14630  			return ((((Fs_Hz_out) << (down2)) >> 1) + (((Fs_Hz_out) << (down2)) & 1))
 14631  		}
 14632  		return (((((Fs_Hz_out) << (down2)) >> ((16) - 1)) + 1) >> 1)
 14633  	}()))) < ((Fs_Hz_in) << (up2)) {
 14634  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FinvRatio_Q16++
 14635  	}
 14636  
 14637  	(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fmagic_number = 123456789
 14638  
 14639  	return 0
 14640  }
 14641  
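// Illustrative sketch, not generated code: ratio_Q16 and invRatio_Q16 above
// are fixed-point rate ratios that are deliberately rounded up, so that
// scaling the input rate by the ratio never undershoots the output rate.
// Shown here for ratio_Q16 with 64-bit arithmetic, which is mathematically
// equivalent to the expanded SMULWW round-up loop above; ratioQ16 is a
// hypothetical helper, not part of this package.
func ratioQ16(fsOut, fsIn int32) int32 {
	// Same initial estimate as the generated code: divide in Q13 and scale to
	// Q16, keeping the intermediate within 32 bits for rates up to 192 kHz.
	r := ((fsOut << 13) / fsIn) << 3
	// Round up until the Q16 product with the input rate reaches the output rate.
	for int32((int64(r)*int64(fsIn))>>16) < fsOut {
		r++
	}
	return r
}
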
 14642  /* Clear the states of all resampling filters, without resetting sampling rate ratio */
 14643  func SKP_Silk_resampler_clear(tls *libc.TLS, S uintptr) int32 { /* SKP_Silk_resampler.c:255:9: */
 14644  	/* Clear state */
 14645  	libc.Xmemset(tls, S+88 /* &.sDown2 */, 0, uint64(unsafe.Sizeof([2]int32{})))
 14646  	libc.Xmemset(tls, S /* &.sIIR */, 0, uint64(unsafe.Sizeof([6]int32{})))
 14647  	libc.Xmemset(tls, S+24 /* &.sFIR */, 0, uint64(unsafe.Sizeof([16]int32{})))
 14648  	libc.Xmemset(tls, S+136 /* &.sDownPre */, 0, uint64(unsafe.Sizeof([2]int32{})))
 14649  	libc.Xmemset(tls, S+144 /* &.sUpPost */, 0, uint64(unsafe.Sizeof([2]int32{})))
 14650  	return 0
 14651  }
 14652  
 14653  /* Resampler: convert from one sampling rate to another                                 */
 14654  func SKP_Silk_resampler(tls *libc.TLS, S uintptr, out uintptr, in uintptr, inLen int32) int32 { /* SKP_Silk_resampler.c:271:9: */
 14655  	bp := tls.Alloc(1920)
 14656  	defer tls.Free(1920)
 14657  
 14658  	/* Verify that state was initialized and has not been corrupted */
 14659  	if (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fmagic_number != 123456789 {
 14660  
 14661  		return -1
 14662  	}
 14663  
 14664  	if ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers + (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers) > 0 {
 14665  		/* The input and/or output sampling rate is above 48000 Hz */
 14666  		var nSamplesIn int32
 14667  		var nSamplesOut int32
 14668  		// var in_buf [480]int16 at bp, 960
 14669  
 14670  		// var out_buf [480]int16 at bp+960, 960
 14671  
 14672  		for inLen > 0 {
 14673  			/* Number of input and output samples to process */
 14674  			nSamplesIn = func() int32 {
 14675  				if (inLen) < ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSizePrePost) {
 14676  					return inLen
 14677  				}
 14678  				return (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSizePrePost
 14679  			}()
 14680  			nSamplesOut = (((((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fratio_Q16) >> 16) * (int32(int16(nSamplesIn)))) + (((((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fratio_Q16) & 0x0000FFFF) * (int32(int16(nSamplesIn)))) >> 16))
 14681  
 14682  			if (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers > 0 {
 14683  				(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 152 /* &.down_pre_function */))))(tls, S+136 /* &.sDownPre */, bp /* &in_buf[0] */, in, nSamplesIn)
 14684  				if (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers > 0 {
 14685  					(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 96 /* &.resampler_function */))))(tls, S, bp+960 /* &out_buf[0] */, bp /* &in_buf[0] */, ((nSamplesIn) >> ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers)))
 14686  					(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 160 /* &.up_post_function */))))(tls, S+144 /* &.sUpPost */, out, bp+960 /* &out_buf[0] */, ((nSamplesOut) >> ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers)))
 14687  				} else {
 14688  					(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 96 /* &.resampler_function */))))(tls, S, out, bp /* &in_buf[0] */, ((nSamplesIn) >> ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers)))
 14689  				}
 14690  			} else {
 14691  				(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 96 /* &.resampler_function */))))(tls, S, bp+960 /* &out_buf[0] */, in, ((nSamplesIn) >> ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers)))
 14692  				(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 160 /* &.up_post_function */))))(tls, S+144 /* &.sUpPost */, out, bp+960 /* &out_buf[0] */, ((nSamplesOut) >> ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers)))
 14693  			}
 14694  
 14695  			in += 2 * uintptr(nSamplesIn)
 14696  			out += 2 * uintptr(nSamplesOut)
 14697  			inLen = inLen - (nSamplesIn)
 14698  		}
 14699  	} else {
 14700  		/* Input and output sampling rates are at most 48000 Hz */
 14701  		(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 96 /* &.resampler_function */))))(tls, S, out, in, inLen)
 14702  	}
 14703  
 14704  	return 0
 14705  }
 14706  
 14707  /* Downsample by a factor 2, mediocre quality */
 14708  func SKP_Silk_resampler_down2(tls *libc.TLS, S uintptr, out uintptr, in uintptr, inLen int32) { /* SKP_Silk_resampler_down2.c:40:6: */
 14709  	var k int32
 14710  	var len2 int32 = ((inLen) >> (1))
 14711  	var in32 int32
 14712  	var out32 int32
 14713  	var Y int32
 14714  	var X int32
 14715  
 14716  	/* Internal variables and state are in Q10 format */
 14717  	for k = 0; k < len2; k++ {
 14718  		/* Convert to Q10 */
 14719  		in32 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr((2*k))*2)))) << (10))
 14720  
 14721  		/* All-pass section for even input sample */
 14722  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S))))
 14723  		X = ((Y) + ((((Y) >> 16) * (int32(SKP_Silk_resampler_down2_1))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_down2_1))) >> 16)))
 14724  		out32 = ((*(*int32)(unsafe.Pointer(S))) + (X))
 14725  		*(*int32)(unsafe.Pointer(S)) = ((in32) + (X))
 14726  
 14727  		/* Convert to Q10 */
 14728  		in32 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr(((2*k)+1))*2)))) << (10))
 14729  
 14730  		/* All-pass section for odd input sample, and add to output of previous section */
 14731  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S + 1*4))))
 14732  		X = ((((Y) >> 16) * (int32(SKP_Silk_resampler_down2_0))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_down2_0))) >> 16))
 14733  		out32 = ((out32) + (*(*int32)(unsafe.Pointer(S + 1*4))))
 14734  		out32 = ((out32) + (X))
 14735  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((in32) + (X))
 14736  
 14737  		/* Add, convert back to int16 and store to output */
 14738  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
 14739  			if (func() int32 {
 14740  				if (11) == 1 {
 14741  					return (((out32) >> 1) + ((out32) & 1))
 14742  				}
 14743  				return ((((out32) >> ((11) - 1)) + 1) >> 1)
 14744  			}()) > 0x7FFF {
 14745  				return int16(0x7FFF)
 14746  			}
 14747  			return func() int16 {
 14748  				if (func() int32 {
 14749  					if (11) == 1 {
 14750  						return (((out32) >> 1) + ((out32) & 1))
 14751  					}
 14752  					return ((((out32) >> ((11) - 1)) + 1) >> 1)
 14753  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 14754  					return libc.Int16FromInt32(0x8000)
 14755  				}
 14756  				return func() int16 {
 14757  					if (11) == 1 {
 14758  						return (int16(((out32) >> 1) + ((out32) & 1)))
 14759  					}
 14760  					return (int16((((out32) >> ((11) - 1)) + 1) >> 1))
 14761  				}()
 14762  			}()
 14763  		}()
 14764  	}
 14765  }
 14766  
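// Illustrative sketch, not generated code: SKP_Silk_resampler_down2 above is
// a half-band decimator built from two first-order all-pass sections whose
// outputs are summed. Written with 64-bit products instead of the expanded
// SMULWB macros (the same value, ignoring 32-bit overflow behaviour), one
// output sample per input pair is produced as follows; downsample2 is a
// hypothetical helper.
func downsample2(state []int32, out, in []int16) {
	smulwb := func(a, b int32) int32 { return int32((int64(a) * int64(b)) >> 16) }
	for k := 0; k+1 < len(in); k += 2 {
		// Even input sample through the first all-pass section (Q10 internally).
		in32 := int32(in[k]) << 10
		y := in32 - state[0]
		x := y + smulwb(y, int32(SKP_Silk_resampler_down2_1))
		out32 := state[0] + x
		state[0] = in32 + x

		// Odd input sample through the second all-pass section, added in.
		in32 = int32(in[k+1]) << 10
		y = in32 - state[1]
		x = smulwb(y, int32(SKP_Silk_resampler_down2_0))
		out32 += state[1] + x
		state[1] = in32 + x

		// Round from Q11 back to Q0 and saturate to int16.
		v := ((out32 >> 10) + 1) >> 1
		if v > 32767 {
			v = 32767
		} else if v < -32768 {
			v = -32768
		}
		out[k/2] = int16(v)
	}
}
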
 14767  /* Downsample by a factor 2/3, low quality */
 14768  func SKP_Silk_resampler_down2_3(tls *libc.TLS, S uintptr, out uintptr, in uintptr, inLen int32) { /* SKP_Silk_resampler_down2_3.c:42:6: */
 14769  	bp := tls.Alloc(1936)
 14770  	defer tls.Free(1936)
 14771  
 14772  	var nSamplesIn int32
 14773  	var counter int32
 14774  	var res_Q6 int32
 14775  	// var buf [484]int32 at bp, 1936
 14776  
 14777  	var buf_ptr uintptr
 14778  
 14779  	/* Copy buffered samples to start of buffer */
 14780  	libc.Xmemcpy(tls, bp /* &buf[0] */, S, (uint64(4) * uint64(unsafe.Sizeof(int32(0)))))
 14781  
 14782  	/* Iterate over blocks of frameSizeIn input samples */
 14783  	for 1 != 0 {
 14784  		nSamplesIn = func() int32 {
 14785  			if (inLen) < (480) {
 14786  				return inLen
 14787  			}
 14788  			return 480
 14789  		}()
 14790  
 14791  		/* Second-order AR filter (output in Q8) */
 14792  		SKP_Silk_resampler_private_AR2(tls, (S + 4*4), (bp /* &buf */ + 4*4), in,
 14793  			uintptr(unsafe.Pointer(&SKP_Silk_Resampler_2_3_COEFS_LQ)), nSamplesIn)
 14794  
 14795  		/* Interpolate filtered signal */
 14796  		buf_ptr = bp /* &buf[0] */
 14797  		counter = nSamplesIn
 14798  		for counter > 2 {
 14799  			/* Inner product */
 14800  			res_Q6 = ((((*(*int32)(unsafe.Pointer(buf_ptr))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[2]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[2]))) >> 16))
 14801  			res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[3]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[3]))) >> 16)))
 14802  			res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[5]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[5]))) >> 16)))
 14803  			res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[4]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[4]))) >> 16)))
 14804  
 14805  			/* Scale down, saturate and store in output array */
 14806  			*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&out, 2))) = func() int16 {
 14807  				if (func() int32 {
 14808  					if (6) == 1 {
 14809  						return (((res_Q6) >> 1) + ((res_Q6) & 1))
 14810  					}
 14811  					return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 14812  				}()) > 0x7FFF {
 14813  					return int16(0x7FFF)
 14814  				}
 14815  				return func() int16 {
 14816  					if (func() int32 {
 14817  						if (6) == 1 {
 14818  							return (((res_Q6) >> 1) + ((res_Q6) & 1))
 14819  						}
 14820  						return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 14821  					}()) < (int32(libc.Int16FromInt32(0x8000))) {
 14822  						return libc.Int16FromInt32(0x8000)
 14823  					}
 14824  					return func() int16 {
 14825  						if (6) == 1 {
 14826  							return (int16(((res_Q6) >> 1) + ((res_Q6) & 1)))
 14827  						}
 14828  						return (int16((((res_Q6) >> ((6) - 1)) + 1) >> 1))
 14829  					}()
 14830  				}()
 14831  			}()
 14832  
 14833  			res_Q6 = ((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[4]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[4]))) >> 16))
 14834  			res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[5]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[5]))) >> 16)))
 14835  			res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[3]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[3]))) >> 16)))
 14836  			res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 4*4))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[2]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 4*4))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[2]))) >> 16)))
 14837  
 14838  			/* Scale down, saturate and store in output array */
 14839  			*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&out, 2))) = func() int16 {
 14840  				if (func() int32 {
 14841  					if (6) == 1 {
 14842  						return (((res_Q6) >> 1) + ((res_Q6) & 1))
 14843  					}
 14844  					return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 14845  				}()) > 0x7FFF {
 14846  					return int16(0x7FFF)
 14847  				}
 14848  				return func() int16 {
 14849  					if (func() int32 {
 14850  						if (6) == 1 {
 14851  							return (((res_Q6) >> 1) + ((res_Q6) & 1))
 14852  						}
 14853  						return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 14854  					}()) < (int32(libc.Int16FromInt32(0x8000))) {
 14855  						return libc.Int16FromInt32(0x8000)
 14856  					}
 14857  					return func() int16 {
 14858  						if (6) == 1 {
 14859  							return (int16(((res_Q6) >> 1) + ((res_Q6) & 1)))
 14860  						}
 14861  						return (int16((((res_Q6) >> ((6) - 1)) + 1) >> 1))
 14862  					}()
 14863  				}()
 14864  			}()
 14865  
 14866  			buf_ptr += 4 * (uintptr(3))
 14867  			counter = counter - (3)
 14868  		}
 14869  
 14870  		in += 2 * (uintptr(nSamplesIn))
 14871  		inLen = inLen - (nSamplesIn)
 14872  
 14873  		if inLen > 0 {
 14874  			/* More iterations to do; copy last part of filtered signal to beginning of buffer */
 14875  			libc.Xmemcpy(tls, bp /* &buf[0] */, (bp /* &buf */ + uintptr(nSamplesIn)*4), (uint64(4) * uint64(unsafe.Sizeof(int32(0)))))
 14876  		} else {
 14877  			break
 14878  		}
 14879  	}
 14880  
 14881  	/* Copy last part of filtered signal to the state for the next call */
 14882  	libc.Xmemcpy(tls, S, (bp /* &buf */ + uintptr(nSamplesIn)*4), (uint64(4) * uint64(unsafe.Sizeof(int32(0)))))
 14883  }
 14884  
 14885  /* Downsample by a factor 3, low quality */
 14886  func SKP_Silk_resampler_down3(tls *libc.TLS, S uintptr, out uintptr, in uintptr, inLen int32) { /* SKP_Silk_resampler_down3.c:42:6: */
 14887  	bp := tls.Alloc(1944)
 14888  	defer tls.Free(1944)
 14889  
 14890  	var nSamplesIn int32
 14891  	var counter int32
 14892  	var res_Q6 int32
 14893  	// var buf [486]int32 at bp, 1944
 14894  
 14895  	var buf_ptr uintptr
 14896  
 14897  	/* Copy buffered samples to start of buffer */
 14898  	libc.Xmemcpy(tls, bp /* &buf[0] */, S, (uint64(6) * uint64(unsafe.Sizeof(int32(0)))))
 14899  
 14900  	/* Iterate over blocks of frameSizeIn input samples */
 14901  	for 1 != 0 {
 14902  		nSamplesIn = func() int32 {
 14903  			if (inLen) < (480) {
 14904  				return inLen
 14905  			}
 14906  			return 480
 14907  		}()
 14908  
 14909  		/* Second-order AR filter (output in Q8) */
 14910  		SKP_Silk_resampler_private_AR2(tls, (S + 6*4), (bp /* &buf */ + 6*4), in,
 14911  			uintptr(unsafe.Pointer(&SKP_Silk_Resampler_1_3_COEFS_LQ)), nSamplesIn)
 14912  
 14913  		/* Interpolate filtered signal */
 14914  		buf_ptr = bp /* &buf[0] */
 14915  		counter = nSamplesIn
 14916  		for counter > 2 {
 14917  			/* Inner product */
 14918  			res_Q6 = (((((*(*int32)(unsafe.Pointer(buf_ptr))) + (*(*int32)(unsafe.Pointer(buf_ptr + 5*4)))) >> 16) * (int32(SKP_Silk_Resampler_1_3_COEFS_LQ[2]))) + (((((*(*int32)(unsafe.Pointer(buf_ptr))) + (*(*int32)(unsafe.Pointer(buf_ptr + 5*4)))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_1_3_COEFS_LQ[2]))) >> 16))
 14919  			res_Q6 = ((res_Q6) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 4*4)))) >> 16) * (int32(SKP_Silk_Resampler_1_3_COEFS_LQ[3]))) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 4*4)))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_1_3_COEFS_LQ[3]))) >> 16)))
 14920  			res_Q6 = ((res_Q6) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 3*4)))) >> 16) * (int32(SKP_Silk_Resampler_1_3_COEFS_LQ[4]))) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 3*4)))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_1_3_COEFS_LQ[4]))) >> 16)))
 14921  
 14922  			/* Scale down, saturate and store in output array */
 14923  			*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&out, 2))) = func() int16 {
 14924  				if (func() int32 {
 14925  					if (6) == 1 {
 14926  						return (((res_Q6) >> 1) + ((res_Q6) & 1))
 14927  					}
 14928  					return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 14929  				}()) > 0x7FFF {
 14930  					return int16(0x7FFF)
 14931  				}
 14932  				return func() int16 {
 14933  					if (func() int32 {
 14934  						if (6) == 1 {
 14935  							return (((res_Q6) >> 1) + ((res_Q6) & 1))
 14936  						}
 14937  						return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 14938  					}()) < (int32(libc.Int16FromInt32(0x8000))) {
 14939  						return libc.Int16FromInt32(0x8000)
 14940  					}
 14941  					return func() int16 {
 14942  						if (6) == 1 {
 14943  							return (int16(((res_Q6) >> 1) + ((res_Q6) & 1)))
 14944  						}
 14945  						return (int16((((res_Q6) >> ((6) - 1)) + 1) >> 1))
 14946  					}()
 14947  				}()
 14948  			}()
 14949  
 14950  			buf_ptr += 4 * (uintptr(3))
 14951  			counter = counter - (3)
 14952  		}
 14953  
 14954  		in += 2 * (uintptr(nSamplesIn))
 14955  		inLen = inLen - (nSamplesIn)
 14956  
 14957  		if inLen > 0 {
 14958  			/* More iterations to do; copy last part of filtered signal to beginning of buffer */
 14959  			libc.Xmemcpy(tls, bp /* &buf[0] */, (bp /* &buf */ + uintptr(nSamplesIn)*4), (uint64(6) * uint64(unsafe.Sizeof(int32(0)))))
 14960  		} else {
 14961  			break
 14962  		}
 14963  	}
 14964  
 14965  	/* Copy last part of filtered signal to the state for the next call */
 14966  	libc.Xmemcpy(tls, S, (bp /* &buf */ + uintptr(nSamplesIn)*4), (uint64(6) * uint64(unsafe.Sizeof(int32(0)))))
 14967  }
 14968  
 14969  /* Second order AR filter with single delay elements */
 14970  func SKP_Silk_resampler_private_AR2(tls *libc.TLS, S uintptr, out_Q8 uintptr, in uintptr, A_Q14 uintptr, len int32) { /* SKP_Silk_resampler_private_AR2.c:40:6: */
 14971  	var k int32
 14972  	var out32 int32
 14973  
 14974  	for k = 0; k < len; k++ {
 14975  		out32 = ((*(*int32)(unsafe.Pointer(S))) + ((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (8)))
 14976  		*(*int32)(unsafe.Pointer(out_Q8 + uintptr(k)*4)) = out32
 14977  		out32 = ((out32) << (2))
 14978  		*(*int32)(unsafe.Pointer(S)) = ((*(*int32)(unsafe.Pointer(S + 1*4))) + ((((out32) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q14))))) + ((((out32) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q14))))) >> 16)))
 14979  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((((out32) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q14 + 1*2))))) + ((((out32) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q14 + 1*2))))) >> 16))
 14980  	}
 14981  }
 14982  
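// Illustrative note, not generated code: unrolling the state updates in
// SKP_Silk_resampler_private_AR2 above gives the Q8-domain recursion
//
//	y[k] = x[k] + (A_Q14[0]*y[k-1] + A_Q14[1]*y[k-2]) / 16384
//
// i.e. a transposed direct-form II realisation of the all-pole filter whose
// (negated) denominator coefficients are supplied in Q14.
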
 14983  /* Fourth order ARMA filter                                             */
 14984  /* Internally operates as two biquad filters in sequence.               */
 14985  
 14986  /* Coefficients are stored in a packed format:                                                      */
 14987  /*    { B1_Q14[1], B2_Q14[1], -A1_Q14[1], -A1_Q14[2], -A2_Q14[1], -A2_Q14[2], gain_Q16 }            */
 14988  /* where it is assumed that B*_Q14[0], B*_Q14[2], A*_Q14[0] are all 16384                           */
 14989  func SKP_Silk_resampler_private_ARMA4(tls *libc.TLS, S uintptr, out uintptr, in uintptr, Coef uintptr, len int32) { /* SKP_Silk_resampler_private_ARMA4.c:45:6: */
 14990  	var k int32
 14991  	var in_Q8 int32
 14992  	var out1_Q8 int32
 14993  	var out2_Q8 int32
 14994  	var X int32
 14995  
 14996  	for k = 0; k < len; k++ {
 14997  		in_Q8 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (8))
 14998  
 14999  		/* Outputs of first and second biquad */
 15000  		out1_Q8 = ((in_Q8) + ((*(*int32)(unsafe.Pointer(S))) << (2)))
 15001  		out2_Q8 = ((out1_Q8) + ((*(*int32)(unsafe.Pointer(S + 2*4))) << (2)))
 15002  
 15003  		/* Update states, which are stored in Q6. Coefficients are in Q14 here */
 15004  		X = ((*(*int32)(unsafe.Pointer(S + 1*4))) + ((((in_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef))))) + ((((in_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef))))) >> 16)))
 15005  		*(*int32)(unsafe.Pointer(S)) = ((X) + ((((out1_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 2*2))))) + ((((out1_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 2*2))))) >> 16)))
 15006  
 15007  		X = ((*(*int32)(unsafe.Pointer(S + 3*4))) + ((((out1_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 1*2))))) + ((((out1_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 1*2))))) >> 16)))
 15008  		*(*int32)(unsafe.Pointer(S + 2*4)) = ((X) + ((((out2_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 4*2))))) + ((((out2_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 4*2))))) >> 16)))
 15009  
 15010  		*(*int32)(unsafe.Pointer(S + 1*4)) = (((in_Q8) >> (2)) + ((((out1_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 3*2))))) + ((((out1_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 3*2))))) >> 16)))
 15011  		*(*int32)(unsafe.Pointer(S + 3*4)) = (((out1_Q8) >> (2)) + ((((out2_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 5*2))))) + ((((out2_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 5*2))))) >> 16)))
 15012  
 15013  		/* Apply gain and store to output. The coefficient is in Q16 */
 15014  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
 15015  			if (((128) + ((((out2_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 6*2))))) + ((((out2_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 6*2))))) >> 16))) >> (8)) > 0x7FFF {
 15016  				return int16(0x7FFF)
 15017  			}
 15018  			return func() int16 {
 15019  				if (((128) + ((((out2_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 6*2))))) + ((((out2_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 6*2))))) >> 16))) >> (8)) < (int32(libc.Int16FromInt32(0x8000))) {
 15020  					return libc.Int16FromInt32(0x8000)
 15021  				}
 15022  				return (int16(((128) + ((((out2_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 6*2))))) + ((((out2_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 6*2))))) >> 16))) >> (8)))
 15023  			}()
 15024  		}()
 15025  	}
 15026  }
 15027  
 15028  /* Copy */
 15029  func SKP_Silk_resampler_private_copy(tls *libc.TLS, SS uintptr, out uintptr, in uintptr, inLen int32) { /* SKP_Silk_resampler_private_copy.c:41:6: */
 15030  	libc.Xmemcpy(tls, out, in, (uint64(inLen) * uint64(unsafe.Sizeof(int16(0)))))
 15031  }
 15032  
 15033  /* Downsample by a factor 4. Note: very low quality, only use with input sampling rates above 96 kHz. */
 15034  func SKP_Silk_resampler_private_down4(tls *libc.TLS, S uintptr, out uintptr, in uintptr, inLen int32) { /* SKP_Silk_resampler_private_down4.c:40:6: */
 15035  	var k int32
 15036  	var len4 int32 = ((inLen) >> (2))
 15037  	var in32 int32
 15038  	var out32 int32
 15039  	var Y int32
 15040  	var X int32
 15041  
 15042  	/* Internal variables and state are in Q10 format */
 15043  	for k = 0; k < len4; k++ {
 15044  		/* Add two input samples and convert to Q10 */
 15045  		in32 = (((int32(*(*int16)(unsafe.Pointer(in + uintptr((4*k))*2)))) + (int32(*(*int16)(unsafe.Pointer(in + uintptr(((4*k)+1))*2))))) << (9))
 15046  
 15047  		/* All-pass section for even input sample */
 15048  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S))))
 15049  		X = ((Y) + ((((Y) >> 16) * (int32(SKP_Silk_resampler_down2_1))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_down2_1))) >> 16)))
 15050  		out32 = ((*(*int32)(unsafe.Pointer(S))) + (X))
 15051  		*(*int32)(unsafe.Pointer(S)) = ((in32) + (X))
 15052  
 15053  		/* Add two input samples and convert to Q10 */
 15054  		in32 = (((int32(*(*int16)(unsafe.Pointer(in + uintptr(((4*k)+2))*2)))) + (int32(*(*int16)(unsafe.Pointer(in + uintptr(((4*k)+3))*2))))) << (9))
 15055  
 15056  		/* All-pass section for odd input sample */
 15057  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S + 1*4))))
 15058  		X = ((((Y) >> 16) * (int32(SKP_Silk_resampler_down2_0))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_down2_0))) >> 16))
 15059  		out32 = ((out32) + (*(*int32)(unsafe.Pointer(S + 1*4))))
 15060  		out32 = ((out32) + (X))
 15061  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((in32) + (X))
 15062  
 15063  		/* Add, convert back to int16 and store to output */
 15064  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
 15065  			if (func() int32 {
 15066  				if (11) == 1 {
 15067  					return (((out32) >> 1) + ((out32) & 1))
 15068  				}
 15069  				return ((((out32) >> ((11) - 1)) + 1) >> 1)
 15070  			}()) > 0x7FFF {
 15071  				return int16(0x7FFF)
 15072  			}
 15073  			return func() int16 {
 15074  				if (func() int32 {
 15075  					if (11) == 1 {
 15076  						return (((out32) >> 1) + ((out32) & 1))
 15077  					}
 15078  					return ((((out32) >> ((11) - 1)) + 1) >> 1)
 15079  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15080  					return libc.Int16FromInt32(0x8000)
 15081  				}
 15082  				return func() int16 {
 15083  					if (11) == 1 {
 15084  						return (int16(((out32) >> 1) + ((out32) & 1)))
 15085  					}
 15086  					return (int16((((out32) >> ((11) - 1)) + 1) >> 1))
 15087  				}()
 15088  			}()
 15089  		}()
 15090  	}
 15091  }
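// The heavily parenthesised arithmetic above is ccgo's literal expansion of a
// couple of SILK fixed-point macros. As a rough plain-Go sketch (names are
// illustrative and not part of the generated API): smulwb multiplies a 32-bit
// value by a 16-bit coefficient and keeps the upper word of the product,
// i.e. (a * int32(b)) >> 16 computed without a 64-bit multiply, and satRound
// is the round-to-nearest right shift plus int16 saturation used whenever an
// output sample is stored.
func smulwb(a int32, b int16) int32 {
	return (a>>16)*int32(b) + ((a&0x0000FFFF)*int32(b))>>16
}

func satRound(a int32, shift uint) int16 {
	var r int32
	if shift == 1 {
		r = (a >> 1) + (a & 1) // shift by 1: add the dropped bit back
	} else {
		r = ((a >> (shift - 1)) + 1) >> 1
	}
	if r > 0x7FFF {
		return 0x7FFF
	}
	if r < -0x8000 {
		return -0x8000
	}
	return int16(r)
}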
 15092  
 15093  func SKP_Silk_resampler_private_down_FIR_INTERPOL0(tls *libc.TLS, out uintptr, buf2 uintptr, FIR_Coefs uintptr, max_index_Q16 int32, index_increment_Q16 int32) uintptr { /* SKP_Silk_resampler_private_down_FIR.c:39:22: */
 15094  	var index_Q16 int32
 15095  	var res_Q6 int32
 15096  	var buf_ptr uintptr
 15097  	for index_Q16 = 0; index_Q16 < max_index_Q16; index_Q16 = index_Q16 + (index_increment_Q16) {
 15098  		/* Integer part gives pointer to buffered input */
 15099  		buf_ptr = (buf2 + uintptr(((index_Q16)>>(16)))*4)
 15100  
 15101  		/* Inner product */
 15102  		res_Q6 = (((((*(*int32)(unsafe.Pointer(buf_ptr))) + (*(*int32)(unsafe.Pointer(buf_ptr + 11*4)))) >> 16) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs))))) + (((((*(*int32)(unsafe.Pointer(buf_ptr))) + (*(*int32)(unsafe.Pointer(buf_ptr + 11*4)))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs))))) >> 16))
 15103  		res_Q6 = ((res_Q6) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 10*4)))) >> 16) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 1*2))))) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 10*4)))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 1*2))))) >> 16)))
 15104  		res_Q6 = ((res_Q6) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 9*4)))) >> 16) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 2*2))))) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 9*4)))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 2*2))))) >> 16)))
 15105  		res_Q6 = ((res_Q6) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 8*4)))) >> 16) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 3*2))))) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 8*4)))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 3*2))))) >> 16)))
 15106  		res_Q6 = ((res_Q6) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 4*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 7*4)))) >> 16) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 4*2))))) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 4*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 7*4)))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 4*2))))) >> 16)))
 15107  		res_Q6 = ((res_Q6) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 5*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 6*4)))) >> 16) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 5*2))))) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 5*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 6*4)))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 5*2))))) >> 16)))
 15108  
 15109  		/* Scale down, saturate and store in output array */
 15110  		*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&out, 2))) = func() int16 {
 15111  			if (func() int32 {
 15112  				if (6) == 1 {
 15113  					return (((res_Q6) >> 1) + ((res_Q6) & 1))
 15114  				}
 15115  				return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 15116  			}()) > 0x7FFF {
 15117  				return int16(0x7FFF)
 15118  			}
 15119  			return func() int16 {
 15120  				if (func() int32 {
 15121  					if (6) == 1 {
 15122  						return (((res_Q6) >> 1) + ((res_Q6) & 1))
 15123  					}
 15124  					return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 15125  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15126  					return libc.Int16FromInt32(0x8000)
 15127  				}
 15128  				return func() int16 {
 15129  					if (6) == 1 {
 15130  						return (int16(((res_Q6) >> 1) + ((res_Q6) & 1)))
 15131  					}
 15132  					return (int16((((res_Q6) >> ((6) - 1)) + 1) >> 1))
 15133  				}()
 15134  			}()
 15135  		}()
 15136  	}
 15137  	return out
 15138  }
 15139  
 15140  func SKP_Silk_resampler_private_down_FIR_INTERPOL1(tls *libc.TLS, out uintptr, buf2 uintptr, FIR_Coefs uintptr, max_index_Q16 int32, index_increment_Q16 int32, FIR_Fracs int32) uintptr { /* SKP_Silk_resampler_private_down_FIR.c:62:22: */
 15141  	var index_Q16 int32
 15142  	var res_Q6 int32
 15143  	var buf_ptr uintptr
 15144  	var interpol_ind int32
 15145  	var interpol_ptr uintptr
 15146  	for index_Q16 = 0; index_Q16 < max_index_Q16; index_Q16 = index_Q16 + (index_increment_Q16) {
 15147  		/* Integer part gives pointer to buffered input */
 15148  		buf_ptr = (buf2 + uintptr(((index_Q16)>>(16)))*4)
 15149  
 15150  		/* Fractional part gives interpolation coefficients */
 15151  		interpol_ind = ((((index_Q16 & 0xFFFF) >> 16) * (int32(int16(FIR_Fracs)))) + ((((index_Q16 & 0xFFFF) & 0x0000FFFF) * (int32(int16(FIR_Fracs)))) >> 16))
 15152  
 15153  		/* Inner product */
 15154  		interpol_ptr = (FIR_Coefs + uintptr(((12/2)*interpol_ind))*2)
 15155  		res_Q6 = ((((*(*int32)(unsafe.Pointer(buf_ptr))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr))))) >> 16))
 15156  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 1*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 1*2))))) >> 16)))
 15157  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 2*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 2*2))))) >> 16)))
 15158  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 3*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 3*2))))) >> 16)))
 15159  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 4*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 4*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 4*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 4*2))))) >> 16)))
 15160  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 5*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 5*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 5*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 5*2))))) >> 16)))
 15161  		interpol_ptr = (FIR_Coefs + uintptr(((12/2)*((FIR_Fracs-1)-interpol_ind)))*2)
 15162  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 11*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 11*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr))))) >> 16)))
 15163  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 10*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 1*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 10*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 1*2))))) >> 16)))
 15164  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 9*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 2*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 9*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 2*2))))) >> 16)))
 15165  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 8*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 3*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 8*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 3*2))))) >> 16)))
 15166  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 7*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 4*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 7*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 4*2))))) >> 16)))
 15167  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 6*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 5*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 6*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 5*2))))) >> 16)))
 15168  
 15169  		/* Scale down, saturate and store in output array */
 15170  		*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&out, 2))) = func() int16 {
 15171  			if (func() int32 {
 15172  				if (6) == 1 {
 15173  					return (((res_Q6) >> 1) + ((res_Q6) & 1))
 15174  				}
 15175  				return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 15176  			}()) > 0x7FFF {
 15177  				return int16(0x7FFF)
 15178  			}
 15179  			return func() int16 {
 15180  				if (func() int32 {
 15181  					if (6) == 1 {
 15182  						return (((res_Q6) >> 1) + ((res_Q6) & 1))
 15183  					}
 15184  					return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 15185  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15186  					return libc.Int16FromInt32(0x8000)
 15187  				}
 15188  				return func() int16 {
 15189  					if (6) == 1 {
 15190  						return (int16(((res_Q6) >> 1) + ((res_Q6) & 1)))
 15191  					}
 15192  					return (int16((((res_Q6) >> ((6) - 1)) + 1) >> 1))
 15193  				}()
 15194  			}()
 15195  		}()
 15196  	}
 15197  	return out
 15198  }
 15199  
 15200  /* Resample with a 2x downsampler (optional), a 2nd order AR filter followed by FIR interpolation */
 15201  func SKP_Silk_resampler_private_down_FIR(tls *libc.TLS, SS uintptr, out uintptr, in uintptr, inLen int32) { /* SKP_Silk_resampler_private_down_FIR.c:100:6: */
 15202  	bp := tls.Alloc(2448)
 15203  	defer tls.Free(2448)
 15204  
 15205  	var S uintptr = SS
 15206  	var nSamplesIn int32
 15207  	var max_index_Q16 int32
 15208  	var index_increment_Q16 int32
 15209  	// var buf1 [240]int16 at bp+1968, 480
 15210  
 15211  	// var buf2 [492]int32 at bp, 1968
 15212  
 15213  	var FIR_Coefs uintptr
 15214  
 15215  	/* Copy buffered samples to start of buffer */
 15216  	libc.Xmemcpy(tls, bp /* &buf2[0] */, S+24 /* &.sFIR */, (uint64(12) * uint64(unsafe.Sizeof(int32(0)))))
 15217  
 15218  	FIR_Coefs = ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs + 2*2)
 15219  
 15220  	/* Iterate over blocks of frameSizeIn input samples */
 15221  	index_increment_Q16 = (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FinvRatio_Q16
 15222  	for 1 != 0 {
 15223  		nSamplesIn = func() int32 {
 15224  			if (inLen) < ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize) {
 15225  				return inLen
 15226  			}
 15227  			return (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize
 15228  		}()
 15229  
 15230  		if (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x == 1 {
 15231  			/* Downsample 2x */
 15232  			SKP_Silk_resampler_down2(tls, S+88 /* &.sDown2 */, bp+1968 /* &buf1[0] */, in, nSamplesIn)
 15233  
 15234  			nSamplesIn = ((nSamplesIn) >> (1))
 15235  
 15236  			/* Second-order AR filter (output in Q8) */
 15237  			SKP_Silk_resampler_private_AR2(tls, S /* &.sIIR */, (bp /* &buf2 */ + 12*4), bp+1968 /* &buf1[0] */, (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs, nSamplesIn)
 15238  		} else {
 15239  			/* Second-order AR filter (output in Q8) */
 15240  			SKP_Silk_resampler_private_AR2(tls, S /* &.sIIR */, (bp /* &buf2 */ + 12*4), in, (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs, nSamplesIn)
 15241  		}
 15242  
 15243  		max_index_Q16 = ((nSamplesIn) << (16))
 15244  
 15245  		/* Interpolate filtered signal */
 15246  		if (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs == 1 {
 15247  			out = SKP_Silk_resampler_private_down_FIR_INTERPOL0(tls, out, bp /* &buf2[0] */, FIR_Coefs, max_index_Q16, index_increment_Q16)
 15248  		} else {
 15249  			out = SKP_Silk_resampler_private_down_FIR_INTERPOL1(tls, out, bp /* &buf2[0] */, FIR_Coefs, max_index_Q16, index_increment_Q16, (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs)
 15250  		}
 15251  
 15252  		in += 2 * uintptr((nSamplesIn << (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x))
 15253  		inLen = inLen - (nSamplesIn << (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x)
 15254  
 15255  		if inLen > (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x {
 15256  			/* More iterations to do; copy last part of filtered signal to beginning of buffer */
 15257  			libc.Xmemcpy(tls, bp /* &buf2[0] */, (bp /* &buf2 */ + uintptr(nSamplesIn)*4), (uint64(12) * uint64(unsafe.Sizeof(int32(0)))))
 15258  		} else {
 15259  			break
 15260  		}
 15261  	}
 15262  
 15263  	/* Copy last part of filtered signal to the state for the next call */
 15264  	libc.Xmemcpy(tls, S+24 /* &.sFIR */, (bp /* &buf2 */ + uintptr(nSamplesIn)*4), (uint64(12) * uint64(unsafe.Sizeof(int32(0)))))
 15265  }
 15266  
 15267  func SKP_Silk_resampler_private_IIR_FIR_INTERPOL(tls *libc.TLS, out uintptr, buf uintptr, max_index_Q16 int32, index_increment_Q16 int32) uintptr { /* SKP_Silk_resampler_private_IIR_FIR.c:39:22: */
 15268  	var index_Q16 int32
 15269  	var res_Q15 int32
 15270  	var buf_ptr uintptr
 15271  	var table_index int32
 15272  	/* Interpolate upsampled signal and store in output array */
 15273  	for index_Q16 = 0; index_Q16 < max_index_Q16; index_Q16 = index_Q16 + (index_increment_Q16) {
 15274  		table_index = ((((index_Q16 & 0xFFFF) >> 16) * (int32(int16(144)))) + ((((index_Q16 & 0xFFFF) & 0x0000FFFF) * (int32(int16(144)))) >> 16))
 15275  		buf_ptr = (buf + uintptr((index_Q16>>16))*2)
 15276  
 15277  		res_Q15 = ((int32(*(*int16)(unsafe.Pointer(buf_ptr)))) * (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_resampler_frac_FIR_144)) + uintptr(table_index)*6))))))
 15278  		res_Q15 = ((res_Q15) + ((int32(*(*int16)(unsafe.Pointer(buf_ptr + 1*2)))) * (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_resampler_frac_FIR_144)) + uintptr(table_index)*6) + 1*2))))))
 15279  		res_Q15 = ((res_Q15) + ((int32(*(*int16)(unsafe.Pointer(buf_ptr + 2*2)))) * (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_resampler_frac_FIR_144)) + uintptr(table_index)*6) + 2*2))))))
 15280  		res_Q15 = ((res_Q15) + ((int32(*(*int16)(unsafe.Pointer(buf_ptr + 3*2)))) * (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_resampler_frac_FIR_144)) + uintptr((143-table_index))*6) + 2*2))))))
 15281  		res_Q15 = ((res_Q15) + ((int32(*(*int16)(unsafe.Pointer(buf_ptr + 4*2)))) * (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_resampler_frac_FIR_144)) + uintptr((143-table_index))*6) + 1*2))))))
 15282  		res_Q15 = ((res_Q15) + ((int32(*(*int16)(unsafe.Pointer(buf_ptr + 5*2)))) * (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_resampler_frac_FIR_144)) + uintptr((143-table_index))*6)))))))
 15283  		*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&out, 2))) = func() int16 {
 15284  			if (func() int32 {
 15285  				if (15) == 1 {
 15286  					return (((res_Q15) >> 1) + ((res_Q15) & 1))
 15287  				}
 15288  				return ((((res_Q15) >> ((15) - 1)) + 1) >> 1)
 15289  			}()) > 0x7FFF {
 15290  				return int16(0x7FFF)
 15291  			}
 15292  			return func() int16 {
 15293  				if (func() int32 {
 15294  					if (15) == 1 {
 15295  						return (((res_Q15) >> 1) + ((res_Q15) & 1))
 15296  					}
 15297  					return ((((res_Q15) >> ((15) - 1)) + 1) >> 1)
 15298  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15299  					return libc.Int16FromInt32(0x8000)
 15300  				}
 15301  				return func() int16 {
 15302  					if (15) == 1 {
 15303  						return (int16(((res_Q15) >> 1) + ((res_Q15) & 1)))
 15304  					}
 15305  					return (int16((((res_Q15) >> ((15) - 1)) + 1) >> 1))
 15306  				}()
 15307  			}()
 15308  		}()
 15309  	}
 15310  	return out
 15311  }
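// A plain-Go reading of SKP_Silk_resampler_private_IIR_FIR_INTERPOL above
// (an illustrative sketch, not the generated API; satRound is the helper
// sketched earlier): the integer part of index_Q16 selects six buffered
// samples, the fractional part selects one of the 144 three-tap sub-filters,
// and the mirrored entry 143 - table_index supplies the other three taps of
// the symmetric 6-tap FIR.
func iirFIRInterpolRef(out, buf []int16, maxIndexQ16, incQ16 int32) []int16 {
	for idx := int32(0); idx < maxIndexQ16; idx += incQ16 {
		ti := ((idx & 0xFFFF) * 144) >> 16 // fractional phase mapped to [0, 143]
		b := buf[idx>>16:]
		var resQ15 int32
		for t := int32(0); t < 3; t++ {
			resQ15 += int32(b[t]) * int32(SKP_Silk_resampler_frac_FIR_144[ti][t])
			resQ15 += int32(b[5-t]) * int32(SKP_Silk_resampler_frac_FIR_144[143-ti][t])
		}
		out = append(out, satRound(resQ15, 15))
	}
	return out
}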
 15312  
 15313  /* Upsample using a combination of allpass-based 2x upsampling and FIR interpolation */
 15314  func SKP_Silk_resampler_private_IIR_FIR(tls *libc.TLS, SS uintptr, out uintptr, in uintptr, inLen int32) { /* SKP_Silk_resampler_private_IIR_FIR.c:60:6: */
 15315  	bp := tls.Alloc(1932)
 15316  	defer tls.Free(1932)
 15317  
 15318  	var S uintptr = SS
 15319  	var nSamplesIn int32
 15320  	var max_index_Q16 int32
 15321  	var index_increment_Q16 int32
 15322  	// var buf [966]int16 at bp, 1932
 15323  
 15324  	/* Copy buffered samples to start of buffer */
 15325  	libc.Xmemcpy(tls, bp /* &buf[0] */, S+24 /* &.sFIR */, (uint64(6) * uint64(unsafe.Sizeof(int32(0)))))
 15326  
 15327  	/* Iterate over blocks of frameSizeIn input samples */
 15328  	index_increment_Q16 = (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FinvRatio_Q16
 15329  	for 1 != 0 {
 15330  		nSamplesIn = func() int32 {
 15331  			if (inLen) < ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize) {
 15332  				return inLen
 15333  			}
 15334  			return (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize
 15335  		}()
 15336  
 15337  		if (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x == 1 {
 15338  			/* Upsample 2x */
 15339  			(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 104 /* &.up2_function */))))(tls, S /* &.sIIR */, (bp /* &buf */ + 6*2), in, nSamplesIn)
 15340  		} else {
 15341  			/* Fourth-order ARMA filter */
 15342  			SKP_Silk_resampler_private_ARMA4(tls, S /* &.sIIR */, (bp /* &buf */ + 6*2), in, (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs, nSamplesIn)
 15343  		}
 15344  
 15345  		max_index_Q16 = ((nSamplesIn) << (16 + (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x)) /* +1 if 2x upsampling */
 15346  		out = SKP_Silk_resampler_private_IIR_FIR_INTERPOL(tls, out, bp /* &buf[0] */, max_index_Q16, index_increment_Q16)
 15347  		in += 2 * uintptr(nSamplesIn)
 15348  		inLen = inLen - (nSamplesIn)
 15349  
 15350  		if inLen > 0 {
 15351  			/* More iterations to do; copy last part of filtered signal to beginning of buffer */
 15352  			libc.Xmemcpy(tls, bp /* &buf[0] */, (bp /* &buf */ + uintptr((nSamplesIn<<(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x))*2), (uint64(6) * uint64(unsafe.Sizeof(int32(0)))))
 15353  		} else {
 15354  			break
 15355  		}
 15356  	}
 15357  
 15358  	/* Copy last part of filtered signal to the state for the next call */
 15359  	libc.Xmemcpy(tls, S+24 /* &.sFIR */, (bp /* &buf */ + uintptr((nSamplesIn<<(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x))*2), (uint64(6) * uint64(unsafe.Sizeof(int32(0)))))
 15360  }
 15361  
 15362  /* Upsample by a factor 2, high quality */
 15363  /* Uses 2nd order allpass filters for the 2x upsampling, followed by a      */
 15364  /* notch filter just above Nyquist.                                         */
 15365  func SKP_Silk_resampler_private_up2_HQ(tls *libc.TLS, S uintptr, out uintptr, in uintptr, len int32) { /* SKP_Silk_resampler_private_up2_HQ.c:42:6: */
 15366  	var k int32
 15367  	var in32 int32
 15368  	var out32_1 int32
 15369  	var out32_2 int32
 15370  	var Y int32
 15371  	var X int32
 15372  
 15373  	/* Internal variables and state are in Q10 format */
 15374  	for k = 0; k < len; k++ {
 15375  		/* Convert to Q10 */
 15376  		in32 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (10))
 15377  
 15378  		/* First all-pass section for even output sample */
 15379  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S))))
 15380  		X = ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_hq_0[0]))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_0[0]))) >> 16))
 15381  		out32_1 = ((*(*int32)(unsafe.Pointer(S))) + (X))
 15382  		*(*int32)(unsafe.Pointer(S)) = ((in32) + (X))
 15383  
 15384  		/* Second all-pass section for even output sample */
 15385  		Y = ((out32_1) - (*(*int32)(unsafe.Pointer(S + 1*4))))
 15386  		X = ((Y) + ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_hq_0[1]))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_0[1]))) >> 16)))
 15387  		out32_2 = ((*(*int32)(unsafe.Pointer(S + 1*4))) + (X))
 15388  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((out32_1) + (X))
 15389  
 15390  		/* Biquad notch filter */
 15391  		out32_2 = ((out32_2) + ((((*(*int32)(unsafe.Pointer(S + 5*4))) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[2]))) + ((((*(*int32)(unsafe.Pointer(S + 5*4))) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[2]))) >> 16)))
 15392  		out32_2 = ((out32_2) + ((((*(*int32)(unsafe.Pointer(S + 4*4))) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[1]))) + ((((*(*int32)(unsafe.Pointer(S + 4*4))) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[1]))) >> 16)))
 15393  		out32_1 = ((out32_2) + ((((*(*int32)(unsafe.Pointer(S + 4*4))) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[0]))) + ((((*(*int32)(unsafe.Pointer(S + 4*4))) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[0]))) >> 16)))
 15394  		*(*int32)(unsafe.Pointer(S + 5*4)) = ((out32_2) - (*(*int32)(unsafe.Pointer(S + 5*4))))
 15395  
 15396  		/* Apply gain in Q15, convert back to int16 and store to output */
 15397  		*(*int16)(unsafe.Pointer(out + uintptr((2*k))*2)) = func() int16 {
 15398  			if (((256) + ((((out32_1) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) + ((((out32_1) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) >> 16))) >> (9)) > 0x7FFF {
 15399  				return int16(0x7FFF)
 15400  			}
 15401  			return func() int16 {
 15402  				if (((256) + ((((out32_1) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) + ((((out32_1) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) >> 16))) >> (9)) < (int32(libc.Int16FromInt32(0x8000))) {
 15403  					return libc.Int16FromInt32(0x8000)
 15404  				}
 15405  				return (int16(((256) + ((((out32_1) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) + ((((out32_1) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) >> 16))) >> (9)))
 15406  			}()
 15407  		}()
 15408  
 15409  		/* First all-pass section for odd output sample */
 15410  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S + 2*4))))
 15411  		X = ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_hq_1[0]))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_1[0]))) >> 16))
 15412  		out32_1 = ((*(*int32)(unsafe.Pointer(S + 2*4))) + (X))
 15413  		*(*int32)(unsafe.Pointer(S + 2*4)) = ((in32) + (X))
 15414  
 15415  		/* Second all-pass section for odd output sample */
 15416  		Y = ((out32_1) - (*(*int32)(unsafe.Pointer(S + 3*4))))
 15417  		X = ((Y) + ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_hq_1[1]))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_1[1]))) >> 16)))
 15418  		out32_2 = ((*(*int32)(unsafe.Pointer(S + 3*4))) + (X))
 15419  		*(*int32)(unsafe.Pointer(S + 3*4)) = ((out32_1) + (X))
 15420  
 15421  		/* Biquad notch filter */
 15422  		out32_2 = ((out32_2) + ((((*(*int32)(unsafe.Pointer(S + 4*4))) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[2]))) + ((((*(*int32)(unsafe.Pointer(S + 4*4))) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[2]))) >> 16)))
 15423  		out32_2 = ((out32_2) + ((((*(*int32)(unsafe.Pointer(S + 5*4))) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[1]))) + ((((*(*int32)(unsafe.Pointer(S + 5*4))) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[1]))) >> 16)))
 15424  		out32_1 = ((out32_2) + ((((*(*int32)(unsafe.Pointer(S + 5*4))) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[0]))) + ((((*(*int32)(unsafe.Pointer(S + 5*4))) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[0]))) >> 16)))
 15425  		*(*int32)(unsafe.Pointer(S + 4*4)) = ((out32_2) - (*(*int32)(unsafe.Pointer(S + 4*4))))
 15426  
 15427  		/* Apply gain in Q15, convert back to int16 and store to output */
 15428  		*(*int16)(unsafe.Pointer(out + uintptr(((2*k)+1))*2)) = func() int16 {
 15429  			if (((256) + ((((out32_1) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) + ((((out32_1) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) >> 16))) >> (9)) > 0x7FFF {
 15430  				return int16(0x7FFF)
 15431  			}
 15432  			return func() int16 {
 15433  				if (((256) + ((((out32_1) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) + ((((out32_1) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) >> 16))) >> (9)) < (int32(libc.Int16FromInt32(0x8000))) {
 15434  					return libc.Int16FromInt32(0x8000)
 15435  				}
 15436  				return (int16(((256) + ((((out32_1) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) + ((((out32_1) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) >> 16))) >> (9)))
 15437  			}()
 15438  		}()
 15439  	}
 15440  }
 15441  
 15442  func SKP_Silk_resampler_private_up2_HQ_wrapper(tls *libc.TLS, SS uintptr, out uintptr, in uintptr, len int32) { /* SKP_Silk_resampler_private_up2_HQ.c:109:6: */
 15443  	var S uintptr = SS
 15444  	SKP_Silk_resampler_private_up2_HQ(tls, S /* &.sIIR */, out, in, len)
 15445  }
 15446  
 15447  /* Upsample by a factor 4. Note: very low quality, only use with output sampling rates above 96 kHz. */
 15448  func SKP_Silk_resampler_private_up4(tls *libc.TLS, S uintptr, out uintptr, in uintptr, len int32) { /* SKP_Silk_resampler_private_up4.c:40:6: */
 15449  	var k int32
 15450  	var in32 int32
 15451  	var out32 int32
 15452  	var Y int32
 15453  	var X int32
 15454  	var out16 int16
 15455  
 15456  	/* Internal variables and state are in Q10 format */
 15457  	for k = 0; k < len; k++ {
 15458  		/* Convert to Q10 */
 15459  		in32 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (10))
 15460  
 15461  		/* All-pass section for even output sample */
 15462  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S))))
 15463  		X = ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_lq_0))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_lq_0))) >> 16))
 15464  		out32 = ((*(*int32)(unsafe.Pointer(S))) + (X))
 15465  		*(*int32)(unsafe.Pointer(S)) = ((in32) + (X))
 15466  
 15467  		/* Convert back to int16 and store to output */
 15468  		out16 = func() int16 {
 15469  			if (func() int32 {
 15470  				if (10) == 1 {
 15471  					return (((out32) >> 1) + ((out32) & 1))
 15472  				}
 15473  				return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15474  			}()) > 0x7FFF {
 15475  				return int16(0x7FFF)
 15476  			}
 15477  			return func() int16 {
 15478  				if (func() int32 {
 15479  					if (10) == 1 {
 15480  						return (((out32) >> 1) + ((out32) & 1))
 15481  					}
 15482  					return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15483  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15484  					return libc.Int16FromInt32(0x8000)
 15485  				}
 15486  				return func() int16 {
 15487  					if (10) == 1 {
 15488  						return (int16(((out32) >> 1) + ((out32) & 1)))
 15489  					}
 15490  					return (int16((((out32) >> ((10) - 1)) + 1) >> 1))
 15491  				}()
 15492  			}()
 15493  		}()
 15494  		*(*int16)(unsafe.Pointer(out + uintptr((4*k))*2)) = out16
 15495  		*(*int16)(unsafe.Pointer(out + uintptr(((4*k)+1))*2)) = out16
 15496  
 15497  		/* All-pass section for odd output sample */
 15498  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S + 1*4))))
 15499  		X = ((Y) + ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_lq_1))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_lq_1))) >> 16)))
 15500  		out32 = ((*(*int32)(unsafe.Pointer(S + 1*4))) + (X))
 15501  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((in32) + (X))
 15502  
 15503  		/* Convert back to int16 and store to output */
 15504  		out16 = func() int16 {
 15505  			if (func() int32 {
 15506  				if (10) == 1 {
 15507  					return (((out32) >> 1) + ((out32) & 1))
 15508  				}
 15509  				return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15510  			}()) > 0x7FFF {
 15511  				return int16(0x7FFF)
 15512  			}
 15513  			return func() int16 {
 15514  				if (func() int32 {
 15515  					if (10) == 1 {
 15516  						return (((out32) >> 1) + ((out32) & 1))
 15517  					}
 15518  					return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15519  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15520  					return libc.Int16FromInt32(0x8000)
 15521  				}
 15522  				return func() int16 {
 15523  					if (10) == 1 {
 15524  						return (int16(((out32) >> 1) + ((out32) & 1)))
 15525  					}
 15526  					return (int16((((out32) >> ((10) - 1)) + 1) >> 1))
 15527  				}()
 15528  			}()
 15529  		}()
 15530  		*(*int16)(unsafe.Pointer(out + uintptr(((4*k)+2))*2)) = out16
 15531  		*(*int16)(unsafe.Pointer(out + uintptr(((4*k)+3))*2)) = out16
 15532  	}
 15533  }
 15534  
 15535  /* Tables for 2x downsampler */
 15536  var SKP_Silk_resampler_down2_0 int16 = int16(9872)            /* SKP_Silk_resampler_rom.c:41:17 */
 15537  var SKP_Silk_resampler_down2_1 int16 = (int16(39809 - 65536)) /* SKP_Silk_resampler_rom.c:42:17 */
 15538  
 15539  /* Tables for 2x upsampler, low quality */
 15540  var SKP_Silk_resampler_up2_lq_0 int16 = int16(8102)            /* SKP_Silk_resampler_rom.c:45:17 */
 15541  var SKP_Silk_resampler_up2_lq_1 int16 = (int16(36783 - 65536)) /* SKP_Silk_resampler_rom.c:46:17 */
 15542  
 15543  /* Tables for 2x upsampler, high quality */
 15544  var SKP_Silk_resampler_up2_hq_0 = [2]int16{int16(4280), (int16(33727 - 65536))}  /* SKP_Silk_resampler_rom.c:49:17 */
 15545  var SKP_Silk_resampler_up2_hq_1 = [2]int16{int16(16295), (int16(54015 - 65536))} /* SKP_Silk_resampler_rom.c:50:17 */
 15546  /* Matlab code for the notch filter coefficients: */
 15547  /* B = [1, 0.12, 1];  A = [1, 0.055, 0.8]; G = 0.87; freqz(G * B, A, 2^14, 16e3); axis([0, 8000, -10, 1]);  */
 15548  /* fprintf('\t%6d, %6d, %6d, %6d\n', round(B(2)*2^16), round(-A(2)*2^16), round((1-A(3))*2^16), round(G*2^15)) */
 15549  var SKP_Silk_resampler_up2_hq_notch = [4]int16{int16(7864), int16(-3604), int16(13107), int16(28508)} /* SKP_Silk_resampler_rom.c:54:17 */
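// Spelling out the rounding from the Matlab snippet above (a quick check of
// the table entries, not part of the generated code):
//   round( 0.12     * 2^16) =  7864    B(2)
//   round(-0.055    * 2^16) = -3604   -A(2)
//   round((1 - 0.8) * 2^16) = 13107    1 - A(3)
//   round( 0.87     * 2^15) = 28508    G
// which matches SKP_Silk_resampler_up2_hq_notch.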
 15550  
 15551  /* Tables with IIR and FIR coefficients for fractional downsamplers (70 Words) */
 15552  var SKP_Silk_Resampler_3_4_COEFS = [20]int16{
 15553  	int16(-18249), int16(-12532),
 15554  	int16(-97), int16(284), int16(-495), int16(309), int16(10268), int16(20317),
 15555  	int16(-94), int16(156), int16(-48), int16(-720), int16(5984), int16(18278),
 15556  	int16(-45), int16(-4), int16(237), int16(-847), int16(2540), int16(14662),
 15557  } /* SKP_Silk_resampler_rom.c:58:33 */
 15558  
 15559  var SKP_Silk_Resampler_2_3_COEFS = [14]int16{
 15560  	int16(-11891), int16(-12486),
 15561  	int16(20), int16(211), int16(-657), int16(688), int16(8423), int16(15911),
 15562  	int16(-44), int16(197), int16(-152), int16(-653), int16(3855), int16(13015),
 15563  } /* SKP_Silk_resampler_rom.c:65:33 */
 15564  
 15565  var SKP_Silk_Resampler_1_2_COEFS = [8]int16{
 15566  	int16(2415), int16(-13101),
 15567  	int16(158), int16(-295), int16(-400), int16(1265), int16(4832), int16(7968),
 15568  } /* SKP_Silk_resampler_rom.c:71:33 */
 15569  
 15570  var SKP_Silk_Resampler_3_8_COEFS = [20]int16{
 15571  	int16(13270), int16(-13738),
 15572  	int16(-294), int16(-123), int16(747), int16(2043), int16(3339), int16(3995),
 15573  	int16(-151), int16(-311), int16(414), int16(1583), int16(2947), int16(3877),
 15574  	int16(-33), int16(-389), int16(143), int16(1141), int16(2503), int16(3653),
 15575  } /* SKP_Silk_resampler_rom.c:76:33 */
 15576  
 15577  var SKP_Silk_Resampler_1_3_COEFS = [8]int16{
 15578  	int16(16643), int16(-14000),
 15579  	int16(-331), int16(19), int16(581), int16(1421), int16(2290), int16(2845),
 15580  } /* SKP_Silk_resampler_rom.c:83:33 */
 15581  
 15582  var SKP_Silk_Resampler_2_3_COEFS_LQ = [6]int16{
 15583  	int16(-2797), int16(-6507),
 15584  	int16(4697), int16(10739),
 15585  	int16(1567), int16(8276),
 15586  } /* SKP_Silk_resampler_rom.c:88:33 */
 15587  
 15588  var SKP_Silk_Resampler_1_3_COEFS_LQ = [5]int16{
 15589  	int16(16777), int16(-9792),
 15590  	int16(890), int16(1614), int16(2148),
 15591  } /* SKP_Silk_resampler_rom.c:94:33 */
 15592  
 15593  /* Tables with coefficients for 4th order ARMA filter (35 Words), in a packed format:       */
 15594  /*    { B1_Q14[1], B2_Q14[1], -A1_Q14[1], -A1_Q14[2], -A2_Q14[1], -A2_Q14[2], gain_Q16 }    */
 15595  /* where it is assumed that B*_Q14[0], B*_Q14[2], A*_Q14[0] are all 16384                   */
 15596  var SKP_Silk_Resampler_320_441_ARMA4_COEFS = [7]int16{
 15597  	int16(31454), int16(24746), int16(-9706), int16(-3386), int16(-17911), int16(-13243), int16(24797),
 15598  } /* SKP_Silk_resampler_rom.c:103:33 */
 15599  
 15600  var SKP_Silk_Resampler_240_441_ARMA4_COEFS = [7]int16{
 15601  	int16(28721), int16(11254), int16(3189), int16(-2546), int16(-1495), int16(-12618), int16(11562),
 15602  } /* SKP_Silk_resampler_rom.c:107:33 */
 15603  
 15604  var SKP_Silk_Resampler_160_441_ARMA4_COEFS = [7]int16{
 15605  	int16(23492), int16(-6457), int16(14358), int16(-4856), int16(14654), int16(-13008), int16(4456),
 15606  } /* SKP_Silk_resampler_rom.c:111:33 */
 15607  
 15608  var SKP_Silk_Resampler_120_441_ARMA4_COEFS = [7]int16{
 15609  	int16(19311), int16(-15569), int16(19489), int16(-6950), int16(21441), int16(-13559), int16(2370),
 15610  } /* SKP_Silk_resampler_rom.c:115:33 */
 15611  
 15612  var SKP_Silk_Resampler_80_441_ARMA4_COEFS = [7]int16{
 15613  	int16(13248), int16(-23849), int16(24126), int16(-9486), int16(26806), int16(-14286), int16(1065),
 15614  } /* SKP_Silk_resampler_rom.c:119:33 */
 15615  
 15616  /* Table with interpolation fractions of 1/288 : 2/288 : 287/288 (432 Words) */
 15617  var SKP_Silk_resampler_frac_FIR_144 = [144][3]int16{
 15618  	{int16(-647), int16(1884), int16(30078)},
 15619  	{int16(-625), int16(1736), int16(30044)},
 15620  	{int16(-603), int16(1591), int16(30005)},
 15621  	{int16(-581), int16(1448), int16(29963)},
 15622  	{int16(-559), int16(1308), int16(29917)},
 15623  	{int16(-537), int16(1169), int16(29867)},
 15624  	{int16(-515), int16(1032), int16(29813)},
 15625  	{int16(-494), int16(898), int16(29755)},
 15626  	{int16(-473), int16(766), int16(29693)},
 15627  	{int16(-452), int16(636), int16(29627)},
 15628  	{int16(-431), int16(508), int16(29558)},
 15629  	{int16(-410), int16(383), int16(29484)},
 15630  	{int16(-390), int16(260), int16(29407)},
 15631  	{int16(-369), int16(139), int16(29327)},
 15632  	{int16(-349), int16(20), int16(29242)},
 15633  	{int16(-330), int16(-97), int16(29154)},
 15634  	{int16(-310), int16(-211), int16(29062)},
 15635  	{int16(-291), int16(-324), int16(28967)},
 15636  	{int16(-271), int16(-434), int16(28868)},
 15637  	{int16(-253), int16(-542), int16(28765)},
 15638  	{int16(-234), int16(-647), int16(28659)},
 15639  	{int16(-215), int16(-751), int16(28550)},
 15640  	{int16(-197), int16(-852), int16(28436)},
 15641  	{int16(-179), int16(-951), int16(28320)},
 15642  	{int16(-162), int16(-1048), int16(28200)},
 15643  	{int16(-144), int16(-1143), int16(28077)},
 15644  	{int16(-127), int16(-1235), int16(27950)},
 15645  	{int16(-110), int16(-1326), int16(27820)},
 15646  	{int16(-94), int16(-1414), int16(27687)},
 15647  	{int16(-77), int16(-1500), int16(27550)},
 15648  	{int16(-61), int16(-1584), int16(27410)},
 15649  	{int16(-45), int16(-1665), int16(27268)},
 15650  	{int16(-30), int16(-1745), int16(27122)},
 15651  	{int16(-15), int16(-1822), int16(26972)},
 15652  	{int16(0), int16(-1897), int16(26820)},
 15653  	{int16(15), int16(-1970), int16(26665)},
 15654  	{int16(29), int16(-2041), int16(26507)},
 15655  	{int16(44), int16(-2110), int16(26346)},
 15656  	{int16(57), int16(-2177), int16(26182)},
 15657  	{int16(71), int16(-2242), int16(26015)},
 15658  	{int16(84), int16(-2305), int16(25845)},
 15659  	{int16(97), int16(-2365), int16(25673)},
 15660  	{int16(110), int16(-2424), int16(25498)},
 15661  	{int16(122), int16(-2480), int16(25320)},
 15662  	{int16(134), int16(-2534), int16(25140)},
 15663  	{int16(146), int16(-2587), int16(24956)},
 15664  	{int16(157), int16(-2637), int16(24771)},
 15665  	{int16(168), int16(-2685), int16(24583)},
 15666  	{int16(179), int16(-2732), int16(24392)},
 15667  	{int16(190), int16(-2776), int16(24199)},
 15668  	{int16(200), int16(-2819), int16(24003)},
 15669  	{int16(210), int16(-2859), int16(23805)},
 15670  	{int16(220), int16(-2898), int16(23605)},
 15671  	{int16(229), int16(-2934), int16(23403)},
 15672  	{int16(238), int16(-2969), int16(23198)},
 15673  	{int16(247), int16(-3002), int16(22992)},
 15674  	{int16(255), int16(-3033), int16(22783)},
 15675  	{int16(263), int16(-3062), int16(22572)},
 15676  	{int16(271), int16(-3089), int16(22359)},
 15677  	{int16(279), int16(-3114), int16(22144)},
 15678  	{int16(286), int16(-3138), int16(21927)},
 15679  	{int16(293), int16(-3160), int16(21709)},
 15680  	{int16(300), int16(-3180), int16(21488)},
 15681  	{int16(306), int16(-3198), int16(21266)},
 15682  	{int16(312), int16(-3215), int16(21042)},
 15683  	{int16(318), int16(-3229), int16(20816)},
 15684  	{int16(323), int16(-3242), int16(20589)},
 15685  	{int16(328), int16(-3254), int16(20360)},
 15686  	{int16(333), int16(-3263), int16(20130)},
 15687  	{int16(338), int16(-3272), int16(19898)},
 15688  	{int16(342), int16(-3278), int16(19665)},
 15689  	{int16(346), int16(-3283), int16(19430)},
 15690  	{int16(350), int16(-3286), int16(19194)},
 15691  	{int16(353), int16(-3288), int16(18957)},
 15692  	{int16(356), int16(-3288), int16(18718)},
 15693  	{int16(359), int16(-3286), int16(18478)},
 15694  	{int16(362), int16(-3283), int16(18238)},
 15695  	{int16(364), int16(-3279), int16(17996)},
 15696  	{int16(366), int16(-3273), int16(17753)},
 15697  	{int16(368), int16(-3266), int16(17509)},
 15698  	{int16(369), int16(-3257), int16(17264)},
 15699  	{int16(371), int16(-3247), int16(17018)},
 15700  	{int16(372), int16(-3235), int16(16772)},
 15701  	{int16(372), int16(-3222), int16(16525)},
 15702  	{int16(373), int16(-3208), int16(16277)},
 15703  	{int16(373), int16(-3192), int16(16028)},
 15704  	{int16(373), int16(-3175), int16(15779)},
 15705  	{int16(373), int16(-3157), int16(15529)},
 15706  	{int16(372), int16(-3138), int16(15279)},
 15707  	{int16(371), int16(-3117), int16(15028)},
 15708  	{int16(370), int16(-3095), int16(14777)},
 15709  	{int16(369), int16(-3072), int16(14526)},
 15710  	{int16(368), int16(-3048), int16(14274)},
 15711  	{int16(366), int16(-3022), int16(14022)},
 15712  	{int16(364), int16(-2996), int16(13770)},
 15713  	{int16(362), int16(-2968), int16(13517)},
 15714  	{int16(359), int16(-2940), int16(13265)},
 15715  	{int16(357), int16(-2910), int16(13012)},
 15716  	{int16(354), int16(-2880), int16(12760)},
 15717  	{int16(351), int16(-2848), int16(12508)},
 15718  	{int16(348), int16(-2815), int16(12255)},
 15719  	{int16(344), int16(-2782), int16(12003)},
 15720  	{int16(341), int16(-2747), int16(11751)},
 15721  	{int16(337), int16(-2712), int16(11500)},
 15722  	{int16(333), int16(-2676), int16(11248)},
 15723  	{int16(328), int16(-2639), int16(10997)},
 15724  	{int16(324), int16(-2601), int16(10747)},
 15725  	{int16(320), int16(-2562), int16(10497)},
 15726  	{int16(315), int16(-2523), int16(10247)},
 15727  	{int16(310), int16(-2482), int16(9998)},
 15728  	{int16(305), int16(-2442), int16(9750)},
 15729  	{int16(300), int16(-2400), int16(9502)},
 15730  	{int16(294), int16(-2358), int16(9255)},
 15731  	{int16(289), int16(-2315), int16(9009)},
 15732  	{int16(283), int16(-2271), int16(8763)},
 15733  	{int16(277), int16(-2227), int16(8519)},
 15734  	{int16(271), int16(-2182), int16(8275)},
 15735  	{int16(265), int16(-2137), int16(8032)},
 15736  	{int16(259), int16(-2091), int16(7791)},
 15737  	{int16(252), int16(-2045), int16(7550)},
 15738  	{int16(246), int16(-1998), int16(7311)},
 15739  	{int16(239), int16(-1951), int16(7072)},
 15740  	{int16(232), int16(-1904), int16(6835)},
 15741  	{int16(226), int16(-1856), int16(6599)},
 15742  	{int16(219), int16(-1807), int16(6364)},
 15743  	{int16(212), int16(-1758), int16(6131)},
 15744  	{int16(204), int16(-1709), int16(5899)},
 15745  	{int16(197), int16(-1660), int16(5668)},
 15746  	{int16(190), int16(-1611), int16(5439)},
 15747  	{int16(183), int16(-1561), int16(5212)},
 15748  	{int16(175), int16(-1511), int16(4986)},
 15749  	{int16(168), int16(-1460), int16(4761)},
 15750  	{int16(160), int16(-1410), int16(4538)},
 15751  	{int16(152), int16(-1359), int16(4317)},
 15752  	{int16(145), int16(-1309), int16(4098)},
 15753  	{int16(137), int16(-1258), int16(3880)},
 15754  	{int16(129), int16(-1207), int16(3664)},
 15755  	{int16(121), int16(-1156), int16(3450)},
 15756  	{int16(113), int16(-1105), int16(3238)},
 15757  	{int16(105), int16(-1054), int16(3028)},
 15758  	{int16(97), int16(-1003), int16(2820)},
 15759  	{int16(89), int16(-952), int16(2614)},
 15760  	{int16(81), int16(-901), int16(2409)},
 15761  	{int16(73), int16(-851), int16(2207)},
 15762  } /* SKP_Silk_resampler_rom.c:124:33 */
 15763  
 15764  /* Upsample by a factor 2, low quality */
 15765  func SKP_Silk_resampler_up2(tls *libc.TLS, S uintptr, out uintptr, in uintptr, len int32) { /* SKP_Silk_resampler_up2.c:40:6: */
 15766  	var k int32
 15767  	var in32 int32
 15768  	var out32 int32
 15769  	var Y int32
 15770  	var X int32
 15771  
 15772  	/* Internal variables and state are in Q10 format */
 15773  	for k = 0; k < len; k++ {
 15774  		/* Convert to Q10 */
 15775  		in32 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (10))
 15776  
 15777  		/* All-pass section for even output sample */
 15778  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S))))
 15779  		X = ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_lq_0))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_lq_0))) >> 16))
 15780  		out32 = ((*(*int32)(unsafe.Pointer(S))) + (X))
 15781  		*(*int32)(unsafe.Pointer(S)) = ((in32) + (X))
 15782  
 15783  		/* Convert back to int16 and store to output */
 15784  		*(*int16)(unsafe.Pointer(out + uintptr((2*k))*2)) = func() int16 {
 15785  			if (func() int32 {
 15786  				if (10) == 1 {
 15787  					return (((out32) >> 1) + ((out32) & 1))
 15788  				}
 15789  				return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15790  			}()) > 0x7FFF {
 15791  				return int16(0x7FFF)
 15792  			}
 15793  			return func() int16 {
 15794  				if (func() int32 {
 15795  					if (10) == 1 {
 15796  						return (((out32) >> 1) + ((out32) & 1))
 15797  					}
 15798  					return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15799  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15800  					return libc.Int16FromInt32(0x8000)
 15801  				}
 15802  				return func() int16 {
 15803  					if (10) == 1 {
 15804  						return (int16(((out32) >> 1) + ((out32) & 1)))
 15805  					}
 15806  					return (int16((((out32) >> ((10) - 1)) + 1) >> 1))
 15807  				}()
 15808  			}()
 15809  		}()
 15810  
 15811  		/* All-pass section for odd output sample */
 15812  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S + 1*4))))
 15813  		X = ((Y) + ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_lq_1))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_lq_1))) >> 16)))
 15814  		out32 = ((*(*int32)(unsafe.Pointer(S + 1*4))) + (X))
 15815  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((in32) + (X))
 15816  
 15817  		/* Convert back to int16 and store to output */
 15818  		*(*int16)(unsafe.Pointer(out + uintptr(((2*k)+1))*2)) = func() int16 {
 15819  			if (func() int32 {
 15820  				if (10) == 1 {
 15821  					return (((out32) >> 1) + ((out32) & 1))
 15822  				}
 15823  				return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15824  			}()) > 0x7FFF {
 15825  				return int16(0x7FFF)
 15826  			}
 15827  			return func() int16 {
 15828  				if (func() int32 {
 15829  					if (10) == 1 {
 15830  						return (((out32) >> 1) + ((out32) & 1))
 15831  					}
 15832  					return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15833  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15834  					return libc.Int16FromInt32(0x8000)
 15835  				}
 15836  				return func() int16 {
 15837  					if (10) == 1 {
 15838  						return (int16(((out32) >> 1) + ((out32) & 1)))
 15839  					}
 15840  					return (int16((((out32) >> ((10) - 1)) + 1) >> 1))
 15841  				}()
 15842  			}()
 15843  		}()
 15844  	}
 15845  }
 15846  
 15847  /* Residual energy: nrg = wxx - 2 * wXx * c + c' * wXX * c */
 15848  func SKP_Silk_residual_energy16_covar_FIX(tls *libc.TLS, c uintptr, wXX uintptr, wXx uintptr, wxx int32, D int32, cQ int32) int32 { /* SKP_Silk_residual_energy16_FIX.c:31:11: */
 15849  	bp := tls.Alloc(64)
 15850  	defer tls.Free(64)
 15851  
 15852  	var i int32
 15853  	var j int32
 15854  	var lshifts int32
 15855  	var Qxtra int32
 15856  	var c_max int32
 15857  	var w_max int32
 15858  	var tmp int32
 15859  	var tmp2 int32
 15860  	var nrg int32
 15861  	// var cn [16]int32 at bp, 64
 15862  
 15863  	var pRow uintptr
 15864  
 15865  	/* Safety checks */
 15866  
 15867  	lshifts = (16 - cQ)
 15868  	Qxtra = lshifts
 15869  
 15870  	c_max = 0
 15871  	for i = 0; i < D; i++ {
 15872  		c_max = SKP_max_32(tls, c_max, func() int32 {
 15873  			if (int32(*(*int16)(unsafe.Pointer(c + uintptr(i)*2)))) > 0 {
 15874  				return int32(*(*int16)(unsafe.Pointer(c + uintptr(i)*2)))
 15875  			}
 15876  			return -int32(*(*int16)(unsafe.Pointer(c + uintptr(i)*2)))
 15877  		}())
 15878  	}
 15879  	Qxtra = SKP_min_int(tls, Qxtra, (SKP_Silk_CLZ32(tls, c_max) - 17))
 15880  
 15881  	w_max = SKP_max_32(tls, *(*int32)(unsafe.Pointer(wXX)), *(*int32)(unsafe.Pointer(wXX + uintptr(((D*D)-1))*4)))
 15882  	Qxtra = SKP_min_int(tls, Qxtra, (SKP_Silk_CLZ32(tls, ((D)*(((((w_max)>>16)*(int32(int16(c_max))))+((((w_max)&0x0000FFFF)*(int32(int16(c_max))))>>16))>>(4)))) - 5))
 15883  	Qxtra = SKP_max_int(tls, Qxtra, 0)
 15884  	for i = 0; i < D; i++ {
 15885  		*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(i)*4)) = ((int32(*(*int16)(unsafe.Pointer(c + uintptr(i)*2)))) << (Qxtra))
 15886  		/* Check that SKP_SMLAWB can be used */
 15887  	}
 15888  	lshifts = lshifts - (Qxtra)
 15889  
 15890  	/* Compute wxx - 2 * wXx * c */
 15891  	tmp = 0
 15892  	for i = 0; i < D; i++ {
 15893  		tmp = ((tmp) + ((((*(*int32)(unsafe.Pointer(wXx + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(i)*4)))))) + ((((*(*int32)(unsafe.Pointer(wXx + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(i)*4)))))) >> 16)))
 15894  	}
 15895  	nrg = (((wxx) >> (1 + lshifts)) - tmp) /* Q: -lshifts - 1 */
 15896  
 15897  	/* Add c' * wXX * c, assuming wXX is symmetric */
 15898  	tmp2 = 0
 15899  	for i = 0; i < D; i++ {
 15900  		tmp = 0
 15901  		pRow = (wXX + uintptr((i*D))*4)
 15902  		for j = (i + 1); j < D; j++ {
 15903  			tmp = ((tmp) + ((((*(*int32)(unsafe.Pointer(pRow + uintptr(j)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(j)*4)))))) + ((((*(*int32)(unsafe.Pointer(pRow + uintptr(j)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(j)*4)))))) >> 16)))
 15904  		}
 15905  		tmp = ((tmp) + (((((*(*int32)(unsafe.Pointer(pRow + uintptr(i)*4))) >> (1)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(i)*4)))))) + (((((*(*int32)(unsafe.Pointer(pRow + uintptr(i)*4))) >> (1)) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(i)*4)))))) >> 16)))
 15906  		tmp2 = ((tmp2) + ((((tmp) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(i)*4)))))) + ((((tmp) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(i)*4)))))) >> 16)))
 15907  	}
 15908  	nrg = ((nrg) + ((tmp2) << (lshifts))) /* Q: -lshifts - 1 */
 15909  
 15910  	/* Always keep one bit free, because these energies are added for LSF interpolation */
 15911  	if nrg < 1 {
 15912  		nrg = 1
 15913  	} else if nrg > (int32((0x7FFFFFFF)) >> (lshifts + 2)) {
 15914  		nrg = (int32(0x7FFFFFFF) >> 1)
 15915  	} else {
 15916  		nrg = ((nrg) << (lshifts + 1)) /* Q0 */
 15917  	}
 15918  	return nrg
 15919  
 15920  }
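// A floating-point reference of the quadratic form evaluated above,
// nrg = wxx - 2*wXx'*c + c'*wXX*c with wXX symmetric; a sketch for
// cross-checking the fixed-point scaling (names are illustrative only):
func residualEnergyCovarRef(c []float64, wXX [][]float64, wXx []float64, wxx float64) float64 {
	nrg := wxx
	for i, ci := range c {
		nrg -= 2 * wXx[i] * ci
		for j, cj := range c {
			nrg += ci * wXX[i][j] * cj
		}
	}
	return nrg
}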
 15921  
 15922  /* Calculates residual energies of input subframes where all subframes have LPC_order   */
 15923  /* of preceeding samples                                                                */
 15924  func SKP_Silk_residual_energy_FIX(tls *libc.TLS, nrgs uintptr, nrgsQ uintptr, x uintptr, a_Q12 uintptr, gains uintptr, subfr_length int32, LPC_order int32) { /* SKP_Silk_residual_energy_FIX.c:32:6: */
 15925  	bp := tls.Alloc(580)
 15926  	defer tls.Free(580)
 15927  
 15928  	var offset int32
 15929  	var i int32
 15930  	var j int32
 15931  	// var rshift int32 at bp+576, 4
 15932  
 15933  	var lz1 int32
 15934  	var lz2 int32
 15935  	var LPC_res_ptr uintptr
 15936  	// var LPC_res [272]int16 at bp+32, 544
 15937  
 15938  	var x_ptr uintptr
 15939  	// var S [16]int16 at bp, 32
 15940  
 15941  	var tmp32 int32
 15942  
 15943  	x_ptr = x
 15944  	offset = (LPC_order + subfr_length)
 15945  
 15946  	/* Filter input to create the LPC residual for each frame half, and measure subframe energies */
 15947  	for i = 0; i < 2; i++ {
 15948  		/* Calculate half-frame LPC residual signal including preceding samples */
 15949  		libc.Xmemset(tls, bp /* &S[0] */, 0, (uint64(LPC_order) * uint64(unsafe.Sizeof(int16(0)))))
 15950  		SKP_Silk_LPC_analysis_filter(tls, x_ptr, (a_Q12 + uintptr(i)*32), bp /* &S[0] */, bp+32 /* &LPC_res[0] */, ((int32(4) >> 1) * offset), LPC_order)
 15951  
 15952  		/* Point to first subframe of the just calculated LPC residual signal */
 15953  		LPC_res_ptr = (bp + 32 /* &LPC_res[0] */ + uintptr(LPC_order)*2)
 15954  		for j = 0; j < (int32(4) >> 1); j++ {
 15955  			/* Measure subframe energy */
 15956  			SKP_Silk_sum_sqr_shift(tls, (nrgs + uintptr(((i*(int32(4)>>1))+j))*4), bp+576 /* &rshift */, LPC_res_ptr, subfr_length)
 15957  
 15958  			/* Set Q values for the measured energy */
 15959  			*(*int32)(unsafe.Pointer(nrgsQ + uintptr(((i*(int32(4)>>1))+j))*4)) = -*(*int32)(unsafe.Pointer(bp + 576 /* rshift */))
 15960  
 15961  			/* Move to next subframe */
 15962  			LPC_res_ptr += 2 * (uintptr(offset))
 15963  		}
 15964  		/* Move to next frame half */
 15965  		x_ptr += 2 * (uintptr((int32(4) >> 1) * offset))
 15966  	}
 15967  
 15968  	/* Apply the squared subframe gains */
 15969  	for i = 0; i < 4; i++ {
 15970  		/* Fully upscale gains and energies */
 15971  		lz1 = (SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(nrgs + uintptr(i)*4))) - 1)
 15972  		lz2 = (SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(gains + uintptr(i)*4))) - 1)
 15973  
 15974  		tmp32 = ((*(*int32)(unsafe.Pointer(gains + uintptr(i)*4))) << (lz2))
 15975  
 15976  		/* Find squared gains */
 15977  		tmp32 = (int32(((int64_t(tmp32)) * (int64_t(tmp32))) >> (32))) // Q( 2 * lz2 - 32 )
 15978  
 15979  		/* Scale energies */
 15980  		*(*int32)(unsafe.Pointer(nrgs + uintptr(i)*4)) = (int32(((int64_t(tmp32)) * (int64_t((*(*int32)(unsafe.Pointer(nrgs + uintptr(i)*4))) << (lz1)))) >> (32))) // Q( nrgsQ[ i ] + lz1 + 2 * lz2 - 32 - 32 )
 15981  		*(*int32)(unsafe.Pointer(nrgsQ + uintptr(i)*4)) += (((lz1 + (2 * lz2)) - 32) - 32)
 15982  	}
 15983  }
 15984  
 15985  /* Copy and multiply a vector by a constant */
 15986  func SKP_Silk_scale_copy_vector16(tls *libc.TLS, data_out uintptr, data_in uintptr, gain_Q16 int32, dataSize int32) { /* SKP_Silk_scale_copy_vector16.c:31:6: */
 15987  	var i int32
 15988  	var tmp32 int32
 15989  
 15990  	for i = 0; i < dataSize; i++ {
 15991  		tmp32 = ((((gain_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(data_in + uintptr(i)*2))))) + ((((gain_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(data_in + uintptr(i)*2))))) >> 16))
 15992  		*(*int16)(unsafe.Pointer(data_out + uintptr(i)*2)) = int16(tmp32)
 15993  	}
 15994  }
 15995  
 15996  /* Multiply a vector by a constant */
 15997  func SKP_Silk_scale_vector32_Q26_lshift_18(tls *libc.TLS, data1 uintptr, gain_Q26 int32, dataSize int32) { /* SKP_Silk_scale_vector.c:31:6: */
 15998  	var i int32
 15999  
 16000  	for i = 0; i < dataSize; i++ {
 16001  		*(*int32)(unsafe.Pointer(data1 + uintptr(i)*4)) = (int32(((int64_t(*(*int32)(unsafe.Pointer(data1 + uintptr(i)*4)))) * (int64_t(gain_Q26))) >> (8))) // OUTPUT: Q18
 16002  	}
 16003  }
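// Put differently: each element is scaled by the Q26 gain (a division by
// 2^26) and then left-shifted 18 bits, which collapses into the single
// (x * gain_Q26) >> 8 computed above; an input in Q0 therefore comes out
// in Q18, as noted in the function name and the OUTPUT comment.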
 16004  
 16005  /* Faster than schur64(), but much less accurate.                       */
 16006  /* Uses SMLAWB(), requiring armv5E and higher.                          */
 16007  func SKP_Silk_schur(tls *libc.TLS, rc_Q15 uintptr, c uintptr, order int32) int32 { /* SKP_Silk_schur.c:40:11: */
 16008  	bp := tls.Alloc(136)
 16009  	defer tls.Free(136)
 16010  
 16011  	var k int32
 16012  	var n int32
 16013  	var lz int32
 16014  	// var C [17][2]int32 at bp, 136
 16015  
 16016  	var Ctmp1 int32
 16017  	var Ctmp2 int32
 16018  	var rc_tmp_Q15 int32
 16019  
 16020  	/* Get number of leading zeros */
 16021  	lz = SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(c)))
 16022  
 16023  	/* Copy correlations and adjust level to Q30 */
 16024  	if lz < 2 {
 16025  		/* lz must be 1, so shift one to the right */
 16026  		for k = 0; k < (order + 1); k++ {
 16027  			*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*8))) = libc.AssignPtrInt32((bp /* &C */ +uintptr(k)*8)+1*4, ((*(*int32)(unsafe.Pointer(c + uintptr(k)*4))) >> (1)))
 16028  		}
 16029  	} else if lz > 2 {
 16030  		/* Shift to the left */
 16031  		lz = lz - (2)
 16032  		for k = 0; k < (order + 1); k++ {
 16033  			*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*8))) = libc.AssignPtrInt32((bp /* &C */ +uintptr(k)*8)+1*4, ((*(*int32)(unsafe.Pointer(c + uintptr(k)*4))) << (lz)))
 16034  		}
 16035  	} else {
 16036  		/* No need to shift */
 16037  		for k = 0; k < (order + 1); k++ {
 16038  			*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*8))) = libc.AssignPtrInt32((bp /* &C */ +uintptr(k)*8)+1*4, *(*int32)(unsafe.Pointer(c + uintptr(k)*4)))
 16039  		}
 16040  	}
 16041  
 16042  	for k = 0; k < order; k++ {
 16043  
 16044  		/* Get reflection coefficient */
 16045  		rc_tmp_Q15 = -((*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr((k+1))*8)))) / (SKP_max_32(tls, ((*(*int32)(unsafe.Pointer((bp /* &C[0] */) + 1*4))) >> (15)), 1)))
 16046  
 16047  		/* Clip (shouldn't happen for properly conditioned inputs) */
 16048  		rc_tmp_Q15 = func() int32 {
 16049  			if (rc_tmp_Q15) > 0x7FFF {
 16050  				return 0x7FFF
 16051  			}
 16052  			return func() int32 {
 16053  				if (rc_tmp_Q15) < (int32(libc.Int16FromInt32(0x8000))) {
 16054  					return int32(libc.Int16FromInt32(0x8000))
 16055  				}
 16056  				return rc_tmp_Q15
 16057  			}()
 16058  		}()
 16059  
 16060  		/* Store */
 16061  		*(*int16)(unsafe.Pointer(rc_Q15 + uintptr(k)*2)) = int16(rc_tmp_Q15)
 16062  
 16063  		/* Update correlations */
 16064  		for n = 0; n < (order - k); n++ {
 16065  			Ctmp1 = *(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(((n+k)+1))*8)))
 16066  			Ctmp2 = *(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(n)*8) + 1*4))
 16067  			*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(((n+k)+1))*8))) = ((Ctmp1) + (((((Ctmp2) << (1)) >> 16) * (int32(int16(rc_tmp_Q15)))) + (((((Ctmp2) << (1)) & 0x0000FFFF) * (int32(int16(rc_tmp_Q15)))) >> 16)))
 16068  			*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(n)*8) + 1*4)) = ((Ctmp2) + (((((Ctmp1) << (1)) >> 16) * (int32(int16(rc_tmp_Q15)))) + (((((Ctmp1) << (1)) & 0x0000FFFF) * (int32(int16(rc_tmp_Q15)))) >> 16)))
 16069  		}
 16070  	}
 16071  
 16072  	/* return residual energy */
 16073  	return *(*int32)(unsafe.Pointer((bp /* &C[0] */) + 1*4))
 16074  }
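// A floating-point reference of the same Schur recursion (autocorrelation
// values c[0..order] in, reflection coefficients out, residual energy
// returned); an illustrative sketch for cross-checking the Q15 code above:
func schurRef(c []float64, order int) (rc []float64, resNrg float64) {
	rc = make([]float64, order)
	C := make([][2]float64, order+1)
	for k := 0; k <= order; k++ {
		C[k][0], C[k][1] = c[k], c[k]
	}
	for k := 0; k < order; k++ {
		rc[k] = -C[k+1][0] / C[0][1] // reflection coefficient
		for n := 0; n < order-k; n++ {
			c1, c2 := C[n+k+1][0], C[n][1]
			C[n+k+1][0] = c1 + c2*rc[k]
			C[n][1] = c2 + c1*rc[k]
		}
	}
	return rc, C[0][1]
}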
 16075  
 16076  /* Slower than schur(), but more accurate.                              */
 16077  /* Uses SMULL(), available on armv4                                     */
 16078  func SKP_Silk_schur64(tls *libc.TLS, rc_Q16 uintptr, c uintptr, order int32) int32 { /* SKP_Silk_schur64.c:41:11: */
 16079  	bp := tls.Alloc(136)
 16080  	defer tls.Free(136)
 16081  
 16082  	var k int32
 16083  	var n int32
 16084  	// var C [17][2]int32 at bp, 136
 16085  
 16086  	var Ctmp1_Q30 int32
 16087  	var Ctmp2_Q30 int32
 16088  	var rc_tmp_Q31 int32
 16089  
 16090  	/* Check for invalid input */
 16091  	if *(*int32)(unsafe.Pointer(c)) <= 0 {
 16092  		libc.Xmemset(tls, rc_Q16, 0, (uint64(order) * uint64(unsafe.Sizeof(int32(0)))))
 16093  		return 0
 16094  	}
 16095  
 16096  	for k = 0; k < (order + 1); k++ {
 16097  		*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*8))) = libc.AssignPtrInt32((bp /* &C */ +uintptr(k)*8)+1*4, *(*int32)(unsafe.Pointer(c + uintptr(k)*4)))
 16098  	}
 16099  
 16100  	for k = 0; k < order; k++ {
 16101  		/* Get reflection coefficient: divide two Q30 values and get result in Q31 */
 16102  		rc_tmp_Q31 = SKP_DIV32_varQ(tls, -*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr((k+1))*8))), *(*int32)(unsafe.Pointer((bp /* &C[0] */) + 1*4)), 31)
 16103  
 16104  		/* Save the output */
 16105  		*(*int32)(unsafe.Pointer(rc_Q16 + uintptr(k)*4)) = func() int32 {
 16106  			if (15) == 1 {
 16107  				return (((rc_tmp_Q31) >> 1) + ((rc_tmp_Q31) & 1))
 16108  			}
 16109  			return ((((rc_tmp_Q31) >> ((15) - 1)) + 1) >> 1)
 16110  		}()
 16111  
 16112  		/* Update correlations */
 16113  		for n = 0; n < (order - k); n++ {
 16114  			Ctmp1_Q30 = *(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(((n+k)+1))*8)))
 16115  			Ctmp2_Q30 = *(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(n)*8) + 1*4))
 16116  
 16117  			/* Multiply and add the highest int32 */
 16118  			*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(((n+k)+1))*8))) = (Ctmp1_Q30 + (int32(((int64_t((Ctmp2_Q30) << (1))) * (int64_t(rc_tmp_Q31))) >> (32))))
 16119  			*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(n)*8) + 1*4)) = (Ctmp2_Q30 + (int32(((int64_t((Ctmp1_Q30) << (1))) * (int64_t(rc_tmp_Q31))) >> (32))))
 16120  		}
 16121  	}
 16122  
 16123  	return *(*int32)(unsafe.Pointer((bp /* &C[0] */) + 1*4))
 16124  }
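// Throughout this file ccgo expands the SKP_RSHIFT_ROUND macro into inline
// closures of the form `func() int32 { if (15) == 1 { ... } ... }()`, as in the
// rc_Q16 assignment above. The sketch below, with a hypothetical name, is what
// those closures compute: an arithmetic right shift with rounding to nearest.
func rshiftRoundSketch(x int32, shift uint) int32 {
	if shift == 1 {
		// One-bit case: add the dropped bit back in, avoiding intermediate overflow.
		return (x >> 1) + (x & 1)
	}
	return ((x >> (shift - 1)) + 1) >> 1
}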
 16125  
 16126  /* shell coder; pulse-subframe length is hardcoded */
 16127  
 16128  func combine_pulses(tls *libc.TLS, out uintptr, in uintptr, len int32) { /* SKP_Silk_shell_coder.c:32:17: */
 16129  	var k int32
 16130  	for k = 0; k < len; k++ {
 16131  		*(*int32)(unsafe.Pointer(out + uintptr(k)*4)) = (*(*int32)(unsafe.Pointer(in + uintptr((2*k))*4)) + *(*int32)(unsafe.Pointer(in + uintptr(((2*k)+1))*4)))
 16132  	}
 16133  }
 16134  
 16135  func encode_split(tls *libc.TLS, sRC uintptr, p_child1 int32, p int32, shell_table uintptr) { /* SKP_Silk_shell_coder.c:44:17: */
 16136  	var cdf uintptr
 16137  
 16138  	if p > 0 {
 16139  		cdf = (shell_table + uintptr(SKP_Silk_shell_code_table_offsets[p])*2)
 16140  		SKP_Silk_range_encoder(tls, sRC, p_child1, cdf)
 16141  	}
 16142  }
 16143  
 16144  func decode_split(tls *libc.TLS, p_child1 uintptr, p_child2 uintptr, sRC uintptr, p int32, shell_table uintptr) { /* SKP_Silk_shell_coder.c:59:17: */
 16145  	var cdf_middle int32
 16146  	var cdf uintptr
 16147  
 16148  	if p > 0 {
 16149  		cdf_middle = ((p) >> (1))
 16150  		cdf = (shell_table + uintptr(SKP_Silk_shell_code_table_offsets[p])*2)
 16151  		SKP_Silk_range_decoder(tls, p_child1, sRC, cdf, cdf_middle)
 16152  		*(*int32)(unsafe.Pointer(p_child2)) = (p - *(*int32)(unsafe.Pointer(p_child1)))
 16153  	} else {
 16154  		*(*int32)(unsafe.Pointer(p_child1)) = 0
 16155  		*(*int32)(unsafe.Pointer(p_child2)) = 0
 16156  	}
 16157  }
 16158  
 16159  /* Shell encoder, operates on one shell code frame of 16 pulses */
 16160  func SKP_Silk_shell_encoder(tls *libc.TLS, sRC uintptr, pulses0 uintptr) { /* SKP_Silk_shell_coder.c:82:6: */
 16161  	bp := tls.Alloc(60)
 16162  	defer tls.Free(60)
 16163  
 16164  	// var pulses1 [8]int32 at bp, 32
 16165  
 16166  	// var pulses2 [4]int32 at bp+32, 16
 16167  
 16168  	// var pulses3 [2]int32 at bp+48, 8
 16169  
 16170  	// var pulses4 [1]int32 at bp+56, 4
 16171  
 16172  	/* this function operates on one shell code frame of 16 pulses */
 16173  
 16174  	/* tree representation per pulse-subframe */
 16175  	combine_pulses(tls, bp /* &pulses1[0] */, pulses0, 8)
 16176  	combine_pulses(tls, bp+32 /* &pulses2[0] */, bp /* &pulses1[0] */, 4)
 16177  	combine_pulses(tls, bp+48 /* &pulses3[0] */, bp+32 /* &pulses2[0] */, 2)
 16178  	combine_pulses(tls, bp+56 /* &pulses4[0] */, bp+48 /* &pulses3[0] */, 1)
 16179  
 16180  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(bp + 48 /* &pulses3[0] */)), *(*int32)(unsafe.Pointer(bp + 56 /* &pulses4[0] */)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table3)))
 16181  
 16182  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(bp + 32 /* &pulses2[0] */)), *(*int32)(unsafe.Pointer(bp + 48 /* &pulses3[0] */)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table2)))
 16183  
 16184  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */)), *(*int32)(unsafe.Pointer(bp + 32 /* &pulses2[0] */)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16185  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16186  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0 + 2*4)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 1*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16187  
 16188  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 2*4)), *(*int32)(unsafe.Pointer(bp + 32 /* &pulses2[0] */ + 1*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16189  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0 + 4*4)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 2*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16190  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0 + 6*4)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 3*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16191  
 16192  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(bp + 32 /* &pulses2[0] */ + 2*4)), *(*int32)(unsafe.Pointer(bp + 48 /* &pulses3[0] */ + 1*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table2)))
 16193  
 16194  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 4*4)), *(*int32)(unsafe.Pointer(bp + 32 /* &pulses2[0] */ + 2*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16195  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0 + 8*4)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 4*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16196  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0 + 10*4)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 5*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16197  
 16198  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 6*4)), *(*int32)(unsafe.Pointer(bp + 32 /* &pulses2[0] */ + 3*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16199  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0 + 12*4)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 6*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16200  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0 + 14*4)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 7*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16201  }
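// SKP_Silk_shell_encoder above codes 16 pulse magnitudes as a fixed binary tree:
// each combine_pulses pass halves the vector by pairwise addition, and each
// encode_split call then entropy-codes how a parent total splits into its left
// child (the right child is implied). The hypothetical helper below only builds
// that tree, as a readable restatement of the combine_pulses cascade; the range
// coding itself stays with encode_split/SKP_Silk_range_encoder.
func shellPulseTreeSketch(pulses0 [16]int32) [5][]int32 {
	var tree [5][]int32
	tree[0] = pulses0[:]
	for level := 1; level < 5; level++ {
		prev := tree[level-1]
		cur := make([]int32, len(prev)/2)
		for k := range cur {
			cur[k] = prev[2*k] + prev[2*k+1] // same pairwise sum as combine_pulses
		}
		tree[level] = cur
	}
	return tree // tree[4][0] is the total pulse count of the shell frame
}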
 16202  
 16203  /* Shell decoder, operates on one shell code frame of 16 pulses */
 16204  func SKP_Silk_shell_decoder(tls *libc.TLS, pulses0 uintptr, sRC uintptr, pulses4 int32) { /* SKP_Silk_shell_coder.c:123:6: */
 16205  	bp := tls.Alloc(56)
 16206  	defer tls.Free(56)
 16207  
 16208  	// var pulses3 [2]int32 at bp, 8
 16209  
 16210  	// var pulses2 [4]int32 at bp+8, 16
 16211  
 16212  	// var pulses1 [8]int32 at bp+24, 32
 16213  
 16214  	/* this function operates on one shell code frame of 16 pulses */
 16215  
 16216  	decode_split(tls, (bp /* &pulses3 */), (bp /* &pulses3 */ + 1*4), sRC, pulses4, uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table3)))
 16217  
 16218  	decode_split(tls, (bp + 8 /* &pulses2 */), (bp + 8 /* &pulses2 */ + 1*4), sRC, *(*int32)(unsafe.Pointer(bp /* &pulses3[0] */)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table2)))
 16219  
 16220  	decode_split(tls, (bp + 24 /* &pulses1 */), (bp + 24 /* &pulses1 */ + 1*4), sRC, *(*int32)(unsafe.Pointer(bp + 8 /* &pulses2[0] */)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16221  	decode_split(tls, (pulses0), (pulses0 + 1*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16222  	decode_split(tls, (pulses0 + 2*4), (pulses0 + 3*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */ + 1*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16223  
 16224  	decode_split(tls, (bp + 24 /* &pulses1 */ + 2*4), (bp + 24 /* &pulses1 */ + 3*4), sRC, *(*int32)(unsafe.Pointer(bp + 8 /* &pulses2[0] */ + 1*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16225  	decode_split(tls, (pulses0 + 4*4), (pulses0 + 5*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */ + 2*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16226  	decode_split(tls, (pulses0 + 6*4), (pulses0 + 7*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */ + 3*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16227  
 16228  	decode_split(tls, (bp + 8 /* &pulses2 */ + 2*4), (bp + 8 /* &pulses2 */ + 3*4), sRC, *(*int32)(unsafe.Pointer(bp /* &pulses3[0] */ + 1*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table2)))
 16229  
 16230  	decode_split(tls, (bp + 24 /* &pulses1 */ + 4*4), (bp + 24 /* &pulses1 */ + 5*4), sRC, *(*int32)(unsafe.Pointer(bp + 8 /* &pulses2[0] */ + 2*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16231  	decode_split(tls, (pulses0 + 8*4), (pulses0 + 9*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */ + 4*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16232  	decode_split(tls, (pulses0 + 10*4), (pulses0 + 11*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */ + 5*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16233  
 16234  	decode_split(tls, (bp + 24 /* &pulses1 */ + 6*4), (bp + 24 /* &pulses1 */ + 7*4), sRC, *(*int32)(unsafe.Pointer(bp + 8 /* &pulses2[0] */ + 3*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16235  	decode_split(tls, (pulses0 + 12*4), (pulses0 + 13*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */ + 6*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16236  	decode_split(tls, (pulses0 + 14*4), (pulses0 + 15*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */ + 7*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16237  }
 16238  
 16239  /********************************/
 16240  /* approximate sigmoid function */
 16241  /********************************/
 16242  /* fprintf(1, '%d, ', round(1024 * ([1 ./ (1 + exp(-(1:5))), 1] - 1 ./ (1 + exp(-(0:5)))))); */
 16243  var sigm_LUT_slope_Q10 = [6]int32{
 16244  	237, 153, 73, 30, 12, 7,
 16245  } /* SKP_Silk_sigm_Q15.c:41:24 */
 16246  /* fprintf(1, '%d, ', round(32767 * 1 ./ (1 + exp(-(0:5))))); */
 16247  var sigm_LUT_pos_Q15 = [6]int32{
 16248  	16384, 23955, 28861, 31213, 32178, 32548,
 16249  } /* SKP_Silk_sigm_Q15.c:45:24 */
 16250  /* fprintf(1, '%d, ', round(32767 * 1 ./ (1 + exp((0:5))))); */
 16251  var sigm_LUT_neg_Q15 = [6]int32{
 16252  	16384, 8812, 3906, 1554, 589, 219,
 16253  } /* SKP_Silk_sigm_Q15.c:49:24 */
 16254  
 16255  func SKP_Silk_sigm_Q15(tls *libc.TLS, in_Q5 int32) int32 { /* SKP_Silk_sigm_Q15.c:53:9: */
 16256  	var ind int32
 16257  
 16258  	if in_Q5 < 0 {
 16259  		/* Negative input */
 16260  		in_Q5 = -in_Q5
 16261  		if in_Q5 >= (6 * 32) {
 16262  			return 0 /* Clip */
 16263  		} else {
 16264  			/* Linear interpolation of look up table */
 16265  			ind = ((in_Q5) >> (5))
 16266  			return (sigm_LUT_neg_Q15[ind] - ((int32(int16(sigm_LUT_slope_Q10[ind]))) * (int32((int16(in_Q5 & 0x1F))))))
 16267  		}
 16268  	} else {
 16269  		/* Positive input */
 16270  		if in_Q5 >= (6 * 32) {
 16271  			return 32767 /* clip */
 16272  		} else {
 16273  			/* Linear interpolation of look up table */
 16274  			ind = ((in_Q5) >> (5))
 16275  			return (sigm_LUT_pos_Q15[ind] + ((int32(int16(sigm_LUT_slope_Q10[ind]))) * (int32((int16(in_Q5 & 0x1F))))))
 16276  		}
 16277  	}
 16278  	return int32(0)
 16279  }
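// SKP_Silk_sigm_Q15 above approximates 32767/(1+exp(-x)) with the three 6-entry
// tables and linear interpolation: the Q5 input selects one table entry per unit
// of x (32 Q5 steps) and the remaining 5 fraction bits scale the slope. Below is
// a readability sketch of the same path, reusing the package-level LUTs; the
// helper name is hypothetical and the generated function remains authoritative.
func sigmQ15Sketch(inQ5 int32) int32 {
	neg := inQ5 < 0
	if neg {
		inQ5 = -inQ5
	}
	if inQ5 >= 6*32 {
		if neg {
			return 0 // sigmoid(-inf) in Q15
		}
		return 32767 // sigmoid(+inf) in Q15
	}
	ind := inQ5 >> 5                       // table index: one entry per unit of x
	frac := inQ5 & 0x1F                    // fractional position within the entry
	step := sigm_LUT_slope_Q10[ind] * frac // Q10 slope * Q5 fraction -> Q15 offset
	if neg {
		return sigm_LUT_neg_Q15[ind] - step
	}
	return sigm_LUT_pos_Q15[ind] + step
}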
 16280  
 16281  /***********************************************************************
 16282  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
 16283  Redistribution and use in source and binary forms, with or without
 16284  modification, (subject to the limitations in the disclaimer below)
 16285  are permitted provided that the following conditions are met:
 16286  - Redistributions of source code must retain the above copyright notice,
 16287  this list of conditions and the following disclaimer.
 16288  - Redistributions in binary form must reproduce the above copyright
 16289  notice, this list of conditions and the following disclaimer in the
 16290  documentation and/or other materials provided with the distribution.
 16291  - Neither the name of Skype Limited, nor the names of specific
 16292  contributors, may be used to endorse or promote products derived from
 16293  this software without specific prior written permission.
 16294  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
 16295  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 16296  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
 16297  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 16298  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 16299  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 16300  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 16301  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 16302  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 16303  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 16304  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 16305  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 16306  ***********************************************************************/
 16307  
 16308  /*******************/
 16309  /* Pitch estimator */
 16310  /*******************/
 16311  
 16312  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
 16313  
 16314  /* Bandwidth expansion for whitening filter in pitch analysis */
 16315  
 16316  /* Threshold used by pitch estimator for early escape */
 16317  
 16318  /*********************/
 16319  /* Linear prediction */
 16320  /*********************/
 16321  
 16322  /* LPC analysis defines: regularization and bandwidth expansion */
 16323  
 16324  /* LTP analysis defines */
 16325  
 16326  /* LTP quantization settings */
 16327  
 16328  /***********************/
 16329  /* High pass filtering */
 16330  /***********************/
 16331  
 16332  /* Smoothing parameters for low end of pitch frequency range estimation */
 16333  
 16334  /* Min and max values for low end of pitch frequency range estimation */
 16335  
 16336  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
 16337  
 16338  /***********/
 16339  /* Various */
 16340  /***********/
 16341  
 16342  /* Required speech activity for counting frame as active */
 16343  
 16344  /* Speech Activity LBRR enable threshold (needs tuning) */
 16345  
 16346  /*************************/
 16347  /* Perceptual parameters */
 16348  /*************************/
 16349  
 16350  /* reduction in coding SNR during low speech activity */
 16351  
 16352  /* factor for reducing quantization noise during voiced speech */
 16353  
 16354  /* factor for reducing quantization noise for unvoiced sparse signals */
 16355  
 16356  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
 16357  
 16358  /* warping control */
 16359  
 16360  /* fraction added to first autocorrelation value */
 16361  
 16362  /* noise shaping filter chirp factor */
 16363  
 16364  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
 16365  
 16366  /* gain reduction for fricatives */
 16367  
 16368  /* extra harmonic boosting (signal shaping) at low bitrates */
 16369  
 16370  /* extra harmonic boosting (signal shaping) for noisy input signals */
 16371  
 16372  /* harmonic noise shaping */
 16373  
 16374  /* extra harmonic noise shaping for high bitrates or noisy input */
 16375  
 16376  /* parameter for shaping noise towards higher frequencies */
 16377  
 16378  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
 16379  
 16380  /* parameter for applying a high-pass tilt to the input signal */
 16381  
 16382  /* parameter for extra high-pass tilt to the input signal at high rates */
 16383  
 16384  /* parameter for reducing noise at the very low frequencies */
 16385  
 16386  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
 16387  
 16388  /* noise floor to put a lower limit on the quantization step size */
 16389  
 16390  /* noise floor relative to active speech gain level */
 16391  
 16392  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
 16393  
 16394  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
 16395  
 16396  /* parameters defining the R/D tradeoff in the residual quantizer */
 16397  
 16398  /*****************************/
 16399  /* Internal function headers */
 16400  /*****************************/
 16401  
 16402  type inv_D_t = struct {
 16403  	FQ36_part int32
 16404  	FQ48_part int32
 16405  } /* SKP_Silk_solve_LS_FIX.c:38:3 */
 16406  
 16407  /* Solves Ax = b, assuming A is symmetric */
 16408  func SKP_Silk_solve_LDL_FIX(tls *libc.TLS, A uintptr, M int32, b uintptr, x_Q16 uintptr) { /* SKP_Silk_solve_LS_FIX.c:71:6: */
 16409  	bp := tls.Alloc(1216)
 16410  	defer tls.Free(1216)
 16411  
 16412  	// var L_Q16 [256]int32 at bp, 1024
 16413  
 16414  	// var Y [16]int32 at bp+1152, 64
 16415  
 16416  	// var inv_D [16]inv_D_t at bp+1024, 128
 16417  
 16418  	/***************************************************
 16419  	  Factorize A by LDL such that A = L*D*L',
 16420  	  where L is lower triangular with ones on diagonal
 16421  	  ****************************************************/
 16422  	SKP_Silk_LDL_factorize_FIX(tls, A, M, bp /* &L_Q16[0] */, bp+1024 /* &inv_D[0] */)
 16423  
 16424  	/****************************************************
 16425  	  Substitute D*L'*x = Y, i.e.:
 16426  	  L*D*L'*x = b => L*Y = b <=> Y = inv(L)*b
 16427  	  ******************************************************/
 16428  	SKP_Silk_LS_SolveFirst_FIX(tls, bp /* &L_Q16[0] */, M, b, bp+1152 /* &Y[0] */)
 16429  
 16430  	/****************************************************
 16431  	  D*L'*x = Y <=> L'*x = inv(D)*Y; because D is
 16432  	  diagonal, just multiply by 1/d_i
 16433  	  ****************************************************/
 16434  	SKP_Silk_LS_divide_Q16_FIX(tls, bp+1152 /* &Y[0] */, bp+1024 /* &inv_D[0] */, M)
 16435  
 16436  	/****************************************************
 16437  	  x = inv(L') * inv(D) * Y
 16438  	  *****************************************************/
 16439  	SKP_Silk_LS_SolveLast_FIX(tls, bp /* &L_Q16[0] */, M, bp+1152 /* &Y[0] */, x_Q16)
 16440  }
 16441  
 16442  func SKP_Silk_LDL_factorize_FIX(tls *libc.TLS, A uintptr, M int32, L_Q16 uintptr, inv_D uintptr) { /* SKP_Silk_solve_LS_FIX.c:108:17: */
 16443  	bp := tls.Alloc(128)
 16444  	defer tls.Free(128)
 16445  
 16446  	var i int32
 16447  	var j int32
 16448  	var k int32
 16449  	var status int32
 16450  	var loop_count int32
 16451  	var ptr1 uintptr
 16452  	var ptr2 uintptr
 16453  	var diag_min_value int32
 16454  	var tmp_32 int32
 16455  	var err int32
 16456  	// var v_Q0 [16]int32 at bp, 64
 16457  
 16458  	// var D_Q0 [16]int32 at bp+64, 64
 16459  
 16460  	var one_div_diag_Q36 int32
 16461  	var one_div_diag_Q40 int32
 16462  	var one_div_diag_Q48 int32
 16463  
 16464  	status = 1
 16465  	diag_min_value = SKP_max_32(tls, (int32(((func() int64 {
 16466  		if ((uint32((*(*int32)(unsafe.Pointer(A))) + (*(*int32)(unsafe.Pointer(A + uintptr((((int32(int16(M)))*(int32(int16(M))))-1))*4))))) & 0x80000000) == uint32(0) {
 16467  			return func() int64 {
 16468  				if ((uint32((*(*int32)(unsafe.Pointer(A))) & (*(*int32)(unsafe.Pointer(A + uintptr((((int32(int16(M)))*(int32(int16(M))))-1))*4))))) & 0x80000000) != uint32(0) {
 16469  					return int64(libc.Int32FromUint32(0x80000000))
 16470  				}
 16471  				return (int64((*(*int32)(unsafe.Pointer(A))) + (*(*int32)(unsafe.Pointer(A + uintptr((((int32(int16(M)))*(int32(int16(M))))-1))*4)))))
 16472  			}()
 16473  		}
 16474  		return func() int64 {
 16475  			if ((uint32((*(*int32)(unsafe.Pointer(A))) | (*(*int32)(unsafe.Pointer(A + uintptr((((int32(int16(M)))*(int32(int16(M))))-1))*4))))) & 0x80000000) == uint32(0) {
 16476  				return int64(0x7FFFFFFF)
 16477  			}
 16478  			return (int64((*(*int32)(unsafe.Pointer(A))) + (*(*int32)(unsafe.Pointer(A + uintptr((((int32(int16(M)))*(int32(int16(M))))-1))*4)))))
 16479  		}()
 16480  	}()) * (int64_t(SKP_FIX_CONST(tls, 1e-5, 31)))) >> (32))), (int32(1) << 9))
 16481  	for loop_count = 0; (loop_count < M) && (status == 1); loop_count++ {
 16482  		status = 0
 16483  		for j = 0; j < M; j++ {
 16484  			ptr1 = (L_Q16 + uintptr((((j)*(M))+(0)))*4)
 16485  			tmp_32 = 0
 16486  			for i = 0; i < j; i++ {
 16487  				*(*int32)(unsafe.Pointer(bp /* &v_Q0[0] */ + uintptr(i)*4)) = (((((*(*int32)(unsafe.Pointer(bp + 64 /* &D_Q0[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 64 /* &D_Q0[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 64 /* &D_Q0[0] */ + uintptr(i)*4))) * (func() int32 {
 16488  					if (16) == 1 {
 16489  						return (((*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4))) & 1))
 16490  					}
 16491  					return ((((*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 16492  				}()))) /* Q0 */
 16493  				tmp_32 = (((tmp_32) + ((((*(*int32)(unsafe.Pointer(bp /* &v_Q0[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp /* &v_Q0[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4)))))) >> 16))) + ((*(*int32)(unsafe.Pointer(bp /* &v_Q0[0] */ + uintptr(i)*4))) * (func() int32 {
 16494  					if (16) == 1 {
 16495  						return (((*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4))) & 1))
 16496  					}
 16497  					return ((((*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 16498  				}()))) /* Q0 */
 16499  			}
 16500  			tmp_32 = ((*(*int32)(unsafe.Pointer((A + uintptr((((j)*(M))+(j)))*4)))) - (tmp_32))
 16501  
 16502  			if tmp_32 < diag_min_value {
 16503  				tmp_32 = (((int32((int16(loop_count + 1)))) * (int32(int16(diag_min_value)))) - (tmp_32))
 16504  				/* Matrix not positive semi-definite, or ill conditioned */
 16505  				for i = 0; i < M; i++ {
 16506  					*(*int32)(unsafe.Pointer((A + uintptr((((i)*(M))+(i)))*4))) = ((*(*int32)(unsafe.Pointer((A + uintptr((((i)*(M))+(i)))*4)))) + (tmp_32))
 16507  				}
 16508  				status = 1
 16509  				break
 16510  			}
 16511  			*(*int32)(unsafe.Pointer(bp + 64 /* &D_Q0[0] */ + uintptr(j)*4)) = tmp_32 /* always < max(Correlation) */
 16512  
 16513  			/* two-step division */
 16514  			one_div_diag_Q36 = SKP_INVERSE32_varQ(tls, tmp_32, 36) /* Q36 */
 16515  			one_div_diag_Q40 = ((one_div_diag_Q36) << (4))         /* Q40 */
 16516  			err = ((int32(1) << 24) - (((((tmp_32) >> 16) * (int32(int16(one_div_diag_Q40)))) + ((((tmp_32) & 0x0000FFFF) * (int32(int16(one_div_diag_Q40)))) >> 16)) + ((tmp_32) * (func() int32 {
 16517  				if (16) == 1 {
 16518  					return (((one_div_diag_Q40) >> 1) + ((one_div_diag_Q40) & 1))
 16519  				}
 16520  				return ((((one_div_diag_Q40) >> ((16) - 1)) + 1) >> 1)
 16521  			}())))) /* Q24 */
 16522  			one_div_diag_Q48 = (((((err) >> 16) * (int32(int16(one_div_diag_Q40)))) + ((((err) & 0x0000FFFF) * (int32(int16(one_div_diag_Q40)))) >> 16)) + ((err) * (func() int32 {
 16523  				if (16) == 1 {
 16524  					return (((one_div_diag_Q40) >> 1) + ((one_div_diag_Q40) & 1))
 16525  				}
 16526  				return ((((one_div_diag_Q40) >> ((16) - 1)) + 1) >> 1)
 16527  			}()))) /* Q48 */
 16528  
 16529  			/* Save 1/Ds */
 16530  			(*inv_D_t)(unsafe.Pointer(inv_D + uintptr(j)*8)).FQ36_part = one_div_diag_Q36
 16531  			(*inv_D_t)(unsafe.Pointer(inv_D + uintptr(j)*8)).FQ48_part = one_div_diag_Q48
 16532  
 16533  			*(*int32)(unsafe.Pointer((L_Q16 + uintptr((((j)*(M))+(j)))*4))) = 65536 /* 1.0 in Q16 */
 16534  			ptr1 = (A + uintptr((((j)*(M))+(0)))*4)
 16535  			ptr2 = (L_Q16 + uintptr((((j+1)*(M))+(0)))*4)
 16536  			for i = (j + 1); i < M; i++ {
 16537  				tmp_32 = 0
 16538  				for k = 0; k < j; k++ {
 16539  					tmp_32 = (((tmp_32) + ((((*(*int32)(unsafe.Pointer(bp /* &v_Q0[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(ptr2 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp /* &v_Q0[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(ptr2 + uintptr(k)*4)))))) >> 16))) + ((*(*int32)(unsafe.Pointer(bp /* &v_Q0[0] */ + uintptr(k)*4))) * (func() int32 {
 16540  						if (16) == 1 {
 16541  							return (((*(*int32)(unsafe.Pointer(ptr2 + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(ptr2 + uintptr(k)*4))) & 1))
 16542  						}
 16543  						return ((((*(*int32)(unsafe.Pointer(ptr2 + uintptr(k)*4))) >> ((16) - 1)) + 1) >> 1)
 16544  					}()))) /* Q0 */
 16545  				}
 16546  				tmp_32 = ((*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4))) - (tmp_32)) /* always < max(Correlation) */
 16547  
 16548  				/* tmp_32 / D_Q0[j] : Divide to Q16 */
 16549  				*(*int32)(unsafe.Pointer((L_Q16 + uintptr((((i)*(M))+(j)))*4))) = ((int32(((int64_t(tmp_32)) * (int64_t(one_div_diag_Q48))) >> (32))) + ((((((tmp_32) >> 16) * (int32(int16(one_div_diag_Q36)))) + ((((tmp_32) & 0x0000FFFF) * (int32(int16(one_div_diag_Q36)))) >> 16)) + ((tmp_32) * (func() int32 {
 16550  					if (16) == 1 {
 16551  						return (((one_div_diag_Q36) >> 1) + ((one_div_diag_Q36) & 1))
 16552  					}
 16553  					return ((((one_div_diag_Q36) >> ((16) - 1)) + 1) >> 1)
 16554  				}()))) >> (4)))
 16555  
 16556  				/* go to next column */
 16557  				ptr2 += 4 * (uintptr(M))
 16558  			}
 16559  		}
 16560  	}
 16561  
 16562  }
 16563  
 16564  func SKP_Silk_LS_divide_Q16_FIX(tls *libc.TLS, T uintptr, inv_D uintptr, M int32) { /* SKP_Silk_solve_LS_FIX.c:180:17: */
 16565  	var i int32
 16566  	var tmp_32 int32
 16567  	var one_div_diag_Q36 int32
 16568  	var one_div_diag_Q48 int32
 16569  
 16570  	for i = 0; i < M; i++ {
 16571  		one_div_diag_Q36 = (*inv_D_t)(unsafe.Pointer(inv_D + uintptr(i)*8)).FQ36_part
 16572  		one_div_diag_Q48 = (*inv_D_t)(unsafe.Pointer(inv_D + uintptr(i)*8)).FQ48_part
 16573  
 16574  		tmp_32 = *(*int32)(unsafe.Pointer(T + uintptr(i)*4))
 16575  		*(*int32)(unsafe.Pointer(T + uintptr(i)*4)) = ((int32(((int64_t(tmp_32)) * (int64_t(one_div_diag_Q48))) >> (32))) + ((((((tmp_32) >> 16) * (int32(int16(one_div_diag_Q36)))) + ((((tmp_32) & 0x0000FFFF) * (int32(int16(one_div_diag_Q36)))) >> 16)) + ((tmp_32) * (func() int32 {
 16576  			if (16) == 1 {
 16577  				return (((one_div_diag_Q36) >> 1) + ((one_div_diag_Q36) & 1))
 16578  			}
 16579  			return ((((one_div_diag_Q36) >> ((16) - 1)) + 1) >> 1)
 16580  		}()))) >> (4)))
 16581  	}
 16582  }
 16583  
 16584  /* Solve Lx = b, when L is lower triangular and has ones on the diagonal */
 16585  func SKP_Silk_LS_SolveFirst_FIX(tls *libc.TLS, L_Q16 uintptr, M int32, b uintptr, x_Q16 uintptr) { /* SKP_Silk_solve_LS_FIX.c:200:17: */
 16586  	var i int32
 16587  	var j int32
 16588  	var ptr32 uintptr
 16589  	var tmp_32 int32
 16590  
 16591  	for i = 0; i < M; i++ {
 16592  		ptr32 = (L_Q16 + uintptr((((i)*(M))+(0)))*4)
 16593  		tmp_32 = 0
 16594  		for j = 0; j < i; j++ {
 16595  			tmp_32 = (((tmp_32) + ((((*(*int32)(unsafe.Pointer(ptr32 + uintptr(j)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4)))))) + ((((*(*int32)(unsafe.Pointer(ptr32 + uintptr(j)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4)))))) >> 16))) + ((*(*int32)(unsafe.Pointer(ptr32 + uintptr(j)*4))) * (func() int32 {
 16596  				if (16) == 1 {
 16597  					return (((*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4))) & 1))
 16598  				}
 16599  				return ((((*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4))) >> ((16) - 1)) + 1) >> 1)
 16600  			}())))
 16601  		}
 16602  		*(*int32)(unsafe.Pointer(x_Q16 + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer(b + uintptr(i)*4))) - (tmp_32))
 16603  	}
 16604  }
 16605  
 16606  /* Solve L^t*x = b, where L is lower triangular with ones on the diagonal */
 16607  func SKP_Silk_LS_SolveLast_FIX(tls *libc.TLS, L_Q16 uintptr, M int32, b uintptr, x_Q16 uintptr) { /* SKP_Silk_solve_LS_FIX.c:222:17: */
 16608  	var i int32
 16609  	var j int32
 16610  	var ptr32 uintptr
 16611  	var tmp_32 int32
 16612  
 16613  	for i = (M - 1); i >= 0; i-- {
 16614  		ptr32 = (L_Q16 + uintptr((((0)*(M))+(i)))*4)
 16615  		tmp_32 = 0
 16616  		for j = (M - 1); j > i; j-- {
 16617  			tmp_32 = (((tmp_32) + ((((*(*int32)(unsafe.Pointer(ptr32 + uintptr(((int32(int16(j)))*(int32(int16(M)))))*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4)))))) + ((((*(*int32)(unsafe.Pointer(ptr32 + uintptr(((int32(int16(j)))*(int32(int16(M)))))*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4)))))) >> 16))) + ((*(*int32)(unsafe.Pointer(ptr32 + uintptr(((int32(int16(j)))*(int32(int16(M)))))*4))) * (func() int32 {
 16618  				if (16) == 1 {
 16619  					return (((*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4))) & 1))
 16620  				}
 16621  				return ((((*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4))) >> ((16) - 1)) + 1) >> 1)
 16622  			}())))
 16623  		}
 16624  		*(*int32)(unsafe.Pointer(x_Q16 + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer(b + uintptr(i)*4))) - (tmp_32))
 16625  	}
 16626  }
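// The fixed-point solver above is hard to read through the Q36/Q48 two-step
// divisions, but the underlying algorithm is a plain LDL' factorization followed
// by forward substitution, diagonal scaling, and back substitution. The following
// floating-point sketch (hypothetical helper, no regularisation, no fixed-point
// scaling) restates SKP_Silk_solve_LDL_FIX and its helpers for readability only.
func solveLDLSketch(A []float64, M int, b []float64) []float64 {
	L := make([]float64, M*M)
	D := make([]float64, M)
	// LDL' factorization: A = L*D*L' with unit-diagonal lower-triangular L.
	for j := 0; j < M; j++ {
		sum := A[j*M+j]
		for k := 0; k < j; k++ {
			sum -= L[j*M+k] * L[j*M+k] * D[k]
		}
		D[j] = sum
		L[j*M+j] = 1
		for i := j + 1; i < M; i++ {
			s := A[i*M+j]
			for k := 0; k < j; k++ {
				s -= L[i*M+k] * L[j*M+k] * D[k]
			}
			L[i*M+j] = s / D[j]
		}
	}
	// Forward substitution: L*y = b (SKP_Silk_LS_SolveFirst_FIX).
	y := make([]float64, M)
	for i := 0; i < M; i++ {
		y[i] = b[i]
		for j := 0; j < i; j++ {
			y[i] -= L[i*M+j] * y[j]
		}
	}
	// Diagonal scaling: y <- inv(D)*y (SKP_Silk_LS_divide_Q16_FIX).
	for i := 0; i < M; i++ {
		y[i] /= D[i]
	}
	// Back substitution: L'*x = y (SKP_Silk_LS_SolveLast_FIX).
	x := make([]float64, M)
	for i := M - 1; i >= 0; i-- {
		x[i] = y[i]
		for j := i + 1; j < M; j++ {
			x[i] -= L[j*M+i] * x[j]
		}
	}
	return x
}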
 16627  
 16628  func SKP_Silk_insertion_sort_increasing(tls *libc.TLS, a uintptr, index uintptr, L int32, K int32) { /* SKP_Silk_sort.c:34:6: */
 16629  	var value int32
 16630  	var i int32
 16631  	var j int32
 16632  
 16633  	/* Safety checks */
 16634  
 16635  	/* Write start indices in index vector */
 16636  	for i = 0; i < K; i++ {
 16637  		*(*int32)(unsafe.Pointer(index + uintptr(i)*4)) = i
 16638  	}
 16639  
 16640  	/* Sort vector elements by value, increasing order */
 16641  	for i = 1; i < K; i++ {
 16642  		value = *(*int32)(unsafe.Pointer(a + uintptr(i)*4))
 16643  		for j = (i - 1); (j >= 0) && (value < *(*int32)(unsafe.Pointer(a + uintptr(j)*4))); j-- {
 16644  			*(*int32)(unsafe.Pointer(a + uintptr((j+1))*4)) = *(*int32)(unsafe.Pointer(a + uintptr(j)*4))         /* Shift value */
 16645  			*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = *(*int32)(unsafe.Pointer(index + uintptr(j)*4)) /* Shift index */
 16646  		}
 16647  		*(*int32)(unsafe.Pointer(a + uintptr((j+1))*4)) = value /* Write value */
 16648  		*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = i /* Write index */
 16649  	}
 16650  
 16651  	/* If fewer than L values are asked for (K < L), check the remaining values, */
 16652  	/* but only spend CPU to ensure that the first K values are correct          */
 16653  	for i = K; i < L; i++ {
 16654  		value = *(*int32)(unsafe.Pointer(a + uintptr(i)*4))
 16655  		if value < *(*int32)(unsafe.Pointer(a + uintptr((K-1))*4)) {
 16656  			for j = (K - 2); (j >= 0) && (value < *(*int32)(unsafe.Pointer(a + uintptr(j)*4))); j-- {
 16657  				*(*int32)(unsafe.Pointer(a + uintptr((j+1))*4)) = *(*int32)(unsafe.Pointer(a + uintptr(j)*4))         /* Shift value */
 16658  				*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = *(*int32)(unsafe.Pointer(index + uintptr(j)*4)) /* Shift index */
 16659  			}
 16660  			*(*int32)(unsafe.Pointer(a + uintptr((j+1))*4)) = value /* Write value */
 16661  			*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = i /* Write index */
 16662  		}
 16663  	}
 16664  }
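// SKP_Silk_insertion_sort_increasing above keeps only the K smallest of L values
// in sorted order together with their original indices. The hypothetical helper
// below states the same contract on Go slices, without the ccgo pointer
// arithmetic: after the call, a[0..K-1] is sorted increasing and index[k] is the
// original position of a[k].
func insertionSortIncreasingSketch(a []int32, index []int32, L, K int) {
	for i := 0; i < K; i++ {
		index[i] = int32(i)
	}
	// Sort the first K entries by straight insertion.
	for i := 1; i < K; i++ {
		v := a[i]
		j := i - 1
		for ; j >= 0 && v < a[j]; j-- {
			a[j+1] = a[j]
			index[j+1] = index[j]
		}
		a[j+1] = v
		index[j+1] = int32(i)
	}
	// Fold the remaining L-K values in, keeping only the K best.
	for i := K; i < L; i++ {
		v := a[i]
		if v < a[K-1] {
			j := K - 2
			for ; j >= 0 && v < a[j]; j-- {
				a[j+1] = a[j]
				index[j+1] = index[j]
			}
			a[j+1] = v
			index[j+1] = int32(i)
		}
	}
}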
 16665  
 16666  func SKP_Silk_insertion_sort_decreasing_int16(tls *libc.TLS, a uintptr, index uintptr, L int32, K int32) { /* SKP_Silk_sort.c:80:6: */
 16667  	var i int32
 16668  	var j int32
 16669  	var value int32
 16670  
 16671  	/* Safety checks */
 16672  
 16673  	/* Write start indices in index vector */
 16674  	for i = 0; i < K; i++ {
 16675  		*(*int32)(unsafe.Pointer(index + uintptr(i)*4)) = i
 16676  	}
 16677  
 16678  	/* Sort vector elements by value, decreasing order */
 16679  	for i = 1; i < K; i++ {
 16680  		value = int32(*(*int16)(unsafe.Pointer(a + uintptr(i)*2)))
 16681  		for j = (i - 1); (j >= 0) && (value > int32(*(*int16)(unsafe.Pointer(a + uintptr(j)*2)))); j-- {
 16682  			*(*int16)(unsafe.Pointer(a + uintptr((j+1))*2)) = *(*int16)(unsafe.Pointer(a + uintptr(j)*2))         /* Shift value */
 16683  			*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = *(*int32)(unsafe.Pointer(index + uintptr(j)*4)) /* Shift index */
 16684  		}
 16685  		*(*int16)(unsafe.Pointer(a + uintptr((j+1))*2)) = int16(value) /* Write value */
 16686  		*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = i        /* Write index */
 16687  	}
 16688  
 16689  	/* If fewer than L values are asked for (K < L), check the remaining values, */
 16690  	/* but only spend CPU to ensure that the first K values are correct          */
 16691  	for i = K; i < L; i++ {
 16692  		value = int32(*(*int16)(unsafe.Pointer(a + uintptr(i)*2)))
 16693  		if value > int32(*(*int16)(unsafe.Pointer(a + uintptr((K-1))*2))) {
 16694  			for j = (K - 2); (j >= 0) && (value > int32(*(*int16)(unsafe.Pointer(a + uintptr(j)*2)))); j-- {
 16695  				*(*int16)(unsafe.Pointer(a + uintptr((j+1))*2)) = *(*int16)(unsafe.Pointer(a + uintptr(j)*2))         /* Shift value */
 16696  				*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = *(*int32)(unsafe.Pointer(index + uintptr(j)*4)) /* Shift index */
 16697  			}
 16698  			*(*int16)(unsafe.Pointer(a + uintptr((j+1))*2)) = int16(value) /* Write value */
 16699  			*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = i        /* Write index */
 16700  		}
 16701  	}
 16702  }
 16703  
 16704  func SKP_Silk_insertion_sort_increasing_all_values(tls *libc.TLS, a uintptr, L int32) { /* SKP_Silk_sort.c:126:6: */
 16705  	var value int32
 16706  	var i int32
 16707  	var j int32
 16708  
 16709  	/* Safety checks */
 16710  
 16711  	/* Sort vector elements by value, increasing order */
 16712  	for i = 1; i < L; i++ {
 16713  		value = *(*int32)(unsafe.Pointer(a + uintptr(i)*4))
 16714  		for j = (i - 1); (j >= 0) && (value < *(*int32)(unsafe.Pointer(a + uintptr(j)*4))); j-- {
 16715  			*(*int32)(unsafe.Pointer(a + uintptr((j+1))*4)) = *(*int32)(unsafe.Pointer(a + uintptr(j)*4)) /* Shift value */
 16716  		}
 16717  		*(*int32)(unsafe.Pointer(a + uintptr((j+1))*4)) = value /* Write value */
 16718  	}
 16719  }
 16720  
 16721  /* Compute number of bits to right shift the sum of squares of a vector */
 16722  /* of int16s to make it fit in an int32                                 */
 16723  func SKP_Silk_sum_sqr_shift(tls *libc.TLS, energy uintptr, shift uintptr, x uintptr, len int32) { /* SKP_Silk_sum_sqr_shift.c:39:6: */
 16724  	var i int32
 16725  	var shft int32
 16726  	var in32 int32
 16727  	var nrg_tmp int32
 16728  	var nrg int32
 16729  
 16730  	if (int32(intptr_t(x) & int64(2))) != 0 {
 16731  		/* Input is not 4-byte aligned */
 16732  		nrg = ((int32(*(*int16)(unsafe.Pointer(x)))) * (int32(*(*int16)(unsafe.Pointer(x)))))
 16733  		i = 1
 16734  	} else {
 16735  		nrg = 0
 16736  		i = 0
 16737  	}
 16738  	shft = 0
 16739  	len--
 16740  	for i < len {
 16741  		/* Load two values at once */
 16742  		in32 = *(*int32)(unsafe.Pointer((x + uintptr(i)*2)))
 16743  		nrg = (int32((uint32(nrg)) + (uint32((int32(int16(in32))) * (int32(int16(in32)))))))
 16744  		nrg = (int32((uint32(nrg)) + (uint32(((in32) >> 16) * ((in32) >> 16)))))
 16745  		i = i + (2)
 16746  		if nrg < 0 {
 16747  			/* Scale down */
 16748  			nrg = (int32((uint32(nrg)) >> (2)))
 16749  			shft = 2
 16750  			break
 16751  		}
 16752  	}
 16753  	for ; i < len; i = i + (2) {
 16754  		/* Load two values at once */
 16755  		in32 = *(*int32)(unsafe.Pointer((x + uintptr(i)*2)))
 16756  		nrg_tmp = ((int32(int16(in32))) * (int32(int16(in32))))
 16757  		nrg_tmp = (int32((uint32(nrg_tmp)) + (uint32(((in32) >> 16) * ((in32) >> 16)))))
 16758  		nrg = (int32((uint32(nrg)) + ((uint32(nrg_tmp)) >> (shft))))
 16759  		if nrg < 0 {
 16760  			/* Scale down */
 16761  			nrg = (int32((uint32(nrg)) >> (2)))
 16762  			shft = shft + (2)
 16763  		}
 16764  	}
 16765  	if i == len {
 16766  		/* One sample left to process */
 16767  		nrg_tmp = ((int32(*(*int16)(unsafe.Pointer(x + uintptr(i)*2)))) * (int32(*(*int16)(unsafe.Pointer(x + uintptr(i)*2)))))
 16768  		nrg = ((nrg) + ((nrg_tmp) >> (shft)))
 16769  	}
 16770  
 16771  	/* Make sure to have at least one extra leading zero (two leading zeros in total) */
 16772  	if (uint32(nrg) & 0xC0000000) != 0 {
 16773  		nrg = (int32((uint32(nrg)) >> (2)))
 16774  		shft = shft + (2)
 16775  	}
 16776  
 16777  	/* Output arguments */
 16778  	*(*int32)(unsafe.Pointer(shift)) = shft
 16779  	*(*int32)(unsafe.Pointer(energy)) = nrg
 16780  }
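// SKP_Silk_sum_sqr_shift above accumulates the energy of an int16 vector while
// growing a right shift so the 32-bit running sum always keeps two leading zero
// bits; the two-samples-per-32-bit-load trick obscures that idea. The sketch
// below (hypothetical helper, not bit-exact because it shifts once at the end
// instead of progressively) shows the contract: energy ~ (sum of x[i]^2) >> shift.
func sumSqrShiftSketch(x []int16) (energy, shift int32) {
	var nrg int64
	for _, v := range x {
		nrg += int64(v) * int64(v)
	}
	for nrg >= 1<<30 { // keep at least two leading zeros in the 32-bit result
		nrg >>= 2
		shift += 2
	}
	return int32(nrg), shift
}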
 16781  
 16782  var SKP_Silk_gain_CDF = [2][65]uint16{
 16783  	{
 16784  		uint16(0), uint16(18), uint16(45), uint16(94), uint16(181), uint16(320), uint16(519), uint16(777),
 16785  		uint16(1093), uint16(1468), uint16(1909), uint16(2417), uint16(2997), uint16(3657), uint16(4404), uint16(5245),
 16786  		uint16(6185), uint16(7228), uint16(8384), uint16(9664), uint16(11069), uint16(12596), uint16(14244), uint16(16022),
 16787  		uint16(17937), uint16(19979), uint16(22121), uint16(24345), uint16(26646), uint16(29021), uint16(31454), uint16(33927),
 16788  		uint16(36438), uint16(38982), uint16(41538), uint16(44068), uint16(46532), uint16(48904), uint16(51160), uint16(53265),
 16789  		uint16(55184), uint16(56904), uint16(58422), uint16(59739), uint16(60858), uint16(61793), uint16(62568), uint16(63210),
 16790  		uint16(63738), uint16(64165), uint16(64504), uint16(64769), uint16(64976), uint16(65133), uint16(65249), uint16(65330),
 16791  		uint16(65386), uint16(65424), uint16(65451), uint16(65471), uint16(65487), uint16(65501), uint16(65513), uint16(65524),
 16792  		uint16(65535),
 16793  	},
 16794  	{
 16795  		uint16(0), uint16(214), uint16(581), uint16(1261), uint16(2376), uint16(3920), uint16(5742), uint16(7632),
 16796  		uint16(9449), uint16(11157), uint16(12780), uint16(14352), uint16(15897), uint16(17427), uint16(18949), uint16(20462),
 16797  		uint16(21957), uint16(23430), uint16(24889), uint16(26342), uint16(27780), uint16(29191), uint16(30575), uint16(31952),
 16798  		uint16(33345), uint16(34763), uint16(36200), uint16(37642), uint16(39083), uint16(40519), uint16(41930), uint16(43291),
 16799  		uint16(44602), uint16(45885), uint16(47154), uint16(48402), uint16(49619), uint16(50805), uint16(51959), uint16(53069),
 16800  		uint16(54127), uint16(55140), uint16(56128), uint16(57101), uint16(58056), uint16(58979), uint16(59859), uint16(60692),
 16801  		uint16(61468), uint16(62177), uint16(62812), uint16(63368), uint16(63845), uint16(64242), uint16(64563), uint16(64818),
 16802  		uint16(65023), uint16(65184), uint16(65306), uint16(65391), uint16(65447), uint16(65482), uint16(65505), uint16(65521),
 16803  		uint16(65535),
 16804  	},
 16805  } /* SKP_Silk_tables_gain.c:35:18 */
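// The uint16 tables in this part of the file, starting with SKP_Silk_gain_CDF,
// are cumulative distributions scaled to [0, 65535] for the range coder. As a
// hedged illustration only (the real lookup lives in SKP_Silk_range_decoder and
// may search differently), a decoder picks the symbol s whose interval contains
// the current target value, i.e. cdf[s] <= target < cdf[s+1], assuming
// target < 65535.
func cdfSymbolSketch(cdf []uint16, target uint16) int {
	lo, hi := 0, len(cdf)-1 // invariant: cdf[lo] <= target < cdf[hi]
	for hi-lo > 1 {
		mid := (lo + hi) / 2
		if cdf[mid] <= target {
			lo = mid
		} else {
			hi = mid
		}
	}
	return lo
}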
 16806  
 16807  var SKP_Silk_gain_CDF_offset int32 = 32 /* SKP_Silk_tables_gain.c:61:15 */
 16808  
 16809  var SKP_Silk_delta_gain_CDF = [46]uint16{
 16810  	uint16(0), uint16(2358), uint16(3856), uint16(7023), uint16(15376), uint16(53058), uint16(59135), uint16(61555),
 16811  	uint16(62784), uint16(63498), uint16(63949), uint16(64265), uint16(64478), uint16(64647), uint16(64783), uint16(64894),
 16812  	uint16(64986), uint16(65052), uint16(65113), uint16(65169), uint16(65213), uint16(65252), uint16(65284), uint16(65314),
 16813  	uint16(65338), uint16(65359), uint16(65377), uint16(65392), uint16(65403), uint16(65415), uint16(65424), uint16(65432),
 16814  	uint16(65440), uint16(65448), uint16(65455), uint16(65462), uint16(65470), uint16(65477), uint16(65484), uint16(65491),
 16815  	uint16(65499), uint16(65506), uint16(65513), uint16(65521), uint16(65528), uint16(65535),
 16816  } /* SKP_Silk_tables_gain.c:64:18 */
 16817  
 16818  var SKP_Silk_delta_gain_CDF_offset int32 = 5 /* SKP_Silk_tables_gain.c:73:15 */
 16819  
 16820  var SKP_Silk_LTP_per_index_CDF = [4]uint16{
 16821  	uint16(0), uint16(20992), uint16(40788), uint16(65535),
 16822  } /* SKP_Silk_tables_LTP.c:30:18 */
 16823  
 16824  var SKP_Silk_LTP_per_index_CDF_offset int32 = 1 /* SKP_Silk_tables_LTP.c:34:15 */
 16825  
 16826  var SKP_Silk_LTP_gain_CDF_0 = [11]uint16{
 16827  	uint16(0), uint16(49380), uint16(54463), uint16(56494), uint16(58437), uint16(60101), uint16(61683), uint16(62985),
 16828  	uint16(64066), uint16(64823), uint16(65535),
 16829  } /* SKP_Silk_tables_LTP.c:37:18 */
 16830  
 16831  var SKP_Silk_LTP_gain_CDF_1 = [21]uint16{
 16832  	uint16(0), uint16(25290), uint16(30654), uint16(35710), uint16(40386), uint16(42937), uint16(45250), uint16(47459),
 16833  	uint16(49411), uint16(51348), uint16(52974), uint16(54517), uint16(55976), uint16(57423), uint16(58865), uint16(60285),
 16834  	uint16(61667), uint16(62895), uint16(63827), uint16(64724), uint16(65535),
 16835  } /* SKP_Silk_tables_LTP.c:42:18 */
 16836  
 16837  var SKP_Silk_LTP_gain_CDF_2 = [41]uint16{
 16838  	uint16(0), uint16(4958), uint16(9439), uint16(13581), uint16(17638), uint16(21651), uint16(25015), uint16(28025),
 16839  	uint16(30287), uint16(32406), uint16(34330), uint16(36240), uint16(38130), uint16(39790), uint16(41281), uint16(42764),
 16840  	uint16(44229), uint16(45676), uint16(47081), uint16(48431), uint16(49675), uint16(50849), uint16(51932), uint16(52966),
 16841  	uint16(53957), uint16(54936), uint16(55869), uint16(56789), uint16(57708), uint16(58504), uint16(59285), uint16(60043),
 16842  	uint16(60796), uint16(61542), uint16(62218), uint16(62871), uint16(63483), uint16(64076), uint16(64583), uint16(65062),
 16843  	uint16(65535),
 16844  } /* SKP_Silk_tables_LTP.c:48:18 */
 16845  
 16846  var SKP_Silk_LTP_gain_CDF_offsets = [3]int32{
 16847  	1, 3, 10,
 16848  } /* SKP_Silk_tables_LTP.c:57:15 */
 16849  
 16850  var SKP_Silk_LTP_gain_middle_avg_RD_Q14 int32 = 11010 /* SKP_Silk_tables_LTP.c:61:17 */
 16851  
 16852  var SKP_Silk_LTP_gain_BITS_Q6_0 = [10]int16{
 16853  	int16(26), int16(236), int16(321), int16(325), int16(339), int16(344), int16(362), int16(379),
 16854  	int16(412), int16(418),
 16855  } /* SKP_Silk_tables_LTP.c:63:17 */
 16856  
 16857  var SKP_Silk_LTP_gain_BITS_Q6_1 = [20]int16{
 16858  	int16(88), int16(231), int16(237), int16(244), int16(300), int16(309), int16(313), int16(324),
 16859  	int16(325), int16(341), int16(346), int16(351), int16(352), int16(352), int16(354), int16(356),
 16860  	int16(367), int16(393), int16(396), int16(406),
 16861  } /* SKP_Silk_tables_LTP.c:68:17 */
 16862  
 16863  var SKP_Silk_LTP_gain_BITS_Q6_2 = [40]int16{
 16864  	int16(238), int16(248), int16(255), int16(257), int16(258), int16(274), int16(284), int16(311),
 16865  	int16(317), int16(326), int16(326), int16(327), int16(339), int16(349), int16(350), int16(351),
 16866  	int16(352), int16(355), int16(358), int16(366), int16(371), int16(379), int16(383), int16(387),
 16867  	int16(388), int16(393), int16(394), int16(394), int16(407), int16(409), int16(412), int16(412),
 16868  	int16(413), int16(422), int16(426), int16(432), int16(434), int16(449), int16(454), int16(455),
 16869  } /* SKP_Silk_tables_LTP.c:74:17 */
 16870  
 16871  var SKP_Silk_LTP_gain_CDF_ptrs = [3]uintptr{
 16872  	0,
 16873  	0,
 16874  	0,
 16875  } /* SKP_Silk_tables_LTP.c:82:18 */
 16876  
 16877  var SKP_Silk_LTP_gain_BITS_Q6_ptrs = [3]uintptr{
 16878  	0,
 16879  	0,
 16880  	0,
 16881  } /* SKP_Silk_tables_LTP.c:88:17 */
 16882  
 16883  var SKP_Silk_LTP_gain_vq_0_Q14 = [10][5]int16{
 16884  	{
 16885  		int16(594), int16(984), int16(2840), int16(1021), int16(669),
 16886  	},
 16887  	{
 16888  		int16(10), int16(35), int16(304), int16(-1), int16(23),
 16889  	},
 16890  	{
 16891  		int16(-694), int16(1923), int16(4603), int16(2975), int16(2335),
 16892  	},
 16893  	{
 16894  		int16(2437), int16(3176), int16(3778), int16(1940), int16(481),
 16895  	},
 16896  	{
 16897  		int16(214), int16(-46), int16(7870), int16(4406), int16(-521),
 16898  	},
 16899  	{
 16900  		int16(-896), int16(4818), int16(8501), int16(1623), int16(-887),
 16901  	},
 16902  	{
 16903  		int16(-696), int16(3178), int16(6480), int16(-302), int16(1081),
 16904  	},
 16905  	{
 16906  		int16(517), int16(599), int16(1002), int16(567), int16(560),
 16907  	},
 16908  	{
 16909  		int16(-2075), int16(-834), int16(4712), int16(-340), int16(896),
 16910  	},
 16911  	{
 16912  		int16(1435), int16(-644), int16(3993), int16(-612), int16(-2063),
 16913  	},
 16914  } /* SKP_Silk_tables_LTP.c:94:17 */
 16915  
 16916  var SKP_Silk_LTP_gain_vq_1_Q14 = [20][5]int16{
 16917  	{
 16918  		int16(1655), int16(2918), int16(5001), int16(3010), int16(1775),
 16919  	},
 16920  	{
 16921  		int16(113), int16(198), int16(856), int16(176), int16(178),
 16922  	},
 16923  	{
 16924  		int16(-843), int16(2479), int16(7858), int16(5371), int16(574),
 16925  	},
 16926  	{
 16927  		int16(59), int16(5356), int16(7648), int16(2850), int16(-315),
 16928  	},
 16929  	{
 16930  		int16(3840), int16(4851), int16(6527), int16(1583), int16(-1233),
 16931  	},
 16932  	{
 16933  		int16(1620), int16(1760), int16(2330), int16(1876), int16(2045),
 16934  	},
 16935  	{
 16936  		int16(-545), int16(1854), int16(11792), int16(1547), int16(-307),
 16937  	},
 16938  	{
 16939  		int16(-604), int16(689), int16(5369), int16(5074), int16(4265),
 16940  	},
 16941  	{
 16942  		int16(521), int16(-1331), int16(9829), int16(6209), int16(-1211),
 16943  	},
 16944  	{
 16945  		int16(-1315), int16(6747), int16(9929), int16(-1410), int16(546),
 16946  	},
 16947  	{
 16948  		int16(117), int16(-144), int16(2810), int16(1649), int16(5240),
 16949  	},
 16950  	{
 16951  		int16(5392), int16(3476), int16(2425), int16(-38), int16(633),
 16952  	},
 16953  	{
 16954  		int16(14), int16(-449), int16(5274), int16(3547), int16(-171),
 16955  	},
 16956  	{
 16957  		int16(-98), int16(395), int16(9114), int16(1676), int16(844),
 16958  	},
 16959  	{
 16960  		int16(-908), int16(3843), int16(8861), int16(-957), int16(1474),
 16961  	},
 16962  	{
 16963  		int16(396), int16(6747), int16(5379), int16(-329), int16(1269),
 16964  	},
 16965  	{
 16966  		int16(-335), int16(2830), int16(4281), int16(270), int16(-54),
 16967  	},
 16968  	{
 16969  		int16(1502), int16(5609), int16(8958), int16(6045), int16(2059),
 16970  	},
 16971  	{
 16972  		int16(-370), int16(479), int16(5267), int16(5726), int16(1174),
 16973  	},
 16974  	{
 16975  		int16(5237), int16(-1144), int16(6510), int16(455), int16(512),
 16976  	},
 16977  } /* SKP_Silk_tables_LTP.c:128:17 */
 16978  
 16979  var SKP_Silk_LTP_gain_vq_2_Q14 = [40][5]int16{
 16980  	{
 16981  		int16(-278), int16(415), int16(9345), int16(7106), int16(-431),
 16982  	},
 16983  	{
 16984  		int16(-1006), int16(3863), int16(9524), int16(4724), int16(-871),
 16985  	},
 16986  	{
 16987  		int16(-954), int16(4624), int16(11722), int16(973), int16(-300),
 16988  	},
 16989  	{
 16990  		int16(-117), int16(7066), int16(8331), int16(1959), int16(-901),
 16991  	},
 16992  	{
 16993  		int16(593), int16(3412), int16(6070), int16(4914), int16(1567),
 16994  	},
 16995  	{
 16996  		int16(54), int16(-51), int16(12618), int16(4228), int16(-844),
 16997  	},
 16998  	{
 16999  		int16(3157), int16(4822), int16(5229), int16(2313), int16(717),
 17000  	},
 17001  	{
 17002  		int16(-244), int16(1161), int16(14198), int16(779), int16(69),
 17003  	},
 17004  	{
 17005  		int16(-1218), int16(5603), int16(12894), int16(-2301), int16(1001),
 17006  	},
 17007  	{
 17008  		int16(-132), int16(3960), int16(9526), int16(577), int16(1806),
 17009  	},
 17010  	{
 17011  		int16(-1633), int16(8815), int16(10484), int16(-2452), int16(895),
 17012  	},
 17013  	{
 17014  		int16(235), int16(450), int16(1243), int16(667), int16(437),
 17015  	},
 17016  	{
 17017  		int16(959), int16(-2630), int16(10897), int16(8772), int16(-1852),
 17018  	},
 17019  	{
 17020  		int16(2420), int16(2046), int16(8893), int16(4427), int16(-1569),
 17021  	},
 17022  	{
 17023  		int16(23), int16(7091), int16(8356), int16(-1285), int16(1508),
 17024  	},
 17025  	{
 17026  		int16(-1133), int16(835), int16(7662), int16(6043), int16(2800),
 17027  	},
 17028  	{
 17029  		int16(439), int16(391), int16(11016), int16(2253), int16(1362),
 17030  	},
 17031  	{
 17032  		int16(-1020), int16(2876), int16(13436), int16(4015), int16(-3020),
 17033  	},
 17034  	{
 17035  		int16(1060), int16(-2690), int16(13512), int16(5565), int16(-1394),
 17036  	},
 17037  	{
 17038  		int16(-1420), int16(8007), int16(11421), int16(-152), int16(-1672),
 17039  	},
 17040  	{
 17041  		int16(-893), int16(2895), int16(15434), int16(-1490), int16(159),
 17042  	},
 17043  	{
 17044  		int16(-1054), int16(428), int16(12208), int16(8538), int16(-3344),
 17045  	},
 17046  	{
 17047  		int16(1772), int16(-1304), int16(7593), int16(6185), int16(561),
 17048  	},
 17049  	{
 17050  		int16(525), int16(-1207), int16(6659), int16(11151), int16(-1170),
 17051  	},
 17052  	{
 17053  		int16(439), int16(2667), int16(4743), int16(2359), int16(5515),
 17054  	},
 17055  	{
 17056  		int16(2951), int16(7432), int16(7909), int16(-230), int16(-1564),
 17057  	},
 17058  	{
 17059  		int16(-72), int16(2140), int16(5477), int16(1391), int16(1580),
 17060  	},
 17061  	{
 17062  		int16(476), int16(-1312), int16(15912), int16(2174), int16(-1027),
 17063  	},
 17064  	{
 17065  		int16(5737), int16(441), int16(2493), int16(2043), int16(2757),
 17066  	},
 17067  	{
 17068  		int16(228), int16(-43), int16(1803), int16(6663), int16(7064),
 17069  	},
 17070  	{
 17071  		int16(4596), int16(9182), int16(1917), int16(-200), int16(203),
 17072  	},
 17073  	{
 17074  		int16(-704), int16(12039), int16(5451), int16(-1188), int16(542),
 17075  	},
 17076  	{
 17077  		int16(1782), int16(-1040), int16(10078), int16(7513), int16(-2767),
 17078  	},
 17079  	{
 17080  		int16(-2626), int16(7747), int16(9019), int16(62), int16(1710),
 17081  	},
 17082  	{
 17083  		int16(235), int16(-233), int16(2954), int16(10921), int16(1947),
 17084  	},
 17085  	{
 17086  		int16(10854), int16(2814), int16(1232), int16(-111), int16(222),
 17087  	},
 17088  	{
 17089  		int16(2267), int16(2778), int16(12325), int16(156), int16(-1658),
 17090  	},
 17091  	{
 17092  		int16(-2950), int16(8095), int16(16330), int16(268), int16(-3626),
 17093  	},
 17094  	{
 17095  		int16(67), int16(2083), int16(7950), int16(-80), int16(-2432),
 17096  	},
 17097  	{
 17098  		int16(518), int16(-66), int16(1718), int16(415), int16(11435),
 17099  	},
 17100  } /* SKP_Silk_tables_LTP.c:192:17 */
 17101  
 17102  var SKP_Silk_LTP_vq_ptrs_Q14 = [3]uintptr{
 17103  	0,
 17104  	0,
 17105  	0,
 17106  } /* SKP_Silk_tables_LTP.c:316:17 */
 17107  
 17108  var SKP_Silk_LTP_vq_sizes = [3]int32{
 17109  	10, 20, 40,
 17110  } /* SKP_Silk_tables_LTP.c:322:15 */
 17111  
 17112  var SKP_Silk_NLSF_MSVQ_CB0_10_CDF = [126]uint16{
 17113  	uint16(0),
 17114  	uint16(2658),
 17115  	uint16(4420),
 17116  	uint16(6107),
 17117  	uint16(7757),
 17118  	uint16(9408),
 17119  	uint16(10955),
 17120  	uint16(12502),
 17121  	uint16(13983),
 17122  	uint16(15432),
 17123  	uint16(16882),
 17124  	uint16(18331),
 17125  	uint16(19750),
 17126  	uint16(21108),
 17127  	uint16(22409),
 17128  	uint16(23709),
 17129  	uint16(25010),
 17130  	uint16(26256),
 17131  	uint16(27501),
 17132  	uint16(28747),
 17133  	uint16(29965),
 17134  	uint16(31158),
 17135  	uint16(32351),
 17136  	uint16(33544),
 17137  	uint16(34736),
 17138  	uint16(35904),
 17139  	uint16(36997),
 17140  	uint16(38091),
 17141  	uint16(39185),
 17142  	uint16(40232),
 17143  	uint16(41280),
 17144  	uint16(42327),
 17145  	uint16(43308),
 17146  	uint16(44290),
 17147  	uint16(45271),
 17148  	uint16(46232),
 17149  	uint16(47192),
 17150  	uint16(48132),
 17151  	uint16(49032),
 17152  	uint16(49913),
 17153  	uint16(50775),
 17154  	uint16(51618),
 17155  	uint16(52462),
 17156  	uint16(53287),
 17157  	uint16(54095),
 17158  	uint16(54885),
 17159  	uint16(55675),
 17160  	uint16(56449),
 17161  	uint16(57222),
 17162  	uint16(57979),
 17163  	uint16(58688),
 17164  	uint16(59382),
 17165  	uint16(60076),
 17166  	uint16(60726),
 17167  	uint16(61363),
 17168  	uint16(61946),
 17169  	uint16(62505),
 17170  	uint16(63052),
 17171  	uint16(63543),
 17172  	uint16(63983),
 17173  	uint16(64396),
 17174  	uint16(64766),
 17175  	uint16(65023),
 17176  	uint16(65279),
 17177  	uint16(65535),
 17178  	uint16(0),
 17179  	uint16(4977),
 17180  	uint16(9542),
 17181  	uint16(14106),
 17182  	uint16(18671),
 17183  	uint16(23041),
 17184  	uint16(27319),
 17185  	uint16(31596),
 17186  	uint16(35873),
 17187  	uint16(39969),
 17188  	uint16(43891),
 17189  	uint16(47813),
 17190  	uint16(51652),
 17191  	uint16(55490),
 17192  	uint16(59009),
 17193  	uint16(62307),
 17194  	uint16(65535),
 17195  	uint16(0),
 17196  	uint16(8571),
 17197  	uint16(17142),
 17198  	uint16(25529),
 17199  	uint16(33917),
 17200  	uint16(42124),
 17201  	uint16(49984),
 17202  	uint16(57844),
 17203  	uint16(65535),
 17204  	uint16(0),
 17205  	uint16(8732),
 17206  	uint16(17463),
 17207  	uint16(25825),
 17208  	uint16(34007),
 17209  	uint16(42189),
 17210  	uint16(50196),
 17211  	uint16(58032),
 17212  	uint16(65535),
 17213  	uint16(0),
 17214  	uint16(8948),
 17215  	uint16(17704),
 17216  	uint16(25733),
 17217  	uint16(33762),
 17218  	uint16(41791),
 17219  	uint16(49821),
 17220  	uint16(57678),
 17221  	uint16(65535),
 17222  	uint16(0),
 17223  	uint16(4374),
 17224  	uint16(8655),
 17225  	uint16(12936),
 17226  	uint16(17125),
 17227  	uint16(21313),
 17228  	uint16(25413),
 17229  	uint16(29512),
 17230  	uint16(33611),
 17231  	uint16(37710),
 17232  	uint16(41809),
 17233  	uint16(45820),
 17234  	uint16(49832),
 17235  	uint16(53843),
 17236  	uint16(57768),
 17237  	uint16(61694),
 17238  	uint16(65535),
 17239  } /* SKP_Silk_tables_NLSF_CB0_10.c:38:18 */
 17240  
 17241  var SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr = [6]uintptr{
 17242  	0,
 17243  	0,
 17244  	0,
 17245  	0,
 17246  	0,
 17247  	0,
 17248  } /* SKP_Silk_tables_NLSF_CB0_10.c:168:18 */
 17249  
 17250  var SKP_Silk_NLSF_MSVQ_CB0_10_CDF_middle_idx = [6]int32{
 17251  	23,
 17252  	8,
 17253  	5,
 17254  	5,
 17255  	5,
 17256  	9,
 17257  } /* SKP_Silk_tables_NLSF_CB0_10.c:178:15 */
 17258  
 17259  var SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5 = [120]int16{
 17260  	int16(148), int16(167),
 17261  	int16(169), int16(170),
 17262  	int16(170), int16(173),
 17263  	int16(173), int16(175),
 17264  	int16(176), int16(176),
 17265  	int16(176), int16(177),
 17266  	int16(179), int16(181),
 17267  	int16(181), int16(181),
 17268  	int16(183), int16(183),
 17269  	int16(183), int16(184),
 17270  	int16(185), int16(185),
 17271  	int16(185), int16(185),
 17272  	int16(186), int16(189),
 17273  	int16(189), int16(189),
 17274  	int16(191), int16(191),
 17275  	int16(191), int16(194),
 17276  	int16(194), int16(194),
 17277  	int16(195), int16(195),
 17278  	int16(196), int16(198),
 17279  	int16(199), int16(200),
 17280  	int16(201), int16(201),
 17281  	int16(202), int16(203),
 17282  	int16(204), int16(204),
 17283  	int16(205), int16(205),
 17284  	int16(206), int16(209),
 17285  	int16(210), int16(210),
 17286  	int16(213), int16(214),
 17287  	int16(218), int16(220),
 17288  	int16(221), int16(226),
 17289  	int16(231), int16(234),
 17290  	int16(239), int16(256),
 17291  	int16(256), int16(256),
 17292  	int16(119), int16(123),
 17293  	int16(123), int16(123),
 17294  	int16(125), int16(126),
 17295  	int16(126), int16(126),
 17296  	int16(128), int16(130),
 17297  	int16(130), int16(131),
 17298  	int16(131), int16(135),
 17299  	int16(138), int16(139),
 17300  	int16(94), int16(94),
 17301  	int16(95), int16(95),
 17302  	int16(96), int16(98),
 17303  	int16(98), int16(99),
 17304  	int16(93), int16(93),
 17305  	int16(95), int16(96),
 17306  	int16(96), int16(97),
 17307  	int16(98), int16(100),
 17308  	int16(92), int16(93),
 17309  	int16(97), int16(97),
 17310  	int16(97), int16(97),
 17311  	int16(98), int16(98),
 17312  	int16(125), int16(126),
 17313  	int16(126), int16(127),
 17314  	int16(127), int16(128),
 17315  	int16(128), int16(128),
 17316  	int16(128), int16(128),
 17317  	int16(129), int16(129),
 17318  	int16(129), int16(130),
 17319  	int16(130), int16(131),
 17320  } /* SKP_Silk_tables_NLSF_CB0_10.c:188:17 */
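// Note (added): one rate value per codebook vector (64+16+8+8+8+16 = 120 entries),
// in Q5, i.e. units of 1/32 bit, so a value of 256 corresponds to 8 bits. These
// are presumably the codeword costs used by the NLSF MSVQ encoder's
// rate-distortion search.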
 17321  
 17322  var SKP_Silk_NLSF_MSVQ_CB0_10_ndelta_min_Q15 = [11]int32{
 17323  	563,
 17324  	3,
 17325  	22,
 17326  	20,
 17327  	3,
 17328  	3,
 17329  	132,
 17330  	119,
 17331  	358,
 17332  	86,
 17333  	964,
 17334  } /* SKP_Silk_tables_NLSF_CB0_10.c:252:15 */
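// Note (added): 11 minimum-spacing values in Q15 for the 10th-order LPC model
// (presumably one lower bound per gap between adjacent NLSFs, including the
// borders at 0 and pi), consumed by the NLSF stabilization routines.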
 17335  
 17336  var SKP_Silk_NLSF_MSVQ_CB0_10_Q15 = [1200]int16{
 17337  	int16(2210), int16(4023),
 17338  	int16(6981), int16(9260),
 17339  	int16(12573), int16(15687),
 17340  	int16(19207), int16(22383),
 17341  	int16(25981), int16(29142),
 17342  	int16(3285), int16(4172),
 17343  	int16(6116), int16(10856),
 17344  	int16(15289), int16(16826),
 17345  	int16(19701), int16(22010),
 17346  	int16(24721), int16(29313),
 17347  	int16(1554), int16(2511),
 17348  	int16(6577), int16(10337),
 17349  	int16(13837), int16(16511),
 17350  	int16(20086), int16(23214),
 17351  	int16(26480), int16(29464),
 17352  	int16(3062), int16(4017),
 17353  	int16(5771), int16(10037),
 17354  	int16(13365), int16(14952),
 17355  	int16(20140), int16(22891),
 17356  	int16(25229), int16(29603),
 17357  	int16(2085), int16(3457),
 17358  	int16(5934), int16(8718),
 17359  	int16(11501), int16(13670),
 17360  	int16(17997), int16(21817),
 17361  	int16(24935), int16(28745),
 17362  	int16(2776), int16(4093),
 17363  	int16(6421), int16(10413),
 17364  	int16(15111), int16(16806),
 17365  	int16(20825), int16(23826),
 17366  	int16(26308), int16(29411),
 17367  	int16(2717), int16(4034),
 17368  	int16(5697), int16(8463),
 17369  	int16(14301), int16(16354),
 17370  	int16(19007), int16(23413),
 17371  	int16(25812), int16(28506),
 17372  	int16(2872), int16(3702),
 17373  	int16(5881), int16(11034),
 17374  	int16(17141), int16(18879),
 17375  	int16(21146), int16(23451),
 17376  	int16(25817), int16(29600),
 17377  	int16(2999), int16(4015),
 17378  	int16(7357), int16(11219),
 17379  	int16(12866), int16(17307),
 17380  	int16(20081), int16(22644),
 17381  	int16(26774), int16(29107),
 17382  	int16(2942), int16(3866),
 17383  	int16(5918), int16(11915),
 17384  	int16(13909), int16(16072),
 17385  	int16(20453), int16(22279),
 17386  	int16(27310), int16(29826),
 17387  	int16(2271), int16(3527),
 17388  	int16(6606), int16(9729),
 17389  	int16(12943), int16(17382),
 17390  	int16(20224), int16(22345),
 17391  	int16(24602), int16(28290),
 17392  	int16(2207), int16(3310),
 17393  	int16(5844), int16(9339),
 17394  	int16(11141), int16(15651),
 17395  	int16(18576), int16(21177),
 17396  	int16(25551), int16(28228),
 17397  	int16(3963), int16(4975),
 17398  	int16(6901), int16(11588),
 17399  	int16(13466), int16(15577),
 17400  	int16(19231), int16(21368),
 17401  	int16(25510), int16(27759),
 17402  	int16(2749), int16(3549),
 17403  	int16(6966), int16(13808),
 17404  	int16(15653), int16(17645),
 17405  	int16(20090), int16(22599),
 17406  	int16(26467), int16(28537),
 17407  	int16(2126), int16(3504),
 17408  	int16(5109), int16(9954),
 17409  	int16(12550), int16(14620),
 17410  	int16(19703), int16(21687),
 17411  	int16(26457), int16(29106),
 17412  	int16(3966), int16(5745),
 17413  	int16(7442), int16(9757),
 17414  	int16(14468), int16(16404),
 17415  	int16(19135), int16(23048),
 17416  	int16(25375), int16(28391),
 17417  	int16(3197), int16(4751),
 17418  	int16(6451), int16(9298),
 17419  	int16(13038), int16(14874),
 17420  	int16(17962), int16(20627),
 17421  	int16(23835), int16(28464),
 17422  	int16(3195), int16(4081),
 17423  	int16(6499), int16(12252),
 17424  	int16(14289), int16(16040),
 17425  	int16(18357), int16(20730),
 17426  	int16(26980), int16(29309),
 17427  	int16(1533), int16(2471),
 17428  	int16(4486), int16(7796),
 17429  	int16(12332), int16(15758),
 17430  	int16(19567), int16(22298),
 17431  	int16(25673), int16(29051),
 17432  	int16(2002), int16(2971),
 17433  	int16(4985), int16(8083),
 17434  	int16(13181), int16(15435),
 17435  	int16(18237), int16(21517),
 17436  	int16(24595), int16(28351),
 17437  	int16(3808), int16(4925),
 17438  	int16(6710), int16(10201),
 17439  	int16(12011), int16(14300),
 17440  	int16(18457), int16(20391),
 17441  	int16(26525), int16(28956),
 17442  	int16(2281), int16(3418),
 17443  	int16(4979), int16(8726),
 17444  	int16(15964), int16(18104),
 17445  	int16(20250), int16(22771),
 17446  	int16(25286), int16(28954),
 17447  	int16(3051), int16(5479),
 17448  	int16(7290), int16(9848),
 17449  	int16(12744), int16(14503),
 17450  	int16(18665), int16(23684),
 17451  	int16(26065), int16(28947),
 17452  	int16(2364), int16(3565),
 17453  	int16(5502), int16(9621),
 17454  	int16(14922), int16(16621),
 17455  	int16(19005), int16(20996),
 17456  	int16(26310), int16(29302),
 17457  	int16(4093), int16(5212),
 17458  	int16(6833), int16(9880),
 17459  	int16(16303), int16(18286),
 17460  	int16(20571), int16(23614),
 17461  	int16(26067), int16(29128),
 17462  	int16(2941), int16(3996),
 17463  	int16(6038), int16(10638),
 17464  	int16(12668), int16(14451),
 17465  	int16(16798), int16(19392),
 17466  	int16(26051), int16(28517),
 17467  	int16(3863), int16(5212),
 17468  	int16(7019), int16(9468),
 17469  	int16(11039), int16(13214),
 17470  	int16(19942), int16(22344),
 17471  	int16(25126), int16(29539),
 17472  	int16(4615), int16(6172),
 17473  	int16(7853), int16(10252),
 17474  	int16(12611), int16(14445),
 17475  	int16(19719), int16(22441),
 17476  	int16(24922), int16(29341),
 17477  	int16(3566), int16(4512),
 17478  	int16(6985), int16(8684),
 17479  	int16(10544), int16(16097),
 17480  	int16(18058), int16(22475),
 17481  	int16(26066), int16(28167),
 17482  	int16(4481), int16(5489),
 17483  	int16(7432), int16(11414),
 17484  	int16(13191), int16(15225),
 17485  	int16(20161), int16(22258),
 17486  	int16(26484), int16(29716),
 17487  	int16(3320), int16(4320),
 17488  	int16(6621), int16(9867),
 17489  	int16(11581), int16(14034),
 17490  	int16(21168), int16(23210),
 17491  	int16(26588), int16(29903),
 17492  	int16(3794), int16(4689),
 17493  	int16(6916), int16(8655),
 17494  	int16(10143), int16(16144),
 17495  	int16(19568), int16(21588),
 17496  	int16(27557), int16(29593),
 17497  	int16(2446), int16(3276),
 17498  	int16(5918), int16(12643),
 17499  	int16(16601), int16(18013),
 17500  	int16(21126), int16(23175),
 17501  	int16(27300), int16(29634),
 17502  	int16(2450), int16(3522),
 17503  	int16(5437), int16(8560),
 17504  	int16(15285), int16(19911),
 17505  	int16(21826), int16(24097),
 17506  	int16(26567), int16(29078),
 17507  	int16(2580), int16(3796),
 17508  	int16(5580), int16(8338),
 17509  	int16(9969), int16(12675),
 17510  	int16(18907), int16(22753),
 17511  	int16(25450), int16(29292),
 17512  	int16(3325), int16(4312),
 17513  	int16(6241), int16(7709),
 17514  	int16(9164), int16(14452),
 17515  	int16(21665), int16(23797),
 17516  	int16(27096), int16(29857),
 17517  	int16(3338), int16(4163),
 17518  	int16(7738), int16(11114),
 17519  	int16(12668), int16(14753),
 17520  	int16(16931), int16(22736),
 17521  	int16(25671), int16(28093),
 17522  	int16(3840), int16(4755),
 17523  	int16(7755), int16(13471),
 17524  	int16(15338), int16(17180),
 17525  	int16(20077), int16(22353),
 17526  	int16(27181), int16(29743),
 17527  	int16(2504), int16(4079),
 17528  	int16(8351), int16(12118),
 17529  	int16(15046), int16(18595),
 17530  	int16(21684), int16(24704),
 17531  	int16(27519), int16(29937),
 17532  	int16(5234), int16(6342),
 17533  	int16(8267), int16(11821),
 17534  	int16(15155), int16(16760),
 17535  	int16(20667), int16(23488),
 17536  	int16(25949), int16(29307),
 17537  	int16(2681), int16(3562),
 17538  	int16(6028), int16(10827),
 17539  	int16(18458), int16(20458),
 17540  	int16(22303), int16(24701),
 17541  	int16(26912), int16(29956),
 17542  	int16(3374), int16(4528),
 17543  	int16(6230), int16(8256),
 17544  	int16(9513), int16(12730),
 17545  	int16(18666), int16(20720),
 17546  	int16(26007), int16(28425),
 17547  	int16(2731), int16(3629),
 17548  	int16(8320), int16(12450),
 17549  	int16(14112), int16(16431),
 17550  	int16(18548), int16(22098),
 17551  	int16(25329), int16(27718),
 17552  	int16(3481), int16(4401),
 17553  	int16(7321), int16(9319),
 17554  	int16(11062), int16(13093),
 17555  	int16(15121), int16(22315),
 17556  	int16(26331), int16(28740),
 17557  	int16(3577), int16(4945),
 17558  	int16(6669), int16(8792),
 17559  	int16(10299), int16(12645),
 17560  	int16(19505), int16(24766),
 17561  	int16(26996), int16(29634),
 17562  	int16(4058), int16(5060),
 17563  	int16(7288), int16(10190),
 17564  	int16(11724), int16(13936),
 17565  	int16(15849), int16(18539),
 17566  	int16(26701), int16(29845),
 17567  	int16(4262), int16(5390),
 17568  	int16(7057), int16(8982),
 17569  	int16(10187), int16(15264),
 17570  	int16(20480), int16(22340),
 17571  	int16(25958), int16(28072),
 17572  	int16(3404), int16(4329),
 17573  	int16(6629), int16(7946),
 17574  	int16(10121), int16(17165),
 17575  	int16(19640), int16(22244),
 17576  	int16(25062), int16(27472),
 17577  	int16(3157), int16(4168),
 17578  	int16(6195), int16(9319),
 17579  	int16(10771), int16(13325),
 17580  	int16(15416), int16(19816),
 17581  	int16(24672), int16(27634),
 17582  	int16(2503), int16(3473),
 17583  	int16(5130), int16(6767),
 17584  	int16(8571), int16(14902),
 17585  	int16(19033), int16(21926),
 17586  	int16(26065), int16(28728),
 17587  	int16(4133), int16(5102),
 17588  	int16(7553), int16(10054),
 17589  	int16(11757), int16(14924),
 17590  	int16(17435), int16(20186),
 17591  	int16(23987), int16(26272),
 17592  	int16(4972), int16(6139),
 17593  	int16(7894), int16(9633),
 17594  	int16(11320), int16(14295),
 17595  	int16(21737), int16(24306),
 17596  	int16(26919), int16(29907),
 17597  	int16(2958), int16(3816),
 17598  	int16(6851), int16(9204),
 17599  	int16(10895), int16(18052),
 17600  	int16(20791), int16(23338),
 17601  	int16(27556), int16(29609),
 17602  	int16(5234), int16(6028),
 17603  	int16(8034), int16(10154),
 17604  	int16(11242), int16(14789),
 17605  	int16(18948), int16(20966),
 17606  	int16(26585), int16(29127),
 17607  	int16(5241), int16(6838),
 17608  	int16(10526), int16(12819),
 17609  	int16(14681), int16(17328),
 17610  	int16(19928), int16(22336),
 17611  	int16(26193), int16(28697),
 17612  	int16(3412), int16(4251),
 17613  	int16(5988), int16(7094),
 17614  	int16(9907), int16(18243),
 17615  	int16(21669), int16(23777),
 17616  	int16(26969), int16(29087),
 17617  	int16(2470), int16(3217),
 17618  	int16(7797), int16(15296),
 17619  	int16(17365), int16(19135),
 17620  	int16(21979), int16(24256),
 17621  	int16(27322), int16(29442),
 17622  	int16(4939), int16(5804),
 17623  	int16(8145), int16(11809),
 17624  	int16(13873), int16(15598),
 17625  	int16(17234), int16(19423),
 17626  	int16(26476), int16(29645),
 17627  	int16(5051), int16(6167),
 17628  	int16(8223), int16(9655),
 17629  	int16(12159), int16(17995),
 17630  	int16(20464), int16(22832),
 17631  	int16(26616), int16(28462),
 17632  	int16(4987), int16(5907),
 17633  	int16(9319), int16(11245),
 17634  	int16(13132), int16(15024),
 17635  	int16(17485), int16(22687),
 17636  	int16(26011), int16(28273),
 17637  	int16(5137), int16(6884),
 17638  	int16(11025), int16(14950),
 17639  	int16(17191), int16(19425),
 17640  	int16(21807), int16(24393),
 17641  	int16(26938), int16(29288),
 17642  	int16(7057), int16(7884),
 17643  	int16(9528), int16(10483),
 17644  	int16(10960), int16(14811),
 17645  	int16(19070), int16(21675),
 17646  	int16(25645), int16(28019),
 17647  	int16(6759), int16(7160),
 17648  	int16(8546), int16(11779),
 17649  	int16(12295), int16(13023),
 17650  	int16(16627), int16(21099),
 17651  	int16(24697), int16(28287),
 17652  	int16(3863), int16(9762),
 17653  	int16(11068), int16(11445),
 17654  	int16(12049), int16(13960),
 17655  	int16(18085), int16(21507),
 17656  	int16(25224), int16(28997),
 17657  	int16(397), int16(335),
 17658  	int16(651), int16(1168),
 17659  	int16(640), int16(765),
 17660  	int16(465), int16(331),
 17661  	int16(214), int16(-194),
 17662  	int16(-578), int16(-647),
 17663  	int16(-657), int16(750),
 17664  	int16(564), int16(613),
 17665  	int16(549), int16(630),
 17666  	int16(304), int16(-52),
 17667  	int16(828), int16(922),
 17668  	int16(443), int16(111),
 17669  	int16(138), int16(124),
 17670  	int16(169), int16(14),
 17671  	int16(144), int16(83),
 17672  	int16(132), int16(58),
 17673  	int16(-413), int16(-752),
 17674  	int16(869), int16(336),
 17675  	int16(385), int16(69),
 17676  	int16(56), int16(830),
 17677  	int16(-227), int16(-266),
 17678  	int16(-368), int16(-440),
 17679  	int16(-1195), int16(163),
 17680  	int16(126), int16(-228),
 17681  	int16(802), int16(156),
 17682  	int16(188), int16(120),
 17683  	int16(376), int16(59),
 17684  	int16(-358), int16(-558),
 17685  	int16(-1326), int16(-254),
 17686  	int16(-202), int16(-789),
 17687  	int16(296), int16(92),
 17688  	int16(-70), int16(-129),
 17689  	int16(-718), int16(-1135),
 17690  	int16(292), int16(-29),
 17691  	int16(-631), int16(487),
 17692  	int16(-157), int16(-153),
 17693  	int16(-279), int16(2),
 17694  	int16(-419), int16(-342),
 17695  	int16(-34), int16(-514),
 17696  	int16(-799), int16(-1571),
 17697  	int16(-687), int16(-609),
 17698  	int16(-546), int16(-130),
 17699  	int16(-215), int16(-252),
 17700  	int16(-446), int16(-574),
 17701  	int16(-1337), int16(207),
 17702  	int16(-72), int16(32),
 17703  	int16(103), int16(-642),
 17704  	int16(942), int16(733),
 17705  	int16(187), int16(29),
 17706  	int16(-211), int16(-814),
 17707  	int16(143), int16(225),
 17708  	int16(20), int16(24),
 17709  	int16(-268), int16(-377),
 17710  	int16(1623), int16(1133),
 17711  	int16(667), int16(164),
 17712  	int16(307), int16(366),
 17713  	int16(187), int16(34),
 17714  	int16(62), int16(-313),
 17715  	int16(-832), int16(-1482),
 17716  	int16(-1181), int16(483),
 17717  	int16(-42), int16(-39),
 17718  	int16(-450), int16(-1406),
 17719  	int16(-587), int16(-52),
 17720  	int16(-760), int16(334),
 17721  	int16(98), int16(-60),
 17722  	int16(-500), int16(-488),
 17723  	int16(-1058), int16(299),
 17724  	int16(131), int16(-250),
 17725  	int16(-251), int16(-703),
 17726  	int16(1037), int16(568),
 17727  	int16(-413), int16(-265),
 17728  	int16(1687), int16(573),
 17729  	int16(345), int16(323),
 17730  	int16(98), int16(61),
 17731  	int16(-102), int16(31),
 17732  	int16(135), int16(149),
 17733  	int16(617), int16(365),
 17734  	int16(-39), int16(34),
 17735  	int16(-611), int16(1201),
 17736  	int16(1421), int16(736),
 17737  	int16(-414), int16(-393),
 17738  	int16(-492), int16(-343),
 17739  	int16(-316), int16(-532),
 17740  	int16(528), int16(172),
 17741  	int16(90), int16(322),
 17742  	int16(-294), int16(-319),
 17743  	int16(-541), int16(503),
 17744  	int16(639), int16(401),
 17745  	int16(1), int16(-149),
 17746  	int16(-73), int16(-167),
 17747  	int16(150), int16(118),
 17748  	int16(308), int16(218),
 17749  	int16(121), int16(195),
 17750  	int16(-143), int16(-261),
 17751  	int16(-1013), int16(-802),
 17752  	int16(387), int16(436),
 17753  	int16(130), int16(-427),
 17754  	int16(-448), int16(-681),
 17755  	int16(123), int16(-87),
 17756  	int16(-251), int16(-113),
 17757  	int16(274), int16(310),
 17758  	int16(445), int16(501),
 17759  	int16(354), int16(272),
 17760  	int16(141), int16(-285),
 17761  	int16(569), int16(656),
 17762  	int16(37), int16(-49),
 17763  	int16(251), int16(-386),
 17764  	int16(-263), int16(1122),
 17765  	int16(604), int16(606),
 17766  	int16(336), int16(95),
 17767  	int16(34), int16(0),
 17768  	int16(85), int16(180),
 17769  	int16(207), int16(-367),
 17770  	int16(-622), int16(1070),
 17771  	int16(-6), int16(-79),
 17772  	int16(-160), int16(-92),
 17773  	int16(-137), int16(-276),
 17774  	int16(-323), int16(-371),
 17775  	int16(-696), int16(-1036),
 17776  	int16(407), int16(102),
 17777  	int16(-86), int16(-214),
 17778  	int16(-482), int16(-647),
 17779  	int16(-28), int16(-291),
 17780  	int16(-97), int16(-180),
 17781  	int16(-250), int16(-435),
 17782  	int16(-18), int16(-76),
 17783  	int16(-332), int16(410),
 17784  	int16(407), int16(168),
 17785  	int16(539), int16(411),
 17786  	int16(254), int16(111),
 17787  	int16(58), int16(-145),
 17788  	int16(200), int16(30),
 17789  	int16(187), int16(116),
 17790  	int16(131), int16(-367),
 17791  	int16(-475), int16(781),
 17792  	int16(-559), int16(561),
 17793  	int16(195), int16(-115),
 17794  	int16(8), int16(-168),
 17795  	int16(30), int16(55),
 17796  	int16(-122), int16(131),
 17797  	int16(82), int16(-5),
 17798  	int16(-273), int16(-50),
 17799  	int16(-632), int16(668),
 17800  	int16(4), int16(32),
 17801  	int16(-26), int16(-279),
 17802  	int16(315), int16(165),
 17803  	int16(197), int16(377),
 17804  	int16(155), int16(-41),
 17805  	int16(-138), int16(-324),
 17806  	int16(-109), int16(-617),
 17807  	int16(360), int16(98),
 17808  	int16(-53), int16(-319),
 17809  	int16(-114), int16(-245),
 17810  	int16(-82), int16(507),
 17811  	int16(468), int16(263),
 17812  	int16(-137), int16(-389),
 17813  	int16(652), int16(354),
 17814  	int16(-18), int16(-227),
 17815  	int16(-462), int16(-135),
 17816  	int16(317), int16(53),
 17817  	int16(-16), int16(66),
 17818  	int16(-72), int16(-126),
 17819  	int16(-356), int16(-347),
 17820  	int16(-328), int16(-72),
 17821  	int16(-337), int16(324),
 17822  	int16(152), int16(349),
 17823  	int16(169), int16(-196),
 17824  	int16(179), int16(254),
 17825  	int16(260), int16(325),
 17826  	int16(-74), int16(-80),
 17827  	int16(75), int16(-31),
 17828  	int16(270), int16(275),
 17829  	int16(87), int16(278),
 17830  	int16(-446), int16(-301),
 17831  	int16(309), int16(71),
 17832  	int16(-25), int16(-242),
 17833  	int16(516), int16(161),
 17834  	int16(-162), int16(-83),
 17835  	int16(329), int16(230),
 17836  	int16(-311), int16(-259),
 17837  	int16(177), int16(-26),
 17838  	int16(-462), int16(89),
 17839  	int16(257), int16(6),
 17840  	int16(-130), int16(-93),
 17841  	int16(-456), int16(-317),
 17842  	int16(-221), int16(-206),
 17843  	int16(-417), int16(-182),
 17844  	int16(-74), int16(234),
 17845  	int16(48), int16(261),
 17846  	int16(359), int16(231),
 17847  	int16(258), int16(85),
 17848  	int16(-282), int16(252),
 17849  	int16(-147), int16(-222),
 17850  	int16(251), int16(-207),
 17851  	int16(443), int16(123),
 17852  	int16(-417), int16(-36),
 17853  	int16(273), int16(-241),
 17854  	int16(240), int16(-112),
 17855  	int16(44), int16(-167),
 17856  	int16(126), int16(-124),
 17857  	int16(-77), int16(58),
 17858  	int16(-401), int16(333),
 17859  	int16(-118), int16(82),
 17860  	int16(126), int16(151),
 17861  	int16(-433), int16(359),
 17862  	int16(-130), int16(-102),
 17863  	int16(131), int16(-244),
 17864  	int16(86), int16(85),
 17865  	int16(-462), int16(414),
 17866  	int16(-240), int16(16),
 17867  	int16(145), int16(28),
 17868  	int16(-205), int16(-481),
 17869  	int16(373), int16(293),
 17870  	int16(-72), int16(-174),
 17871  	int16(62), int16(259),
 17872  	int16(-8), int16(-18),
 17873  	int16(362), int16(233),
 17874  	int16(185), int16(43),
 17875  	int16(278), int16(27),
 17876  	int16(193), int16(570),
 17877  	int16(-248), int16(189),
 17878  	int16(92), int16(31),
 17879  	int16(-275), int16(-3),
 17880  	int16(243), int16(176),
 17881  	int16(438), int16(209),
 17882  	int16(206), int16(-51),
 17883  	int16(79), int16(109),
 17884  	int16(168), int16(-185),
 17885  	int16(-308), int16(-68),
 17886  	int16(-618), int16(385),
 17887  	int16(-310), int16(-108),
 17888  	int16(-164), int16(165),
 17889  	int16(61), int16(-152),
 17890  	int16(-101), int16(-412),
 17891  	int16(-268), int16(-257),
 17892  	int16(-40), int16(-20),
 17893  	int16(-28), int16(-158),
 17894  	int16(-301), int16(271),
 17895  	int16(380), int16(-338),
 17896  	int16(-367), int16(-132),
 17897  	int16(64), int16(114),
 17898  	int16(-131), int16(-225),
 17899  	int16(-156), int16(-260),
 17900  	int16(-63), int16(-116),
 17901  	int16(155), int16(-586),
 17902  	int16(-202), int16(254),
 17903  	int16(-287), int16(178),
 17904  	int16(227), int16(-106),
 17905  	int16(-294), int16(164),
 17906  	int16(298), int16(-100),
 17907  	int16(185), int16(317),
 17908  	int16(193), int16(-45),
 17909  	int16(28), int16(80),
 17910  	int16(-87), int16(-433),
 17911  	int16(22), int16(-48),
 17912  	int16(48), int16(-237),
 17913  	int16(-229), int16(-139),
 17914  	int16(120), int16(-364),
 17915  	int16(268), int16(-136),
 17916  	int16(396), int16(125),
 17917  	int16(130), int16(-89),
 17918  	int16(-272), int16(118),
 17919  	int16(-256), int16(-68),
 17920  	int16(-451), int16(488),
 17921  	int16(143), int16(-165),
 17922  	int16(-48), int16(-190),
 17923  	int16(106), int16(219),
 17924  	int16(47), int16(435),
 17925  	int16(245), int16(97),
 17926  	int16(75), int16(-418),
 17927  	int16(121), int16(-187),
 17928  	int16(570), int16(-200),
 17929  	int16(-351), int16(225),
 17930  	int16(-21), int16(-217),
 17931  	int16(234), int16(-111),
 17932  	int16(194), int16(14),
 17933  	int16(242), int16(118),
 17934  	int16(140), int16(-397),
 17935  	int16(355), int16(361),
 17936  	int16(-45), int16(-195),
 17937  } /* SKP_Silk_tables_NLSF_CB0_10.c:267:17 */
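// Note (added): the codebook vectors themselves, 120 vectors x 10 coefficients in
// Q15 (1200 entries). The first-stage rows appear to be monotonically increasing
// absolute NLSF positions, while the later stages hold small sign-mixed residual
// corrections, which is why negative values start appearing after the first
// 64 x 10 = 640 entries.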
 17938  
 17939  var SKP_Silk_NLSF_CB0_10_Stage_info = [6]SKP_Silk_NLSF_CBS{
 17940  	{FnVectors: 64, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 17941  	{FnVectors: 16, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 17942  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 17943  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 17944  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 17945  	{FnVectors: 16, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 17946  } /* SKP_Silk_tables_NLSF_CB0_10.c:871:25 */
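// Note (added): one descriptor per MSVQ stage; FnVectors gives the stage's vector
// count, while the zero-valued FCB_NLSF_Q15 and FRates_Q5 fields are presumably
// pointed at the corresponding slices of the Q15 and rates tables above at init
// time, as ccgo does for C pointer initializers.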
 17947  
 17948  var SKP_Silk_NLSF_CB0_10 = SKP_Silk_NLSF_CB_struct{
 17949  	FnStages:       6,
 17950  	FCBStages:      0,
 17951  	FNDeltaMin_Q15: 0,
 17952  	FCDF:           0,
 17953  	FStartPtr:      0,
 17954  	FMiddleIx:      0,
 17955  } /* SKP_Silk_tables_NLSF_CB0_10.c:881:31 */
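// Illustrative sketch (added, not part of the ccgo output): a minimal consistency
// check relating the stage descriptors above to the rates table; the helper name
// is hypothetical and assumes only the declarations in this file.
func nlsfCB010TotalVectors() int {
	// Sum FnVectors over the six MSVQ stages: 64 + 16 + 8 + 8 + 8 + 16.
	total := 0
	for i := range SKP_Silk_NLSF_CB0_10_Stage_info {
		total += int(SKP_Silk_NLSF_CB0_10_Stage_info[i].FnVectors)
	}
	// Expected to equal len(SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5) == 120,
	// one rate entry per codebook vector.
	return total
}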
 17956  
 17957  var SKP_Silk_NLSF_MSVQ_CB0_16_CDF = [226]uint16{
 17958  	uint16(0),
 17959  	uint16(1449),
 17960  	uint16(2749),
 17961  	uint16(4022),
 17962  	uint16(5267),
 17963  	uint16(6434),
 17964  	uint16(7600),
 17965  	uint16(8647),
 17966  	uint16(9695),
 17967  	uint16(10742),
 17968  	uint16(11681),
 17969  	uint16(12601),
 17970  	uint16(13444),
 17971  	uint16(14251),
 17972  	uint16(15008),
 17973  	uint16(15764),
 17974  	uint16(16521),
 17975  	uint16(17261),
 17976  	uint16(18002),
 17977  	uint16(18710),
 17978  	uint16(19419),
 17979  	uint16(20128),
 17980  	uint16(20837),
 17981  	uint16(21531),
 17982  	uint16(22225),
 17983  	uint16(22919),
 17984  	uint16(23598),
 17985  	uint16(24277),
 17986  	uint16(24956),
 17987  	uint16(25620),
 17988  	uint16(26256),
 17989  	uint16(26865),
 17990  	uint16(27475),
 17991  	uint16(28071),
 17992  	uint16(28667),
 17993  	uint16(29263),
 17994  	uint16(29859),
 17995  	uint16(30443),
 17996  	uint16(31026),
 17997  	uint16(31597),
 17998  	uint16(32168),
 17999  	uint16(32727),
 18000  	uint16(33273),
 18001  	uint16(33808),
 18002  	uint16(34332),
 18003  	uint16(34855),
 18004  	uint16(35379),
 18005  	uint16(35902),
 18006  	uint16(36415),
 18007  	uint16(36927),
 18008  	uint16(37439),
 18009  	uint16(37941),
 18010  	uint16(38442),
 18011  	uint16(38932),
 18012  	uint16(39423),
 18013  	uint16(39914),
 18014  	uint16(40404),
 18015  	uint16(40884),
 18016  	uint16(41364),
 18017  	uint16(41844),
 18018  	uint16(42324),
 18019  	uint16(42805),
 18020  	uint16(43285),
 18021  	uint16(43754),
 18022  	uint16(44224),
 18023  	uint16(44694),
 18024  	uint16(45164),
 18025  	uint16(45623),
 18026  	uint16(46083),
 18027  	uint16(46543),
 18028  	uint16(46993),
 18029  	uint16(47443),
 18030  	uint16(47892),
 18031  	uint16(48333),
 18032  	uint16(48773),
 18033  	uint16(49213),
 18034  	uint16(49653),
 18035  	uint16(50084),
 18036  	uint16(50515),
 18037  	uint16(50946),
 18038  	uint16(51377),
 18039  	uint16(51798),
 18040  	uint16(52211),
 18041  	uint16(52614),
 18042  	uint16(53018),
 18043  	uint16(53422),
 18044  	uint16(53817),
 18045  	uint16(54212),
 18046  	uint16(54607),
 18047  	uint16(55002),
 18048  	uint16(55388),
 18049  	uint16(55775),
 18050  	uint16(56162),
 18051  	uint16(56548),
 18052  	uint16(56910),
 18053  	uint16(57273),
 18054  	uint16(57635),
 18055  	uint16(57997),
 18056  	uint16(58352),
 18057  	uint16(58698),
 18058  	uint16(59038),
 18059  	uint16(59370),
 18060  	uint16(59702),
 18061  	uint16(60014),
 18062  	uint16(60325),
 18063  	uint16(60630),
 18064  	uint16(60934),
 18065  	uint16(61239),
 18066  	uint16(61537),
 18067  	uint16(61822),
 18068  	uint16(62084),
 18069  	uint16(62346),
 18070  	uint16(62602),
 18071  	uint16(62837),
 18072  	uint16(63072),
 18073  	uint16(63302),
 18074  	uint16(63517),
 18075  	uint16(63732),
 18076  	uint16(63939),
 18077  	uint16(64145),
 18078  	uint16(64342),
 18079  	uint16(64528),
 18080  	uint16(64701),
 18081  	uint16(64867),
 18082  	uint16(65023),
 18083  	uint16(65151),
 18084  	uint16(65279),
 18085  	uint16(65407),
 18086  	uint16(65535),
 18087  	uint16(0),
 18088  	uint16(5099),
 18089  	uint16(9982),
 18090  	uint16(14760),
 18091  	uint16(19538),
 18092  	uint16(24213),
 18093  	uint16(28595),
 18094  	uint16(32976),
 18095  	uint16(36994),
 18096  	uint16(41012),
 18097  	uint16(44944),
 18098  	uint16(48791),
 18099  	uint16(52557),
 18100  	uint16(56009),
 18101  	uint16(59388),
 18102  	uint16(62694),
 18103  	uint16(65535),
 18104  	uint16(0),
 18105  	uint16(9955),
 18106  	uint16(19697),
 18107  	uint16(28825),
 18108  	uint16(36842),
 18109  	uint16(44686),
 18110  	uint16(52198),
 18111  	uint16(58939),
 18112  	uint16(65535),
 18113  	uint16(0),
 18114  	uint16(8949),
 18115  	uint16(17335),
 18116  	uint16(25720),
 18117  	uint16(33926),
 18118  	uint16(41957),
 18119  	uint16(49987),
 18120  	uint16(57845),
 18121  	uint16(65535),
 18122  	uint16(0),
 18123  	uint16(9724),
 18124  	uint16(18642),
 18125  	uint16(26998),
 18126  	uint16(35355),
 18127  	uint16(43532),
 18128  	uint16(51534),
 18129  	uint16(59365),
 18130  	uint16(65535),
 18131  	uint16(0),
 18132  	uint16(8750),
 18133  	uint16(17499),
 18134  	uint16(26249),
 18135  	uint16(34448),
 18136  	uint16(42471),
 18137  	uint16(50494),
 18138  	uint16(58178),
 18139  	uint16(65535),
 18140  	uint16(0),
 18141  	uint16(8730),
 18142  	uint16(17273),
 18143  	uint16(25816),
 18144  	uint16(34176),
 18145  	uint16(42536),
 18146  	uint16(50203),
 18147  	uint16(57869),
 18148  	uint16(65535),
 18149  	uint16(0),
 18150  	uint16(8769),
 18151  	uint16(17538),
 18152  	uint16(26307),
 18153  	uint16(34525),
 18154  	uint16(42742),
 18155  	uint16(50784),
 18156  	uint16(58319),
 18157  	uint16(65535),
 18158  	uint16(0),
 18159  	uint16(8736),
 18160  	uint16(17101),
 18161  	uint16(25466),
 18162  	uint16(33653),
 18163  	uint16(41839),
 18164  	uint16(50025),
 18165  	uint16(57864),
 18166  	uint16(65535),
 18167  	uint16(0),
 18168  	uint16(4368),
 18169  	uint16(8735),
 18170  	uint16(12918),
 18171  	uint16(17100),
 18172  	uint16(21283),
 18173  	uint16(25465),
 18174  	uint16(29558),
 18175  	uint16(33651),
 18176  	uint16(37744),
 18177  	uint16(41836),
 18178  	uint16(45929),
 18179  	uint16(50022),
 18180  	uint16(54027),
 18181  	uint16(57947),
 18182  	uint16(61782),
 18183  	uint16(65535),
 18184  } /* SKP_Silk_tables_NLSF_CB0_16.c:38:18 */
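// Note (added): the analogous concatenated CDF for the 16th-order (wideband) NLSF
// codebook. Reading off the 0..65535 segments gives ten stages with 128, 16,
// seven times 8, and 16 vectors, i.e. 216 codewords and 226 CDF entries, which
// matches the sizes of the pointer, index, and rate tables that follow.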
 18185  
 18186  var SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr = [10]uintptr{
 18187  	0,
 18188  	0,
 18189  	0,
 18190  	0,
 18191  	0,
 18192  	0,
 18193  	0,
 18194  	0,
 18195  	0,
 18196  	0,
 18197  } /* SKP_Silk_tables_NLSF_CB0_16.c:268:18 */
 18198  
 18199  var SKP_Silk_NLSF_MSVQ_CB0_16_CDF_middle_idx = [10]int32{
 18200  	42,
 18201  	8,
 18202  	4,
 18203  	5,
 18204  	5,
 18205  	5,
 18206  	5,
 18207  	5,
 18208  	5,
 18209  	9,
 18210  } /* SKP_Silk_tables_NLSF_CB0_16.c:282:15 */
 18211  
 18212  var SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5 = [216]int16{
 18213  	int16(176), int16(181),
 18214  	int16(182), int16(183),
 18215  	int16(186), int16(186),
 18216  	int16(191), int16(191),
 18217  	int16(191), int16(196),
 18218  	int16(197), int16(201),
 18219  	int16(203), int16(206),
 18220  	int16(206), int16(206),
 18221  	int16(207), int16(207),
 18222  	int16(209), int16(209),
 18223  	int16(209), int16(209),
 18224  	int16(210), int16(210),
 18225  	int16(210), int16(211),
 18226  	int16(211), int16(211),
 18227  	int16(212), int16(214),
 18228  	int16(216), int16(216),
 18229  	int16(217), int16(217),
 18230  	int16(217), int16(217),
 18231  	int16(218), int16(218),
 18232  	int16(219), int16(219),
 18233  	int16(220), int16(221),
 18234  	int16(222), int16(223),
 18235  	int16(223), int16(223),
 18236  	int16(223), int16(224),
 18237  	int16(224), int16(224),
 18238  	int16(225), int16(225),
 18239  	int16(226), int16(226),
 18240  	int16(226), int16(226),
 18241  	int16(227), int16(227),
 18242  	int16(227), int16(227),
 18243  	int16(227), int16(227),
 18244  	int16(228), int16(228),
 18245  	int16(228), int16(228),
 18246  	int16(229), int16(229),
 18247  	int16(229), int16(230),
 18248  	int16(230), int16(230),
 18249  	int16(231), int16(231),
 18250  	int16(231), int16(231),
 18251  	int16(232), int16(232),
 18252  	int16(232), int16(232),
 18253  	int16(233), int16(234),
 18254  	int16(235), int16(235),
 18255  	int16(235), int16(236),
 18256  	int16(236), int16(236),
 18257  	int16(236), int16(237),
 18258  	int16(237), int16(237),
 18259  	int16(237), int16(240),
 18260  	int16(240), int16(240),
 18261  	int16(240), int16(241),
 18262  	int16(242), int16(243),
 18263  	int16(244), int16(244),
 18264  	int16(247), int16(247),
 18265  	int16(248), int16(248),
 18266  	int16(248), int16(249),
 18267  	int16(251), int16(255),
 18268  	int16(255), int16(256),
 18269  	int16(260), int16(260),
 18270  	int16(261), int16(264),
 18271  	int16(264), int16(266),
 18272  	int16(266), int16(268),
 18273  	int16(271), int16(274),
 18274  	int16(276), int16(279),
 18275  	int16(288), int16(288),
 18276  	int16(288), int16(288),
 18277  	int16(118), int16(120),
 18278  	int16(121), int16(121),
 18279  	int16(122), int16(125),
 18280  	int16(125), int16(129),
 18281  	int16(129), int16(130),
 18282  	int16(131), int16(132),
 18283  	int16(136), int16(137),
 18284  	int16(138), int16(145),
 18285  	int16(87), int16(88),
 18286  	int16(91), int16(97),
 18287  	int16(98), int16(100),
 18288  	int16(105), int16(106),
 18289  	int16(92), int16(95),
 18290  	int16(95), int16(96),
 18291  	int16(97), int16(97),
 18292  	int16(98), int16(99),
 18293  	int16(88), int16(92),
 18294  	int16(95), int16(95),
 18295  	int16(96), int16(97),
 18296  	int16(98), int16(109),
 18297  	int16(93), int16(93),
 18298  	int16(93), int16(96),
 18299  	int16(97), int16(97),
 18300  	int16(99), int16(101),
 18301  	int16(93), int16(94),
 18302  	int16(94), int16(95),
 18303  	int16(95), int16(99),
 18304  	int16(99), int16(99),
 18305  	int16(93), int16(93),
 18306  	int16(93), int16(96),
 18307  	int16(96), int16(97),
 18308  	int16(100), int16(102),
 18309  	int16(93), int16(95),
 18310  	int16(95), int16(96),
 18311  	int16(96), int16(96),
 18312  	int16(98), int16(99),
 18313  	int16(125), int16(125),
 18314  	int16(127), int16(127),
 18315  	int16(127), int16(127),
 18316  	int16(128), int16(128),
 18317  	int16(128), int16(128),
 18318  	int16(128), int16(128),
 18319  	int16(129), int16(130),
 18320  	int16(131), int16(132),
 18321  } /* SKP_Silk_tables_NLSF_CB0_16.c:296:17 */
 18322  
 18323  var SKP_Silk_NLSF_MSVQ_CB0_16_ndelta_min_Q15 = [17]int32{
 18324  	266,
 18325  	3,
 18326  	40,
 18327  	3,
 18328  	3,
 18329  	16,
 18330  	78,
 18331  	89,
 18332  	107,
 18333  	141,
 18334  	188,
 18335  	146,
 18336  	272,
 18337  	240,
 18338  	235,
 18339  	215,
 18340  	632,
 18341  } /* SKP_Silk_tables_NLSF_CB0_16.c:408:15 */
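// Note (added): the 16th-order counterparts of the tables documented above: ten
// per-stage pointer/index helpers, 216 Q5 rate entries, and 17 Q15 minimum-spacing
// values for the 16 NLSF coefficients plus the upper border. The 3456-entry Q15
// table below holds the 216 codebook vectors of 16 coefficients each.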
 18342  
 18343  var SKP_Silk_NLSF_MSVQ_CB0_16_Q15 = [3456]int16{
 18344  	int16(1170), int16(2278), int16(3658), int16(5374),
 18345  	int16(7666), int16(9113), int16(11298), int16(13304),
 18346  	int16(15371), int16(17549), int16(19587), int16(21487),
 18347  	int16(23798), int16(26038), int16(28318), int16(30201),
 18348  	int16(1628), int16(2334), int16(4115), int16(6036),
 18349  	int16(7818), int16(9544), int16(11777), int16(14021),
 18350  	int16(15787), int16(17408), int16(19466), int16(21261),
 18351  	int16(22886), int16(24565), int16(26714), int16(28059),
 18352  	int16(1724), int16(2670), int16(4056), int16(6532),
 18353  	int16(8357), int16(10119), int16(12093), int16(14061),
 18354  	int16(16491), int16(18795), int16(20417), int16(22402),
 18355  	int16(24251), int16(26224), int16(28410), int16(29956),
 18356  	int16(1493), int16(3427), int16(4789), int16(6399),
 18357  	int16(8435), int16(10168), int16(12000), int16(14066),
 18358  	int16(16229), int16(18210), int16(20040), int16(22098),
 18359  	int16(24153), int16(26095), int16(28183), int16(30121),
 18360  	int16(1119), int16(2089), int16(4295), int16(6245),
 18361  	int16(8691), int16(10741), int16(12688), int16(15057),
 18362  	int16(17028), int16(18792), int16(20717), int16(22514),
 18363  	int16(24497), int16(26548), int16(28619), int16(30630),
 18364  	int16(1363), int16(2417), int16(3927), int16(5556),
 18365  	int16(7422), int16(9315), int16(11879), int16(13767),
 18366  	int16(16143), int16(18520), int16(20458), int16(22578),
 18367  	int16(24539), int16(26436), int16(28318), int16(30318),
 18368  	int16(1122), int16(2503), int16(5216), int16(7148),
 18369  	int16(9310), int16(11078), int16(13175), int16(14800),
 18370  	int16(16864), int16(18700), int16(20436), int16(22488),
 18371  	int16(24572), int16(26602), int16(28555), int16(30426),
 18372  	int16(600), int16(1317), int16(2970), int16(5609),
 18373  	int16(7694), int16(9784), int16(12169), int16(14087),
 18374  	int16(16379), int16(18378), int16(20551), int16(22686),
 18375  	int16(24739), int16(26697), int16(28646), int16(30355),
 18376  	int16(941), int16(1882), int16(4274), int16(5540),
 18377  	int16(8482), int16(9858), int16(11940), int16(14287),
 18378  	int16(16091), int16(18501), int16(20326), int16(22612),
 18379  	int16(24711), int16(26638), int16(28814), int16(30430),
 18380  	int16(635), int16(1699), int16(4376), int16(5948),
 18381  	int16(8097), int16(10115), int16(12274), int16(14178),
 18382  	int16(16111), int16(17813), int16(19695), int16(21773),
 18383  	int16(23927), int16(25866), int16(28022), int16(30134),
 18384  	int16(1408), int16(2222), int16(3524), int16(5615),
 18385  	int16(7345), int16(8849), int16(10989), int16(12772),
 18386  	int16(15352), int16(17026), int16(18919), int16(21062),
 18387  	int16(23329), int16(25215), int16(27209), int16(29023),
 18388  	int16(701), int16(1307), int16(3548), int16(6301),
 18389  	int16(7744), int16(9574), int16(11227), int16(12978),
 18390  	int16(15170), int16(17565), int16(19775), int16(22097),
 18391  	int16(24230), int16(26335), int16(28377), int16(30231),
 18392  	int16(1752), int16(2364), int16(4879), int16(6569),
 18393  	int16(7813), int16(9796), int16(11199), int16(14290),
 18394  	int16(15795), int16(18000), int16(20396), int16(22417),
 18395  	int16(24308), int16(26124), int16(28360), int16(30633),
 18396  	int16(901), int16(1629), int16(3356), int16(4635),
 18397  	int16(7256), int16(8767), int16(9971), int16(11558),
 18398  	int16(15215), int16(17544), int16(19523), int16(21852),
 18399  	int16(23900), int16(25978), int16(28133), int16(30184),
 18400  	int16(981), int16(1669), int16(3323), int16(4693),
 18401  	int16(6213), int16(8692), int16(10614), int16(12956),
 18402  	int16(15211), int16(17711), int16(19856), int16(22122),
 18403  	int16(24344), int16(26592), int16(28723), int16(30481),
 18404  	int16(1607), int16(2577), int16(4220), int16(5512),
 18405  	int16(8532), int16(10388), int16(11627), int16(13671),
 18406  	int16(15752), int16(17199), int16(19840), int16(21859),
 18407  	int16(23494), int16(25786), int16(28091), int16(30131),
 18408  	int16(811), int16(1471), int16(3144), int16(5041),
 18409  	int16(7430), int16(9389), int16(11174), int16(13255),
 18410  	int16(15157), int16(16741), int16(19583), int16(22167),
 18411  	int16(24115), int16(26142), int16(28383), int16(30395),
 18412  	int16(1543), int16(2144), int16(3629), int16(6347),
 18413  	int16(7333), int16(9339), int16(10710), int16(13596),
 18414  	int16(15099), int16(17340), int16(20102), int16(21886),
 18415  	int16(23732), int16(25637), int16(27818), int16(29917),
 18416  	int16(492), int16(1185), int16(2940), int16(5488),
 18417  	int16(7095), int16(8751), int16(11596), int16(13579),
 18418  	int16(16045), int16(18015), int16(20178), int16(22127),
 18419  	int16(24265), int16(26406), int16(28484), int16(30357),
 18420  	int16(1547), int16(2282), int16(3693), int16(6341),
 18421  	int16(7758), int16(9607), int16(11848), int16(13236),
 18422  	int16(16564), int16(18069), int16(19759), int16(21404),
 18423  	int16(24110), int16(26606), int16(28786), int16(30655),
 18424  	int16(685), int16(1338), int16(3409), int16(5262),
 18425  	int16(6950), int16(9222), int16(11414), int16(14523),
 18426  	int16(16337), int16(17893), int16(19436), int16(21298),
 18427  	int16(23293), int16(25181), int16(27973), int16(30520),
 18428  	int16(887), int16(1581), int16(3057), int16(4318),
 18429  	int16(7192), int16(8617), int16(10047), int16(13106),
 18430  	int16(16265), int16(17893), int16(20233), int16(22350),
 18431  	int16(24379), int16(26384), int16(28314), int16(30189),
 18432  	int16(2285), int16(3745), int16(5662), int16(7576),
 18433  	int16(9323), int16(11320), int16(13239), int16(15191),
 18434  	int16(17175), int16(19225), int16(21108), int16(22972),
 18435  	int16(24821), int16(26655), int16(28561), int16(30460),
 18436  	int16(1496), int16(2108), int16(3448), int16(6898),
 18437  	int16(8328), int16(9656), int16(11252), int16(12823),
 18438  	int16(14979), int16(16482), int16(18180), int16(20085),
 18439  	int16(22962), int16(25160), int16(27705), int16(29629),
 18440  	int16(575), int16(1261), int16(3861), int16(6627),
 18441  	int16(8294), int16(10809), int16(12705), int16(14768),
 18442  	int16(17076), int16(19047), int16(20978), int16(23055),
 18443  	int16(24972), int16(26703), int16(28720), int16(30345),
 18444  	int16(1682), int16(2213), int16(3882), int16(6238),
 18445  	int16(7208), int16(9646), int16(10877), int16(13431),
 18446  	int16(14805), int16(16213), int16(17941), int16(20873),
 18447  	int16(23550), int16(25765), int16(27756), int16(29461),
 18448  	int16(888), int16(1616), int16(3924), int16(5195),
 18449  	int16(7206), int16(8647), int16(9842), int16(11473),
 18450  	int16(16067), int16(18221), int16(20343), int16(22774),
 18451  	int16(24503), int16(26412), int16(28054), int16(29731),
 18452  	int16(805), int16(1454), int16(2683), int16(4472),
 18453  	int16(7936), int16(9360), int16(11398), int16(14345),
 18454  	int16(16205), int16(17832), int16(19453), int16(21646),
 18455  	int16(23899), int16(25928), int16(28387), int16(30463),
 18456  	int16(1640), int16(2383), int16(3484), int16(5082),
 18457  	int16(6032), int16(8606), int16(11640), int16(12966),
 18458  	int16(15842), int16(17368), int16(19346), int16(21182),
 18459  	int16(23638), int16(25889), int16(28368), int16(30299),
 18460  	int16(1632), int16(2204), int16(4510), int16(7580),
 18461  	int16(8718), int16(10512), int16(11962), int16(14096),
 18462  	int16(15640), int16(17194), int16(19143), int16(22247),
 18463  	int16(24563), int16(26561), int16(28604), int16(30509),
 18464  	int16(2043), int16(2612), int16(3985), int16(6851),
 18465  	int16(8038), int16(9514), int16(10979), int16(12789),
 18466  	int16(15426), int16(16728), int16(18899), int16(20277),
 18467  	int16(22902), int16(26209), int16(28711), int16(30618),
 18468  	int16(2224), int16(2798), int16(4465), int16(5320),
 18469  	int16(7108), int16(9436), int16(10986), int16(13222),
 18470  	int16(14599), int16(18317), int16(20141), int16(21843),
 18471  	int16(23601), int16(25700), int16(28184), int16(30582),
 18472  	int16(835), int16(1541), int16(4083), int16(5769),
 18473  	int16(7386), int16(9399), int16(10971), int16(12456),
 18474  	int16(15021), int16(18642), int16(20843), int16(23100),
 18475  	int16(25292), int16(26966), int16(28952), int16(30422),
 18476  	int16(1795), int16(2343), int16(4809), int16(5896),
 18477  	int16(7178), int16(8545), int16(10223), int16(13370),
 18478  	int16(14606), int16(16469), int16(18273), int16(20736),
 18479  	int16(23645), int16(26257), int16(28224), int16(30390),
 18480  	int16(1734), int16(2254), int16(4031), int16(5188),
 18481  	int16(6506), int16(7872), int16(9651), int16(13025),
 18482  	int16(14419), int16(17305), int16(19495), int16(22190),
 18483  	int16(24403), int16(26302), int16(28195), int16(30177),
 18484  	int16(1841), int16(2349), int16(3968), int16(4764),
 18485  	int16(6376), int16(9825), int16(11048), int16(13345),
 18486  	int16(14682), int16(16252), int16(18183), int16(21363),
 18487  	int16(23918), int16(26156), int16(28031), int16(29935),
 18488  	int16(1432), int16(2047), int16(5631), int16(6927),
 18489  	int16(8198), int16(9675), int16(11358), int16(13506),
 18490  	int16(14802), int16(16419), int16(18339), int16(22019),
 18491  	int16(24124), int16(26177), int16(28130), int16(30586),
 18492  	int16(1730), int16(2320), int16(3744), int16(4808),
 18493  	int16(6007), int16(9666), int16(10997), int16(13622),
 18494  	int16(15234), int16(17495), int16(20088), int16(22002),
 18495  	int16(23603), int16(25400), int16(27379), int16(29254),
 18496  	int16(1267), int16(1915), int16(5483), int16(6812),
 18497  	int16(8229), int16(9919), int16(11589), int16(13337),
 18498  	int16(14747), int16(17965), int16(20552), int16(22167),
 18499  	int16(24519), int16(26819), int16(28883), int16(30642),
 18500  	int16(1526), int16(2229), int16(4240), int16(7388),
 18501  	int16(8953), int16(10450), int16(11899), int16(13718),
 18502  	int16(16861), int16(18323), int16(20379), int16(22672),
 18503  	int16(24797), int16(26906), int16(28906), int16(30622),
 18504  	int16(2175), int16(2791), int16(4104), int16(6875),
 18505  	int16(8612), int16(9798), int16(12152), int16(13536),
 18506  	int16(15623), int16(17682), int16(19213), int16(21060),
 18507  	int16(24382), int16(26760), int16(28633), int16(30248),
 18508  	int16(454), int16(1231), int16(4339), int16(5738),
 18509  	int16(7550), int16(9006), int16(10320), int16(13525),
 18510  	int16(16005), int16(17849), int16(20071), int16(21992),
 18511  	int16(23949), int16(26043), int16(28245), int16(30175),
 18512  	int16(2250), int16(2791), int16(4230), int16(5283),
 18513  	int16(6762), int16(10607), int16(11879), int16(13821),
 18514  	int16(15797), int16(17264), int16(20029), int16(22266),
 18515  	int16(24588), int16(26437), int16(28244), int16(30419),
 18516  	int16(1696), int16(2216), int16(4308), int16(8385),
 18517  	int16(9766), int16(11030), int16(12556), int16(14099),
 18518  	int16(16322), int16(17640), int16(19166), int16(20590),
 18519  	int16(23967), int16(26858), int16(28798), int16(30562),
 18520  	int16(2452), int16(3236), int16(4369), int16(6118),
 18521  	int16(7156), int16(9003), int16(11509), int16(12796),
 18522  	int16(15749), int16(17291), int16(19491), int16(22241),
 18523  	int16(24530), int16(26474), int16(28273), int16(30073),
 18524  	int16(1811), int16(2541), int16(3555), int16(5480),
 18525  	int16(9123), int16(10527), int16(11894), int16(13659),
 18526  	int16(15262), int16(16899), int16(19366), int16(21069),
 18527  	int16(22694), int16(24314), int16(27256), int16(29983),
 18528  	int16(1553), int16(2246), int16(4559), int16(5500),
 18529  	int16(6754), int16(7874), int16(11739), int16(13571),
 18530  	int16(15188), int16(17879), int16(20281), int16(22510),
 18531  	int16(24614), int16(26649), int16(28786), int16(30755),
 18532  	int16(1982), int16(2768), int16(3834), int16(5964),
 18533  	int16(8732), int16(9908), int16(11797), int16(14813),
 18534  	int16(16311), int16(17946), int16(21097), int16(22851),
 18535  	int16(24456), int16(26304), int16(28166), int16(29755),
 18536  	int16(1824), int16(2529), int16(3817), int16(5449),
 18537  	int16(6854), int16(8714), int16(10381), int16(12286),
 18538  	int16(14194), int16(15774), int16(19524), int16(21374),
 18539  	int16(23695), int16(26069), int16(28096), int16(30212),
 18540  	int16(2212), int16(2854), int16(3947), int16(5898),
 18541  	int16(9930), int16(11556), int16(12854), int16(14788),
 18542  	int16(16328), int16(17700), int16(20321), int16(22098),
 18543  	int16(23672), int16(25291), int16(26976), int16(28586),
 18544  	int16(2023), int16(2599), int16(4024), int16(4916),
 18545  	int16(6613), int16(11149), int16(12457), int16(14626),
 18546  	int16(16320), int16(17822), int16(19673), int16(21172),
 18547  	int16(23115), int16(26051), int16(28825), int16(30758),
 18548  	int16(1628), int16(2206), int16(3467), int16(4364),
 18549  	int16(8679), int16(10173), int16(11864), int16(13679),
 18550  	int16(14998), int16(16938), int16(19207), int16(21364),
 18551  	int16(23850), int16(26115), int16(28124), int16(30273),
 18552  	int16(2014), int16(2603), int16(4114), int16(7254),
 18553  	int16(8516), int16(10043), int16(11822), int16(13503),
 18554  	int16(16329), int16(17826), int16(19697), int16(21280),
 18555  	int16(23151), int16(24661), int16(26807), int16(30161),
 18556  	int16(2376), int16(2980), int16(4422), int16(5770),
 18557  	int16(7016), int16(9723), int16(11125), int16(13516),
 18558  	int16(15485), int16(16985), int16(19160), int16(20587),
 18559  	int16(24401), int16(27180), int16(29046), int16(30647),
 18560  	int16(2454), int16(3502), int16(4624), int16(6019),
 18561  	int16(7632), int16(8849), int16(10792), int16(13964),
 18562  	int16(15523), int16(17085), int16(19611), int16(21238),
 18563  	int16(22856), int16(25108), int16(28106), int16(29890),
 18564  	int16(1573), int16(2274), int16(3308), int16(5999),
 18565  	int16(8977), int16(10104), int16(12457), int16(14258),
 18566  	int16(15749), int16(18180), int16(19974), int16(21253),
 18567  	int16(23045), int16(25058), int16(27741), int16(30315),
 18568  	int16(1943), int16(2730), int16(4140), int16(6160),
 18569  	int16(7491), int16(8986), int16(11309), int16(12775),
 18570  	int16(14820), int16(16558), int16(17909), int16(19757),
 18571  	int16(21512), int16(23605), int16(27274), int16(29527),
 18572  	int16(2021), int16(2582), int16(4494), int16(5835),
 18573  	int16(6993), int16(8245), int16(9827), int16(14733),
 18574  	int16(16462), int16(17894), int16(19647), int16(21083),
 18575  	int16(23764), int16(26667), int16(29072), int16(30990),
 18576  	int16(1052), int16(1775), int16(3218), int16(4378),
 18577  	int16(7666), int16(9403), int16(11248), int16(13327),
 18578  	int16(14972), int16(17962), int16(20758), int16(22354),
 18579  	int16(25071), int16(27209), int16(29001), int16(30609),
 18580  	int16(2218), int16(2866), int16(4223), int16(5352),
 18581  	int16(6581), int16(9980), int16(11587), int16(13121),
 18582  	int16(15193), int16(16583), int16(18386), int16(20080),
 18583  	int16(22013), int16(25317), int16(28127), int16(29880),
 18584  	int16(2146), int16(2840), int16(4397), int16(5840),
 18585  	int16(7449), int16(8721), int16(10512), int16(11936),
 18586  	int16(13595), int16(17253), int16(19310), int16(20891),
 18587  	int16(23417), int16(25627), int16(27749), int16(30231),
 18588  	int16(1972), int16(2619), int16(3756), int16(6367),
 18589  	int16(7641), int16(8814), int16(12286), int16(13768),
 18590  	int16(15309), int16(18036), int16(19557), int16(20904),
 18591  	int16(22582), int16(24876), int16(27800), int16(30440),
 18592  	int16(2005), int16(2577), int16(4272), int16(7373),
 18593  	int16(8558), int16(10223), int16(11770), int16(13402),
 18594  	int16(16502), int16(18000), int16(19645), int16(21104),
 18595  	int16(22990), int16(26806), int16(29505), int16(30942),
 18596  	int16(1153), int16(1822), int16(3724), int16(5443),
 18597  	int16(6990), int16(8702), int16(10289), int16(11899),
 18598  	int16(13856), int16(15315), int16(17601), int16(21064),
 18599  	int16(23692), int16(26083), int16(28586), int16(30639),
 18600  	int16(1304), int16(1869), int16(3318), int16(7195),
 18601  	int16(9613), int16(10733), int16(12393), int16(13728),
 18602  	int16(15822), int16(17474), int16(18882), int16(20692),
 18603  	int16(23114), int16(25540), int16(27684), int16(29244),
 18604  	int16(2093), int16(2691), int16(4018), int16(6658),
 18605  	int16(7947), int16(9147), int16(10497), int16(11881),
 18606  	int16(15888), int16(17821), int16(19333), int16(21233),
 18607  	int16(23371), int16(25234), int16(27553), int16(29998),
 18608  	int16(575), int16(1331), int16(5304), int16(6910),
 18609  	int16(8425), int16(10086), int16(11577), int16(13498),
 18610  	int16(16444), int16(18527), int16(20565), int16(22847),
 18611  	int16(24914), int16(26692), int16(28759), int16(30157),
 18612  	int16(1435), int16(2024), int16(3283), int16(4156),
 18613  	int16(7611), int16(10592), int16(12049), int16(13927),
 18614  	int16(15459), int16(18413), int16(20495), int16(22270),
 18615  	int16(24222), int16(26093), int16(28065), int16(30099),
 18616  	int16(1632), int16(2168), int16(5540), int16(7478),
 18617  	int16(8630), int16(10391), int16(11644), int16(14321),
 18618  	int16(15741), int16(17357), int16(18756), int16(20434),
 18619  	int16(22799), int16(26060), int16(28542), int16(30696),
 18620  	int16(1407), int16(2245), int16(3405), int16(5639),
 18621  	int16(9419), int16(10685), int16(12104), int16(13495),
 18622  	int16(15535), int16(18357), int16(19996), int16(21689),
 18623  	int16(24351), int16(26550), int16(28853), int16(30564),
 18624  	int16(1675), int16(2226), int16(4005), int16(8223),
 18625  	int16(9975), int16(11155), int16(12822), int16(14316),
 18626  	int16(16504), int16(18137), int16(19574), int16(21050),
 18627  	int16(22759), int16(24912), int16(28296), int16(30634),
 18628  	int16(1080), int16(1614), int16(3622), int16(7565),
 18629  	int16(8748), int16(10303), int16(11713), int16(13848),
 18630  	int16(15633), int16(17434), int16(19761), int16(21825),
 18631  	int16(23571), int16(25393), int16(27406), int16(29063),
 18632  	int16(1693), int16(2229), int16(3456), int16(4354),
 18633  	int16(5670), int16(10890), int16(12563), int16(14167),
 18634  	int16(15879), int16(17377), int16(19817), int16(21971),
 18635  	int16(24094), int16(26131), int16(28298), int16(30099),
 18636  	int16(2042), int16(2959), int16(4195), int16(5740),
 18637  	int16(7106), int16(8267), int16(11126), int16(14973),
 18638  	int16(16914), int16(18295), int16(20532), int16(21982),
 18639  	int16(23711), int16(25769), int16(27609), int16(29351),
 18640  	int16(984), int16(1612), int16(3808), int16(5265),
 18641  	int16(6885), int16(8411), int16(9547), int16(10889),
 18642  	int16(12522), int16(16520), int16(19549), int16(21639),
 18643  	int16(23746), int16(26058), int16(28310), int16(30374),
 18644  	int16(2036), int16(2538), int16(4166), int16(7761),
 18645  	int16(9146), int16(10412), int16(12144), int16(13609),
 18646  	int16(15588), int16(17169), int16(18559), int16(20113),
 18647  	int16(21820), int16(24313), int16(28029), int16(30612),
 18648  	int16(1871), int16(2355), int16(4061), int16(5143),
 18649  	int16(7464), int16(10129), int16(11941), int16(15001),
 18650  	int16(16680), int16(18354), int16(19957), int16(22279),
 18651  	int16(24861), int16(26872), int16(28988), int16(30615),
 18652  	int16(2566), int16(3161), int16(4643), int16(6227),
 18653  	int16(7406), int16(9970), int16(11618), int16(13416),
 18654  	int16(15889), int16(17364), int16(19121), int16(20817),
 18655  	int16(22592), int16(24720), int16(28733), int16(31082),
 18656  	int16(1700), int16(2327), int16(4828), int16(5939),
 18657  	int16(7567), int16(9154), int16(11087), int16(12771),
 18658  	int16(14209), int16(16121), int16(20222), int16(22671),
 18659  	int16(24648), int16(26656), int16(28696), int16(30745),
 18660  	int16(3169), int16(3873), int16(5046), int16(6868),
 18661  	int16(8184), int16(9480), int16(12335), int16(14068),
 18662  	int16(15774), int16(17971), int16(20231), int16(21711),
 18663  	int16(23520), int16(25245), int16(27026), int16(28730),
 18664  	int16(1564), int16(2391), int16(4229), int16(6730),
 18665  	int16(8905), int16(10459), int16(13026), int16(15033),
 18666  	int16(17265), int16(19809), int16(21849), int16(23741),
 18667  	int16(25490), int16(27312), int16(29061), int16(30527),
 18668  	int16(2864), int16(3559), int16(4719), int16(6441),
 18669  	int16(9592), int16(11055), int16(12763), int16(14784),
 18670  	int16(16428), int16(18164), int16(20486), int16(22262),
 18671  	int16(24183), int16(26263), int16(28383), int16(30224),
 18672  	int16(2673), int16(3449), int16(4581), int16(5983),
 18673  	int16(6863), int16(8311), int16(12464), int16(13911),
 18674  	int16(15738), int16(17791), int16(19416), int16(21182),
 18675  	int16(24025), int16(26561), int16(28723), int16(30440),
 18676  	int16(2419), int16(3049), int16(4274), int16(6384),
 18677  	int16(8564), int16(9661), int16(11288), int16(12676),
 18678  	int16(14447), int16(17578), int16(19816), int16(21231),
 18679  	int16(23099), int16(25270), int16(26899), int16(28926),
 18680  	int16(1278), int16(2001), int16(3000), int16(5353),
 18681  	int16(9995), int16(11777), int16(13018), int16(14570),
 18682  	int16(16050), int16(17762), int16(19982), int16(21617),
 18683  	int16(23371), int16(25083), int16(27656), int16(30172),
 18684  	int16(932), int16(1624), int16(2798), int16(4570),
 18685  	int16(8592), int16(9988), int16(11552), int16(13050),
 18686  	int16(16921), int16(18677), int16(20415), int16(22810),
 18687  	int16(24817), int16(26819), int16(28804), int16(30385),
 18688  	int16(2324), int16(2973), int16(4156), int16(5702),
 18689  	int16(6919), int16(8806), int16(10259), int16(12503),
 18690  	int16(15015), int16(16567), int16(19418), int16(21375),
 18691  	int16(22943), int16(24550), int16(27024), int16(29849),
 18692  	int16(1564), int16(2373), int16(3455), int16(4907),
 18693  	int16(5975), int16(7436), int16(11786), int16(14505),
 18694  	int16(16107), int16(18148), int16(20019), int16(21653),
 18695  	int16(23740), int16(25814), int16(28578), int16(30372),
 18696  	int16(3025), int16(3729), int16(4866), int16(6520),
 18697  	int16(9487), int16(10943), int16(12358), int16(14258),
 18698  	int16(16174), int16(17501), int16(19476), int16(21408),
 18699  	int16(23227), int16(24906), int16(27347), int16(29407),
 18700  	int16(1270), int16(1965), int16(6802), int16(7995),
 18701  	int16(9204), int16(10828), int16(12507), int16(14230),
 18702  	int16(15759), int16(17860), int16(20369), int16(22502),
 18703  	int16(24633), int16(26514), int16(28535), int16(30525),
 18704  	int16(2210), int16(2749), int16(4266), int16(7487),
 18705  	int16(9878), int16(11018), int16(12823), int16(14431),
 18706  	int16(16247), int16(18626), int16(20450), int16(22054),
 18707  	int16(23739), int16(25291), int16(27074), int16(29169),
 18708  	int16(1275), int16(1926), int16(4330), int16(6573),
 18709  	int16(8441), int16(10920), int16(13260), int16(15008),
 18710  	int16(16927), int16(18573), int16(20644), int16(22217),
 18711  	int16(23983), int16(25474), int16(27372), int16(28645),
 18712  	int16(3015), int16(3670), int16(5086), int16(6372),
 18713  	int16(7888), int16(9309), int16(10966), int16(12642),
 18714  	int16(14495), int16(16172), int16(18080), int16(19972),
 18715  	int16(22454), int16(24899), int16(27362), int16(29975),
 18716  	int16(2882), int16(3733), int16(5113), int16(6482),
 18717  	int16(8125), int16(9685), int16(11598), int16(13288),
 18718  	int16(15405), int16(17192), int16(20178), int16(22426),
 18719  	int16(24801), int16(27014), int16(29212), int16(30811),
 18720  	int16(2300), int16(2968), int16(4101), int16(5442),
 18721  	int16(6327), int16(7910), int16(12455), int16(13862),
 18722  	int16(15747), int16(17505), int16(19053), int16(20679),
 18723  	int16(22615), int16(24658), int16(27499), int16(30065),
 18724  	int16(2257), int16(2940), int16(4430), int16(5991),
 18725  	int16(7042), int16(8364), int16(9414), int16(11224),
 18726  	int16(15723), int16(17420), int16(19253), int16(21469),
 18727  	int16(23915), int16(26053), int16(28430), int16(30384),
 18728  	int16(1227), int16(2045), int16(3818), int16(5011),
 18729  	int16(6990), int16(9231), int16(11024), int16(13011),
 18730  	int16(17341), int16(19017), int16(20583), int16(22799),
 18731  	int16(25195), int16(26876), int16(29351), int16(30805),
 18732  	int16(1354), int16(1924), int16(3789), int16(8077),
 18733  	int16(10453), int16(11639), int16(13352), int16(14817),
 18734  	int16(16743), int16(18189), int16(20095), int16(22014),
 18735  	int16(24593), int16(26677), int16(28647), int16(30256),
 18736  	int16(3142), int16(4049), int16(6197), int16(7417),
 18737  	int16(8753), int16(10156), int16(11533), int16(13181),
 18738  	int16(15947), int16(17655), int16(19606), int16(21402),
 18739  	int16(23487), int16(25659), int16(28123), int16(30304),
 18740  	int16(1317), int16(2263), int16(4725), int16(7611),
 18741  	int16(9667), int16(11634), int16(14143), int16(16258),
 18742  	int16(18724), int16(20698), int16(22379), int16(24007),
 18743  	int16(25775), int16(27251), int16(28930), int16(30593),
 18744  	int16(1570), int16(2323), int16(3818), int16(6215),
 18745  	int16(9893), int16(11556), int16(13070), int16(14631),
 18746  	int16(16152), int16(18290), int16(21386), int16(23346),
 18747  	int16(25114), int16(26923), int16(28712), int16(30168),
 18748  	int16(2297), int16(3905), int16(6287), int16(8558),
 18749  	int16(10668), int16(12766), int16(15019), int16(17102),
 18750  	int16(19036), int16(20677), int16(22341), int16(23871),
 18751  	int16(25478), int16(27085), int16(28851), int16(30520),
 18752  	int16(1915), int16(2507), int16(4033), int16(5749),
 18753  	int16(7059), int16(8871), int16(10659), int16(12198),
 18754  	int16(13937), int16(15383), int16(16869), int16(18707),
 18755  	int16(23175), int16(25818), int16(28514), int16(30501),
 18756  	int16(2404), int16(2918), int16(5190), int16(6252),
 18757  	int16(7426), int16(9887), int16(12387), int16(14795),
 18758  	int16(16754), int16(18368), int16(20338), int16(22003),
 18759  	int16(24236), int16(26456), int16(28490), int16(30397),
 18760  	int16(1621), int16(2227), int16(3479), int16(5085),
 18761  	int16(9425), int16(12892), int16(14246), int16(15652),
 18762  	int16(17205), int16(18674), int16(20446), int16(22209),
 18763  	int16(23778), int16(25867), int16(27931), int16(30093),
 18764  	int16(1869), int16(2390), int16(4105), int16(7021),
 18765  	int16(11221), int16(12775), int16(14059), int16(15590),
 18766  	int16(17024), int16(18608), int16(20595), int16(22075),
 18767  	int16(23649), int16(25154), int16(26914), int16(28671),
 18768  	int16(2551), int16(3252), int16(4688), int16(6562),
 18769  	int16(7869), int16(9125), int16(10475), int16(11800),
 18770  	int16(15402), int16(18780), int16(20992), int16(22555),
 18771  	int16(24289), int16(25968), int16(27465), int16(29232),
 18772  	int16(2705), int16(3493), int16(4735), int16(6360),
 18773  	int16(7905), int16(9352), int16(11538), int16(13430),
 18774  	int16(15239), int16(16919), int16(18619), int16(20094),
 18775  	int16(21800), int16(23342), int16(25200), int16(29257),
 18776  	int16(2166), int16(2791), int16(4011), int16(5081),
 18777  	int16(5896), int16(9038), int16(13407), int16(14703),
 18778  	int16(16543), int16(18189), int16(19896), int16(21857),
 18779  	int16(24872), int16(26971), int16(28955), int16(30514),
 18780  	int16(1865), int16(3021), int16(4696), int16(6534),
 18781  	int16(8343), int16(9914), int16(12789), int16(14103),
 18782  	int16(16533), int16(17729), int16(21340), int16(22439),
 18783  	int16(24873), int16(26330), int16(28428), int16(30154),
 18784  	int16(3369), int16(4345), int16(6573), int16(8763),
 18785  	int16(10309), int16(11713), int16(13367), int16(14784),
 18786  	int16(16483), int16(18145), int16(19839), int16(21247),
 18787  	int16(23292), int16(25477), int16(27555), int16(29447),
 18788  	int16(1265), int16(2184), int16(5443), int16(7893),
 18789  	int16(10591), int16(13139), int16(15105), int16(16639),
 18790  	int16(18402), int16(19826), int16(21419), int16(22995),
 18791  	int16(24719), int16(26437), int16(28363), int16(30125),
 18792  	int16(1584), int16(2004), int16(3535), int16(4450),
 18793  	int16(8662), int16(10764), int16(12832), int16(14978),
 18794  	int16(16972), int16(18794), int16(20932), int16(22547),
 18795  	int16(24636), int16(26521), int16(28701), int16(30567),
 18796  	int16(3419), int16(4528), int16(6602), int16(7890),
 18797  	int16(9508), int16(10875), int16(12771), int16(14357),
 18798  	int16(16051), int16(18330), int16(20630), int16(22490),
 18799  	int16(25070), int16(26936), int16(28946), int16(30542),
 18800  	int16(1726), int16(2252), int16(4597), int16(6950),
 18801  	int16(8379), int16(9823), int16(11363), int16(12794),
 18802  	int16(14306), int16(15476), int16(16798), int16(18018),
 18803  	int16(21671), int16(25550), int16(28148), int16(30367),
 18804  	int16(3385), int16(3870), int16(5307), int16(6388),
 18805  	int16(7141), int16(8684), int16(12695), int16(14939),
 18806  	int16(16480), int16(18277), int16(20537), int16(22048),
 18807  	int16(23947), int16(25965), int16(28214), int16(29956),
 18808  	int16(2771), int16(3306), int16(4450), int16(5560),
 18809  	int16(6453), int16(9493), int16(13548), int16(14754),
 18810  	int16(16743), int16(18447), int16(20028), int16(21736),
 18811  	int16(23746), int16(25353), int16(27141), int16(29066),
 18812  	int16(3028), int16(3900), int16(6617), int16(7893),
 18813  	int16(9211), int16(10480), int16(12047), int16(13583),
 18814  	int16(15182), int16(16662), int16(18502), int16(20092),
 18815  	int16(22190), int16(24358), int16(26302), int16(28957),
 18816  	int16(2000), int16(2550), int16(4067), int16(6837),
 18817  	int16(9628), int16(11002), int16(12594), int16(14098),
 18818  	int16(15589), int16(17195), int16(18679), int16(20099),
 18819  	int16(21530), int16(23085), int16(24641), int16(29022),
 18820  	int16(2844), int16(3302), int16(5103), int16(6107),
 18821  	int16(6911), int16(8598), int16(12416), int16(14054),
 18822  	int16(16026), int16(18567), int16(20672), int16(22270),
 18823  	int16(23952), int16(25771), int16(27658), int16(30026),
 18824  	int16(4043), int16(5150), int16(7268), int16(9056),
 18825  	int16(10916), int16(12638), int16(14543), int16(16184),
 18826  	int16(17948), int16(19691), int16(21357), int16(22981),
 18827  	int16(24825), int16(26591), int16(28479), int16(30233),
 18828  	int16(2109), int16(2625), int16(4320), int16(5525),
 18829  	int16(7454), int16(10220), int16(12980), int16(14698),
 18830  	int16(17627), int16(19263), int16(20485), int16(22381),
 18831  	int16(24279), int16(25777), int16(27847), int16(30458),
 18832  	int16(1550), int16(2667), int16(6473), int16(9496),
 18833  	int16(10985), int16(12352), int16(13795), int16(15233),
 18834  	int16(17099), int16(18642), int16(20461), int16(22116),
 18835  	int16(24197), int16(26291), int16(28403), int16(30132),
 18836  	int16(2411), int16(3084), int16(4145), int16(5394),
 18837  	int16(6367), int16(8154), int16(13125), int16(16049),
 18838  	int16(17561), int16(19125), int16(21258), int16(22762),
 18839  	int16(24459), int16(26317), int16(28255), int16(29702),
 18840  	int16(4159), int16(4516), int16(5956), int16(7635),
 18841  	int16(8254), int16(8980), int16(11208), int16(14133),
 18842  	int16(16210), int16(17875), int16(20196), int16(21864),
 18843  	int16(23840), int16(25747), int16(28058), int16(30012),
 18844  	int16(2026), int16(2431), int16(2845), int16(3618),
 18845  	int16(7950), int16(9802), int16(12721), int16(14460),
 18846  	int16(16576), int16(18984), int16(21376), int16(23319),
 18847  	int16(24961), int16(26718), int16(28971), int16(30640),
 18848  	int16(3429), int16(3833), int16(4472), int16(4912),
 18849  	int16(7723), int16(10386), int16(12981), int16(15322),
 18850  	int16(16699), int16(18807), int16(20778), int16(22551),
 18851  	int16(24627), int16(26494), int16(28334), int16(30482),
 18852  	int16(4740), int16(5169), int16(5796), int16(6485),
 18853  	int16(6998), int16(8830), int16(11777), int16(14414),
 18854  	int16(16831), int16(18413), int16(20789), int16(22369),
 18855  	int16(24236), int16(25835), int16(27807), int16(30021),
 18856  	int16(150), int16(168), int16(-17), int16(-107),
 18857  	int16(-142), int16(-229), int16(-320), int16(-406),
 18858  	int16(-503), int16(-620), int16(-867), int16(-935),
 18859  	int16(-902), int16(-680), int16(-398), int16(-114),
 18860  	int16(-398), int16(-355), int16(49), int16(255),
 18861  	int16(114), int16(260), int16(399), int16(264),
 18862  	int16(317), int16(431), int16(514), int16(531),
 18863  	int16(435), int16(356), int16(238), int16(106),
 18864  	int16(-43), int16(-36), int16(-169), int16(-224),
 18865  	int16(-391), int16(-633), int16(-776), int16(-970),
 18866  	int16(-844), int16(-455), int16(-181), int16(-12),
 18867  	int16(85), int16(85), int16(164), int16(195),
 18868  	int16(122), int16(85), int16(-158), int16(-640),
 18869  	int16(-903), int16(9), int16(7), int16(-124),
 18870  	int16(149), int16(32), int16(220), int16(369),
 18871  	int16(242), int16(115), int16(79), int16(84),
 18872  	int16(-146), int16(-216), int16(-70), int16(1024),
 18873  	int16(751), int16(574), int16(440), int16(377),
 18874  	int16(352), int16(203), int16(30), int16(16),
 18875  	int16(-3), int16(81), int16(161), int16(100),
 18876  	int16(-148), int16(-176), int16(933), int16(750),
 18877  	int16(404), int16(171), int16(-2), int16(-146),
 18878  	int16(-411), int16(-442), int16(-541), int16(-552),
 18879  	int16(-442), int16(-269), int16(-240), int16(-52),
 18880  	int16(603), int16(635), int16(405), int16(178),
 18881  	int16(215), int16(19), int16(-153), int16(-167),
 18882  	int16(-290), int16(-219), int16(151), int16(271),
 18883  	int16(151), int16(119), int16(303), int16(266),
 18884  	int16(100), int16(69), int16(-293), int16(-657),
 18885  	int16(939), int16(659), int16(442), int16(351),
 18886  	int16(132), int16(98), int16(-16), int16(-1),
 18887  	int16(-135), int16(-200), int16(-223), int16(-89),
 18888  	int16(167), int16(154), int16(172), int16(237),
 18889  	int16(-45), int16(-183), int16(-228), int16(-486),
 18890  	int16(263), int16(608), int16(158), int16(-125),
 18891  	int16(-390), int16(-227), int16(-118), int16(43),
 18892  	int16(-457), int16(-392), int16(-769), int16(-840),
 18893  	int16(20), int16(-117), int16(-194), int16(-189),
 18894  	int16(-173), int16(-173), int16(-33), int16(32),
 18895  	int16(174), int16(144), int16(115), int16(167),
 18896  	int16(57), int16(44), int16(14), int16(147),
 18897  	int16(96), int16(-54), int16(-142), int16(-129),
 18898  	int16(-254), int16(-331), int16(304), int16(310),
 18899  	int16(-52), int16(-419), int16(-846), int16(-1060),
 18900  	int16(-88), int16(-123), int16(-202), int16(-343),
 18901  	int16(-554), int16(-961), int16(-951), int16(327),
 18902  	int16(159), int16(81), int16(255), int16(227),
 18903  	int16(120), int16(203), int16(256), int16(192),
 18904  	int16(164), int16(224), int16(290), int16(195),
 18905  	int16(216), int16(209), int16(128), int16(832),
 18906  	int16(1028), int16(889), int16(698), int16(504),
 18907  	int16(408), int16(355), int16(218), int16(32),
 18908  	int16(-115), int16(-84), int16(-276), int16(-100),
 18909  	int16(-312), int16(-484), int16(899), int16(682),
 18910  	int16(465), int16(456), int16(241), int16(-12),
 18911  	int16(-275), int16(-425), int16(-461), int16(-367),
 18912  	int16(-33), int16(-28), int16(-102), int16(-194),
 18913  	int16(-527), int16(863), int16(906), int16(463),
 18914  	int16(245), int16(13), int16(-212), int16(-305),
 18915  	int16(-105), int16(163), int16(279), int16(176),
 18916  	int16(93), int16(67), int16(115), int16(192),
 18917  	int16(61), int16(-50), int16(-132), int16(-175),
 18918  	int16(-224), int16(-271), int16(-629), int16(-252),
 18919  	int16(1158), int16(972), int16(638), int16(280),
 18920  	int16(300), int16(326), int16(143), int16(-152),
 18921  	int16(-214), int16(-287), int16(53), int16(-42),
 18922  	int16(-236), int16(-352), int16(-423), int16(-248),
 18923  	int16(-129), int16(-163), int16(-178), int16(-119),
 18924  	int16(85), int16(57), int16(514), int16(382),
 18925  	int16(374), int16(402), int16(424), int16(423),
 18926  	int16(271), int16(197), int16(97), int16(40),
 18927  	int16(39), int16(-97), int16(-191), int16(-164),
 18928  	int16(-230), int16(-256), int16(-410), int16(396),
 18929  	int16(327), int16(127), int16(10), int16(-119),
 18930  	int16(-167), int16(-291), int16(-274), int16(-141),
 18931  	int16(-99), int16(-226), int16(-218), int16(-139),
 18932  	int16(-224), int16(-209), int16(-268), int16(-442),
 18933  	int16(-413), int16(222), int16(58), int16(521),
 18934  	int16(344), int16(258), int16(76), int16(-42),
 18935  	int16(-142), int16(-165), int16(-123), int16(-92),
 18936  	int16(47), int16(8), int16(-3), int16(-191),
 18937  	int16(-11), int16(-164), int16(-167), int16(-351),
 18938  	int16(-740), int16(311), int16(538), int16(291),
 18939  	int16(184), int16(29), int16(-105), int16(9),
 18940  	int16(-30), int16(-54), int16(-17), int16(-77),
 18941  	int16(-271), int16(-412), int16(-622), int16(-648),
 18942  	int16(476), int16(186), int16(-66), int16(-197),
 18943  	int16(-73), int16(-94), int16(-15), int16(47),
 18944  	int16(28), int16(112), int16(-58), int16(-33),
 18945  	int16(65), int16(19), int16(84), int16(86),
 18946  	int16(276), int16(114), int16(472), int16(786),
 18947  	int16(799), int16(625), int16(415), int16(178),
 18948  	int16(-35), int16(-26), int16(5), int16(9),
 18949  	int16(83), int16(39), int16(37), int16(39),
 18950  	int16(-184), int16(-374), int16(-265), int16(-362),
 18951  	int16(-501), int16(337), int16(716), int16(478),
 18952  	int16(-60), int16(-125), int16(-163), int16(362),
 18953  	int16(17), int16(-122), int16(-233), int16(279),
 18954  	int16(138), int16(157), int16(318), int16(193),
 18955  	int16(189), int16(209), int16(266), int16(252),
 18956  	int16(-46), int16(-56), int16(-277), int16(-429),
 18957  	int16(464), int16(386), int16(142), int16(44),
 18958  	int16(-43), int16(66), int16(264), int16(182),
 18959  	int16(47), int16(14), int16(-26), int16(-79),
 18960  	int16(49), int16(15), int16(-128), int16(-203),
 18961  	int16(-400), int16(-478), int16(325), int16(27),
 18962  	int16(234), int16(411), int16(205), int16(129),
 18963  	int16(12), int16(58), int16(123), int16(57),
 18964  	int16(171), int16(137), int16(96), int16(128),
 18965  	int16(-32), int16(134), int16(-12), int16(57),
 18966  	int16(119), int16(26), int16(-22), int16(-165),
 18967  	int16(-500), int16(-701), int16(-528), int16(-116),
 18968  	int16(64), int16(-8), int16(97), int16(-9),
 18969  	int16(-162), int16(-66), int16(-156), int16(-194),
 18970  	int16(-303), int16(-546), int16(-341), int16(546),
 18971  	int16(358), int16(95), int16(45), int16(76),
 18972  	int16(270), int16(403), int16(205), int16(100),
 18973  	int16(123), int16(50), int16(-53), int16(-144),
 18974  	int16(-110), int16(-13), int16(32), int16(-228),
 18975  	int16(-130), int16(353), int16(296), int16(56),
 18976  	int16(-372), int16(-253), int16(365), int16(73),
 18977  	int16(10), int16(-34), int16(-139), int16(-191),
 18978  	int16(-96), int16(5), int16(44), int16(-85),
 18979  	int16(-179), int16(-129), int16(-192), int16(-246),
 18980  	int16(-85), int16(-110), int16(-155), int16(-44),
 18981  	int16(-27), int16(145), int16(138), int16(79),
 18982  	int16(32), int16(-148), int16(-577), int16(-634),
 18983  	int16(191), int16(94), int16(-9), int16(-35),
 18984  	int16(-77), int16(-84), int16(-56), int16(-171),
 18985  	int16(-298), int16(-271), int16(-243), int16(-156),
 18986  	int16(-328), int16(-235), int16(-76), int16(-128),
 18987  	int16(-121), int16(129), int16(13), int16(-22),
 18988  	int16(32), int16(45), int16(-248), int16(-65),
 18989  	int16(193), int16(-81), int16(299), int16(57),
 18990  	int16(-147), int16(192), int16(-165), int16(-354),
 18991  	int16(-334), int16(-106), int16(-156), int16(-40),
 18992  	int16(-3), int16(-68), int16(124), int16(-257),
 18993  	int16(78), int16(124), int16(170), int16(412),
 18994  	int16(227), int16(105), int16(-104), int16(12),
 18995  	int16(154), int16(250), int16(274), int16(258),
 18996  	int16(4), int16(-27), int16(235), int16(152),
 18997  	int16(51), int16(338), int16(300), int16(7),
 18998  	int16(-314), int16(-411), int16(215), int16(170),
 18999  	int16(-9), int16(-93), int16(-77), int16(76),
 19000  	int16(67), int16(54), int16(200), int16(315),
 19001  	int16(163), int16(72), int16(-91), int16(-402),
 19002  	int16(158), int16(187), int16(-156), int16(-91),
 19003  	int16(290), int16(267), int16(167), int16(91),
 19004  	int16(140), int16(171), int16(112), int16(9),
 19005  	int16(-42), int16(-177), int16(-440), int16(385),
 19006  	int16(80), int16(15), int16(172), int16(129),
 19007  	int16(41), int16(-129), int16(-372), int16(-24),
 19008  	int16(-75), int16(-30), int16(-170), int16(10),
 19009  	int16(-118), int16(57), int16(78), int16(-101),
 19010  	int16(232), int16(161), int16(123), int16(256),
 19011  	int16(277), int16(101), int16(-192), int16(-629),
 19012  	int16(-100), int16(-60), int16(-232), int16(66),
 19013  	int16(13), int16(-13), int16(-80), int16(-239),
 19014  	int16(239), int16(37), int16(32), int16(89),
 19015  	int16(-319), int16(-579), int16(450), int16(360),
 19016  	int16(3), int16(-29), int16(-299), int16(-89),
 19017  	int16(-54), int16(-110), int16(-246), int16(-164),
 19018  	int16(6), int16(-188), int16(338), int16(176),
 19019  	int16(-92), int16(197), int16(137), int16(134),
 19020  	int16(12), int16(-2), int16(56), int16(-183),
 19021  	int16(114), int16(-36), int16(-131), int16(-204),
 19022  	int16(75), int16(-25), int16(-174), int16(191),
 19023  	int16(-15), int16(-290), int16(-429), int16(-267),
 19024  	int16(79), int16(37), int16(106), int16(23),
 19025  	int16(-384), int16(425), int16(70), int16(-14),
 19026  	int16(212), int16(105), int16(15), int16(-2),
 19027  	int16(-42), int16(-37), int16(-123), int16(108),
 19028  	int16(28), int16(-48), int16(193), int16(197),
 19029  	int16(173), int16(-33), int16(37), int16(73),
 19030  	int16(-57), int16(256), int16(137), int16(-58),
 19031  	int16(-430), int16(-228), int16(217), int16(-51),
 19032  	int16(-10), int16(-58), int16(-6), int16(22),
 19033  	int16(104), int16(61), int16(-119), int16(169),
 19034  	int16(144), int16(16), int16(-46), int16(-394),
 19035  	int16(60), int16(454), int16(-80), int16(-298),
 19036  	int16(-65), int16(25), int16(0), int16(-24),
 19037  	int16(-65), int16(-417), int16(465), int16(276),
 19038  	int16(-3), int16(-194), int16(-13), int16(130),
 19039  	int16(19), int16(-6), int16(-21), int16(-24),
 19040  	int16(-180), int16(-53), int16(-85), int16(20),
 19041  	int16(118), int16(147), int16(113), int16(-75),
 19042  	int16(-289), int16(226), int16(-122), int16(227),
 19043  	int16(270), int16(125), int16(109), int16(197),
 19044  	int16(125), int16(138), int16(44), int16(60),
 19045  	int16(25), int16(-55), int16(-167), int16(-32),
 19046  	int16(-139), int16(-193), int16(-173), int16(-316),
 19047  	int16(287), int16(-208), int16(253), int16(239),
 19048  	int16(27), int16(-80), int16(-188), int16(-28),
 19049  	int16(-182), int16(-235), int16(156), int16(-117),
 19050  	int16(128), int16(-48), int16(-58), int16(-226),
 19051  	int16(172), int16(181), int16(167), int16(19),
 19052  	int16(62), int16(10), int16(2), int16(181),
 19053  	int16(151), int16(108), int16(-16), int16(-11),
 19054  	int16(-78), int16(-331), int16(411), int16(133),
 19055  	int16(17), int16(104), int16(64), int16(-184),
 19056  	int16(24), int16(-30), int16(-3), int16(-283),
 19057  	int16(121), int16(204), int16(-8), int16(-199),
 19058  	int16(-21), int16(-80), int16(-169), int16(-157),
 19059  	int16(-191), int16(-136), int16(81), int16(155),
 19060  	int16(14), int16(-131), int16(244), int16(74),
 19061  	int16(-57), int16(-47), int16(-280), int16(347),
 19062  	int16(111), int16(-77), int16(-128), int16(-142),
 19063  	int16(-194), int16(-125), int16(-6), int16(-68),
 19064  	int16(91), int16(1), int16(23), int16(14),
 19065  	int16(-154), int16(-34), int16(23), int16(-38),
 19066  	int16(-343), int16(503), int16(146), int16(-38),
 19067  	int16(-46), int16(-41), int16(58), int16(31),
 19068  	int16(63), int16(-48), int16(-117), int16(45),
 19069  	int16(28), int16(1), int16(-89), int16(-5),
 19070  	int16(-44), int16(-29), int16(-448), int16(487),
 19071  	int16(204), int16(81), int16(46), int16(-106),
 19072  	int16(-302), int16(380), int16(120), int16(-38),
 19073  	int16(-12), int16(-39), int16(70), int16(-3),
 19074  	int16(25), int16(-65), int16(30), int16(-11),
 19075  	int16(34), int16(-15), int16(22), int16(-115),
 19076  	int16(0), int16(-79), int16(-83), int16(45),
 19077  	int16(114), int16(43), int16(150), int16(36),
 19078  	int16(233), int16(149), int16(195), int16(5),
 19079  	int16(25), int16(-52), int16(-475), int16(274),
 19080  	int16(28), int16(-39), int16(-8), int16(-66),
 19081  	int16(-255), int16(258), int16(56), int16(143),
 19082  	int16(-45), int16(-190), int16(165), int16(-60),
 19083  	int16(20), int16(2), int16(125), int16(-129),
 19084  	int16(51), int16(-8), int16(-335), int16(288),
 19085  	int16(38), int16(59), int16(25), int16(-42),
 19086  	int16(23), int16(-118), int16(-112), int16(11),
 19087  	int16(-55), int16(-133), int16(-109), int16(24),
 19088  	int16(-105), int16(78), int16(-64), int16(-245),
 19089  	int16(202), int16(-65), int16(-127), int16(162),
 19090  	int16(40), int16(-94), int16(89), int16(-85),
 19091  	int16(-119), int16(-103), int16(97), int16(9),
 19092  	int16(-70), int16(-28), int16(194), int16(86),
 19093  	int16(-112), int16(-92), int16(-114), int16(74),
 19094  	int16(-49), int16(46), int16(-84), int16(-178),
 19095  	int16(113), int16(52), int16(-205), int16(333),
 19096  	int16(88), int16(222), int16(56), int16(-55),
 19097  	int16(13), int16(86), int16(4), int16(-77),
 19098  	int16(224), int16(114), int16(-105), int16(112),
 19099  	int16(125), int16(-29), int16(-18), int16(-144),
 19100  	int16(22), int16(-58), int16(-99), int16(28),
 19101  	int16(114), int16(-66), int16(-32), int16(-169),
 19102  	int16(-314), int16(285), int16(72), int16(-74),
 19103  	int16(179), int16(28), int16(-79), int16(-182),
 19104  	int16(13), int16(-55), int16(147), int16(13),
 19105  	int16(12), int16(-54), int16(31), int16(-84),
 19106  	int16(-17), int16(-75), int16(-228), int16(83),
 19107  	int16(-375), int16(436), int16(110), int16(-63),
 19108  	int16(-27), int16(-136), int16(169), int16(-56),
 19109  	int16(-8), int16(-171), int16(184), int16(-42),
 19110  	int16(148), int16(68), int16(204), int16(235),
 19111  	int16(110), int16(-229), int16(91), int16(171),
 19112  	int16(-43), int16(-3), int16(-26), int16(-99),
 19113  	int16(-111), int16(71), int16(-170), int16(202),
 19114  	int16(-67), int16(181), int16(-37), int16(109),
 19115  	int16(-120), int16(3), int16(-55), int16(-260),
 19116  	int16(-16), int16(152), int16(91), int16(142),
 19117  	int16(42), int16(44), int16(134), int16(47),
 19118  	int16(17), int16(-35), int16(22), int16(79),
 19119  	int16(-169), int16(41), int16(46), int16(277),
 19120  	int16(-93), int16(-49), int16(-126), int16(37),
 19121  	int16(-103), int16(-34), int16(-22), int16(-90),
 19122  	int16(-134), int16(-205), int16(92), int16(-9),
 19123  	int16(1), int16(-195), int16(-239), int16(45),
 19124  	int16(54), int16(18), int16(-23), int16(-1),
 19125  	int16(-80), int16(-98), int16(-20), int16(-261),
 19126  	int16(306), int16(72), int16(20), int16(-89),
 19127  	int16(-217), int16(11), int16(6), int16(-82),
 19128  	int16(89), int16(13), int16(-129), int16(-89),
 19129  	int16(83), int16(-71), int16(-55), int16(130),
 19130  	int16(-98), int16(-146), int16(-27), int16(-57),
 19131  	int16(53), int16(275), int16(17), int16(170),
 19132  	int16(-5), int16(-54), int16(132), int16(-64),
 19133  	int16(72), int16(160), int16(-125), int16(-168),
 19134  	int16(72), int16(40), int16(170), int16(78),
 19135  	int16(248), int16(116), int16(20), int16(84),
 19136  	int16(31), int16(-34), int16(190), int16(38),
 19137  	int16(13), int16(-106), int16(225), int16(27),
 19138  	int16(-168), int16(24), int16(-157), int16(-122),
 19139  	int16(165), int16(11), int16(-161), int16(-213),
 19140  	int16(-12), int16(-51), int16(-101), int16(42),
 19141  	int16(101), int16(27), int16(55), int16(111),
 19142  	int16(75), int16(71), int16(-96), int16(-1),
 19143  	int16(65), int16(-277), int16(393), int16(-26),
 19144  	int16(-44), int16(-68), int16(-84), int16(-66),
 19145  	int16(-95), int16(235), int16(179), int16(-25),
 19146  	int16(-41), int16(27), int16(-91), int16(-128),
 19147  	int16(-222), int16(146), int16(-72), int16(-30),
 19148  	int16(-24), int16(55), int16(-126), int16(-68),
 19149  	int16(-58), int16(-127), int16(13), int16(-97),
 19150  	int16(-106), int16(174), int16(-100), int16(155),
 19151  	int16(101), int16(-146), int16(-21), int16(261),
 19152  	int16(22), int16(38), int16(-66), int16(65),
 19153  	int16(4), int16(70), int16(64), int16(144),
 19154  	int16(59), int16(213), int16(71), int16(-337),
 19155  	int16(303), int16(-52), int16(51), int16(-56),
 19156  	int16(1), int16(10), int16(-15), int16(-5),
 19157  	int16(34), int16(52), int16(228), int16(131),
 19158  	int16(161), int16(-127), int16(-214), int16(238),
 19159  	int16(123), int16(64), int16(-147), int16(-50),
 19160  	int16(-34), int16(-127), int16(204), int16(162),
 19161  	int16(85), int16(41), int16(5), int16(-140),
 19162  	int16(73), int16(-150), int16(56), int16(-96),
 19163  	int16(-66), int16(-20), int16(2), int16(-235),
 19164  	int16(59), int16(-22), int16(-107), int16(150),
 19165  	int16(-16), int16(-47), int16(-4), int16(81),
 19166  	int16(-67), int16(167), int16(149), int16(149),
 19167  	int16(-157), int16(288), int16(-156), int16(-27),
 19168  	int16(-8), int16(18), int16(83), int16(-24),
 19169  	int16(-41), int16(-167), int16(158), int16(-100),
 19170  	int16(93), int16(53), int16(201), int16(15),
 19171  	int16(42), int16(266), int16(278), int16(-12),
 19172  	int16(-6), int16(-37), int16(85), int16(6),
 19173  	int16(20), int16(-188), int16(-271), int16(107),
 19174  	int16(-13), int16(-80), int16(51), int16(202),
 19175  	int16(173), int16(-69), int16(78), int16(-188),
 19176  	int16(46), int16(4), int16(153), int16(12),
 19177  	int16(-138), int16(169), int16(5), int16(-58),
 19178  	int16(-123), int16(-108), int16(-243), int16(150),
 19179  	int16(10), int16(-191), int16(246), int16(-15),
 19180  	int16(38), int16(25), int16(-10), int16(14),
 19181  	int16(61), int16(50), int16(-206), int16(-215),
 19182  	int16(-220), int16(90), int16(5), int16(-149),
 19183  	int16(-219), int16(56), int16(142), int16(24),
 19184  	int16(-376), int16(77), int16(-80), int16(75),
 19185  	int16(6), int16(42), int16(-101), int16(16),
 19186  	int16(56), int16(14), int16(-57), int16(3),
 19187  	int16(-17), int16(80), int16(57), int16(-36),
 19188  	int16(88), int16(-59), int16(-97), int16(-19),
 19189  	int16(-148), int16(46), int16(-219), int16(226),
 19190  	int16(114), int16(-4), int16(-72), int16(-15),
 19191  	int16(37), int16(-49), int16(-28), int16(247),
 19192  	int16(44), int16(123), int16(47), int16(-122),
 19193  	int16(-38), int16(17), int16(4), int16(-113),
 19194  	int16(-32), int16(-224), int16(154), int16(-134),
 19195  	int16(196), int16(71), int16(-267), int16(-85),
 19196  	int16(28), int16(-70), int16(89), int16(-120),
 19197  	int16(99), int16(-2), int16(64), int16(76),
 19198  	int16(-166), int16(-48), int16(189), int16(-35),
 19199  	int16(-92), int16(-169), int16(-123), int16(339),
 19200  	int16(38), int16(-25), int16(38), int16(-35),
 19201  	int16(225), int16(-139), int16(-50), int16(-63),
 19202  	int16(246), int16(60), int16(-185), int16(-109),
 19203  	int16(-49), int16(-53), int16(-167), int16(51),
 19204  	int16(149), int16(60), int16(-101), int16(-33),
 19205  	int16(25), int16(-76), int16(120), int16(32),
 19206  	int16(-30), int16(-83), int16(102), int16(91),
 19207  	int16(-186), int16(-261), int16(131), int16(-197),
 19208  } /* SKP_Silk_tables_NLSF_CB0_16.c:429:17 */
 19209  
 19210  var SKP_Silk_NLSF_CB0_16_Stage_info = [10]SKP_Silk_NLSF_CBS{
 19211  	{FnVectors: 128, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19212  	{FnVectors: 16, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19213  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19214  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19215  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19216  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19217  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19218  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19219  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19220  	{FnVectors: 16, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19221  } /* SKP_Silk_tables_NLSF_CB0_16.c:1297:25 */
 19222  
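// Top-level descriptor for NLSF codebook CB0, order 16 (10 MSVQ stages); the pointer members
// are zero here and, like the stage descriptors above, are presumably filled in at package init.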
 19223  var SKP_Silk_NLSF_CB0_16 = SKP_Silk_NLSF_CB_struct{
 19224  	FnStages:       10,
 19225  	FCBStages:      0,
 19226  	FNDeltaMin_Q15: 0,
 19227  	FCDF:           0,
 19228  	FStartPtr:      0,
 19229  	FMiddleIx:      0,
 19230  } /* SKP_Silk_tables_NLSF_CB0_16.c:1311:31 */
 19231  
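// Concatenated per-stage cumulative distribution functions (CDFs) for the order-10 NLSF MSVQ
// codebook CB1; each stage's CDF runs from 0 to 65535 and is consumed by the SILK range coder.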
 19232  var SKP_Silk_NLSF_MSVQ_CB1_10_CDF = [78]uint16{
 19233  	uint16(0),
 19234  	uint16(17096),
 19235  	uint16(24130),
 19236  	uint16(28997),
 19237  	uint16(33179),
 19238  	uint16(36696),
 19239  	uint16(40213),
 19240  	uint16(42493),
 19241  	uint16(44252),
 19242  	uint16(45973),
 19243  	uint16(47551),
 19244  	uint16(49095),
 19245  	uint16(50542),
 19246  	uint16(51898),
 19247  	uint16(53196),
 19248  	uint16(54495),
 19249  	uint16(55685),
 19250  	uint16(56851),
 19251  	uint16(57749),
 19252  	uint16(58628),
 19253  	uint16(59435),
 19254  	uint16(60207),
 19255  	uint16(60741),
 19256  	uint16(61220),
 19257  	uint16(61700),
 19258  	uint16(62179),
 19259  	uint16(62659),
 19260  	uint16(63138),
 19261  	uint16(63617),
 19262  	uint16(64097),
 19263  	uint16(64576),
 19264  	uint16(65056),
 19265  	uint16(65535),
 19266  	uint16(0),
 19267  	uint16(20378),
 19268  	uint16(33032),
 19269  	uint16(40395),
 19270  	uint16(46721),
 19271  	uint16(51707),
 19272  	uint16(56585),
 19273  	uint16(61157),
 19274  	uint16(65535),
 19275  	uint16(0),
 19276  	uint16(15055),
 19277  	uint16(25472),
 19278  	uint16(35447),
 19279  	uint16(42501),
 19280  	uint16(48969),
 19281  	uint16(54773),
 19282  	uint16(60212),
 19283  	uint16(65535),
 19284  	uint16(0),
 19285  	uint16(12069),
 19286  	uint16(22440),
 19287  	uint16(32812),
 19288  	uint16(40145),
 19289  	uint16(46870),
 19290  	uint16(53595),
 19291  	uint16(59630),
 19292  	uint16(65535),
 19293  	uint16(0),
 19294  	uint16(10839),
 19295  	uint16(19954),
 19296  	uint16(27957),
 19297  	uint16(35961),
 19298  	uint16(43965),
 19299  	uint16(51465),
 19300  	uint16(58805),
 19301  	uint16(65535),
 19302  	uint16(0),
 19303  	uint16(8933),
 19304  	uint16(17674),
 19305  	uint16(26415),
 19306  	uint16(34785),
 19307  	uint16(42977),
 19308  	uint16(50820),
 19309  	uint16(58496),
 19310  	uint16(65535),
 19311  } /* SKP_Silk_tables_NLSF_CB1_10.c:38:18 */
 19312  
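// Start pointers into the CDF table above, one per stage; zero-valued here, presumably assigned at init.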
 19313  var SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr = [6]uintptr{
 19314  	0,
 19315  	0,
 19316  	0,
 19317  	0,
 19318  	0,
 19319  	0,
 19320  } /* SKP_Silk_tables_NLSF_CB1_10.c:120:18 */
 19321  
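// Middle index of each stage's CDF, used as the initial search position when decoding a stage index.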
 19322  var SKP_Silk_NLSF_MSVQ_CB1_10_CDF_middle_idx = [6]int32{
 19323  	5,
 19324  	3,
 19325  	4,
 19326  	4,
 19327  	5,
 19328  	5,
 19329  } /* SKP_Silk_tables_NLSF_CB1_10.c:130:15 */
 19330  
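// Codeword rates in Q5 (i.e. bits scaled by 32) for every vector of the order-10 CB1 stages,
// used to weigh rate against distortion during the MSVQ search.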
 19331  var SKP_Silk_NLSF_MSVQ_CB1_10_rates_Q5 = [72]int16{
 19332  	int16(62), int16(103),
 19333  	int16(120), int16(127),
 19334  	int16(135), int16(135),
 19335  	int16(155), int16(167),
 19336  	int16(168), int16(172),
 19337  	int16(173), int16(176),
 19338  	int16(179), int16(181),
 19339  	int16(181), int16(185),
 19340  	int16(186), int16(198),
 19341  	int16(199), int16(203),
 19342  	int16(205), int16(222),
 19343  	int16(227), int16(227),
 19344  	int16(227), int16(227),
 19345  	int16(227), int16(227),
 19346  	int16(227), int16(227),
 19347  	int16(227), int16(227),
 19348  	int16(54), int16(76),
 19349  	int16(101), int16(108),
 19350  	int16(119), int16(120),
 19351  	int16(123), int16(125),
 19352  	int16(68), int16(85),
 19353  	int16(87), int16(103),
 19354  	int16(107), int16(112),
 19355  	int16(115), int16(116),
 19356  	int16(78), int16(85),
 19357  	int16(85), int16(101),
 19358  	int16(105), int16(105),
 19359  	int16(110), int16(111),
 19360  	int16(83), int16(91),
 19361  	int16(97), int16(97),
 19362  	int16(97), int16(100),
 19363  	int16(101), int16(105),
 19364  	int16(92), int16(93),
 19365  	int16(93), int16(95),
 19366  	int16(96), int16(98),
 19367  	int16(99), int16(103),
 19368  } /* SKP_Silk_tables_NLSF_CB1_10.c:140:17 */
 19369  
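// Minimum allowed spacing between consecutive NLSF values (including the lower and upper
// boundaries), in Q15; order + 1 = 11 entries for the order-10 codebook.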
 19370  var SKP_Silk_NLSF_MSVQ_CB1_10_ndelta_min_Q15 = [11]int32{
 19371  	462,
 19372  	3,
 19373  	64,
 19374  	74,
 19375  	98,
 19376  	50,
 19377  	97,
 19378  	68,
 19379  	120,
 19380  	53,
 19381  	639,
 19382  } /* SKP_Silk_tables_NLSF_CB1_10.c:180:15 */
 19383  
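// Codebook vectors in Q15 for CB1, order 10: 72 vectors x 10 coefficients = 720 values.
// The first-stage entries are full NLSF vectors; later-stage entries are small signed corrections.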
 19384  var SKP_Silk_NLSF_MSVQ_CB1_10_Q15 = [720]int16{
 19385  	int16(1877), int16(4646),
 19386  	int16(7712), int16(10745),
 19387  	int16(13964), int16(17028),
 19388  	int16(20239), int16(23182),
 19389  	int16(26471), int16(29287),
 19390  	int16(1612), int16(3278),
 19391  	int16(7086), int16(9975),
 19392  	int16(13228), int16(16264),
 19393  	int16(19596), int16(22690),
 19394  	int16(26037), int16(28965),
 19395  	int16(2169), int16(3830),
 19396  	int16(6460), int16(8958),
 19397  	int16(11960), int16(14750),
 19398  	int16(18408), int16(21659),
 19399  	int16(25018), int16(28043),
 19400  	int16(3680), int16(6024),
 19401  	int16(8986), int16(12256),
 19402  	int16(15201), int16(18188),
 19403  	int16(21741), int16(24460),
 19404  	int16(27484), int16(30059),
 19405  	int16(2584), int16(5187),
 19406  	int16(7799), int16(10902),
 19407  	int16(13179), int16(15765),
 19408  	int16(19017), int16(22431),
 19409  	int16(25891), int16(28698),
 19410  	int16(3731), int16(5751),
 19411  	int16(8650), int16(11742),
 19412  	int16(15090), int16(17407),
 19413  	int16(20391), int16(23421),
 19414  	int16(26228), int16(29247),
 19415  	int16(2107), int16(6323),
 19416  	int16(8915), int16(12226),
 19417  	int16(14775), int16(17791),
 19418  	int16(20664), int16(23679),
 19419  	int16(26829), int16(29353),
 19420  	int16(1677), int16(2870),
 19421  	int16(5386), int16(8077),
 19422  	int16(11817), int16(15176),
 19423  	int16(18657), int16(22006),
 19424  	int16(25513), int16(28689),
 19425  	int16(2111), int16(3625),
 19426  	int16(7027), int16(10588),
 19427  	int16(14059), int16(17193),
 19428  	int16(21137), int16(24260),
 19429  	int16(27577), int16(30036),
 19430  	int16(2428), int16(4010),
 19431  	int16(5765), int16(9376),
 19432  	int16(13805), int16(15821),
 19433  	int16(19444), int16(22389),
 19434  	int16(25295), int16(29310),
 19435  	int16(2256), int16(4628),
 19436  	int16(8377), int16(12441),
 19437  	int16(15283), int16(19462),
 19438  	int16(22257), int16(25551),
 19439  	int16(28432), int16(30304),
 19440  	int16(2352), int16(3675),
 19441  	int16(6129), int16(11868),
 19442  	int16(14551), int16(16655),
 19443  	int16(19624), int16(21883),
 19444  	int16(26526), int16(28849),
 19445  	int16(5243), int16(7248),
 19446  	int16(10558), int16(13269),
 19447  	int16(15651), int16(17919),
 19448  	int16(21141), int16(23827),
 19449  	int16(27102), int16(29519),
 19450  	int16(4422), int16(6725),
 19451  	int16(10449), int16(13273),
 19452  	int16(16124), int16(19921),
 19453  	int16(22826), int16(26061),
 19454  	int16(28763), int16(30583),
 19455  	int16(4508), int16(6291),
 19456  	int16(9504), int16(11809),
 19457  	int16(13827), int16(15950),
 19458  	int16(19077), int16(22084),
 19459  	int16(25740), int16(28658),
 19460  	int16(2540), int16(4297),
 19461  	int16(8579), int16(13578),
 19462  	int16(16634), int16(19101),
 19463  	int16(21547), int16(23887),
 19464  	int16(26777), int16(29146),
 19465  	int16(3377), int16(6358),
 19466  	int16(10224), int16(14518),
 19467  	int16(17905), int16(21056),
 19468  	int16(23637), int16(25784),
 19469  	int16(28161), int16(30109),
 19470  	int16(4177), int16(5942),
 19471  	int16(8159), int16(10108),
 19472  	int16(12130), int16(15470),
 19473  	int16(20191), int16(23326),
 19474  	int16(26782), int16(29359),
 19475  	int16(2492), int16(3801),
 19476  	int16(6144), int16(9825),
 19477  	int16(16000), int16(18671),
 19478  	int16(20893), int16(23663),
 19479  	int16(25899), int16(28974),
 19480  	int16(3011), int16(4727),
 19481  	int16(6834), int16(10505),
 19482  	int16(12465), int16(14496),
 19483  	int16(17065), int16(20052),
 19484  	int16(25265), int16(28057),
 19485  	int16(4149), int16(7197),
 19486  	int16(12338), int16(15076),
 19487  	int16(18002), int16(20190),
 19488  	int16(22187), int16(24723),
 19489  	int16(27083), int16(29125),
 19490  	int16(2975), int16(4578),
 19491  	int16(6448), int16(8378),
 19492  	int16(9671), int16(13225),
 19493  	int16(19502), int16(22277),
 19494  	int16(26058), int16(28850),
 19495  	int16(4102), int16(5760),
 19496  	int16(7744), int16(9484),
 19497  	int16(10744), int16(12308),
 19498  	int16(14677), int16(19607),
 19499  	int16(24841), int16(28381),
 19500  	int16(4931), int16(9287),
 19501  	int16(12477), int16(13395),
 19502  	int16(13712), int16(14351),
 19503  	int16(16048), int16(19867),
 19504  	int16(24188), int16(28994),
 19505  	int16(4141), int16(7867),
 19506  	int16(13140), int16(17720),
 19507  	int16(20064), int16(21108),
 19508  	int16(21692), int16(22722),
 19509  	int16(23736), int16(27449),
 19510  	int16(4011), int16(8720),
 19511  	int16(13234), int16(16206),
 19512  	int16(17601), int16(18289),
 19513  	int16(18524), int16(19689),
 19514  	int16(23234), int16(27882),
 19515  	int16(3420), int16(5995),
 19516  	int16(11230), int16(15117),
 19517  	int16(15907), int16(16783),
 19518  	int16(17762), int16(23347),
 19519  	int16(26898), int16(29946),
 19520  	int16(3080), int16(6786),
 19521  	int16(10465), int16(13676),
 19522  	int16(18059), int16(23615),
 19523  	int16(27058), int16(29082),
 19524  	int16(29563), int16(29905),
 19525  	int16(3038), int16(5620),
 19526  	int16(9266), int16(12870),
 19527  	int16(18803), int16(19610),
 19528  	int16(20010), int16(20802),
 19529  	int16(23882), int16(29306),
 19530  	int16(3314), int16(6420),
 19531  	int16(9046), int16(13262),
 19532  	int16(15869), int16(23117),
 19533  	int16(23667), int16(24215),
 19534  	int16(24487), int16(25915),
 19535  	int16(3469), int16(6963),
 19536  	int16(10103), int16(15282),
 19537  	int16(20531), int16(23240),
 19538  	int16(25024), int16(26021),
 19539  	int16(26736), int16(27255),
 19540  	int16(3041), int16(6459),
 19541  	int16(9777), int16(12896),
 19542  	int16(16315), int16(19410),
 19543  	int16(24070), int16(29353),
 19544  	int16(31795), int16(32075),
 19545  	int16(-200), int16(-134),
 19546  	int16(-113), int16(-204),
 19547  	int16(-347), int16(-440),
 19548  	int16(-352), int16(-211),
 19549  	int16(-418), int16(-172),
 19550  	int16(-313), int16(59),
 19551  	int16(495), int16(772),
 19552  	int16(721), int16(614),
 19553  	int16(334), int16(444),
 19554  	int16(225), int16(242),
 19555  	int16(161), int16(16),
 19556  	int16(274), int16(564),
 19557  	int16(-73), int16(-188),
 19558  	int16(-395), int16(-171),
 19559  	int16(777), int16(508),
 19560  	int16(1340), int16(1145),
 19561  	int16(699), int16(196),
 19562  	int16(223), int16(173),
 19563  	int16(90), int16(25),
 19564  	int16(-26), int16(18),
 19565  	int16(133), int16(-105),
 19566  	int16(-360), int16(-277),
 19567  	int16(859), int16(634),
 19568  	int16(41), int16(-557),
 19569  	int16(-768), int16(-926),
 19570  	int16(-601), int16(-1021),
 19571  	int16(-1189), int16(-365),
 19572  	int16(225), int16(107),
 19573  	int16(374), int16(-50),
 19574  	int16(433), int16(417),
 19575  	int16(156), int16(39),
 19576  	int16(-597), int16(-1397),
 19577  	int16(-1594), int16(-592),
 19578  	int16(-485), int16(-292),
 19579  	int16(253), int16(87),
 19580  	int16(-0), int16(-6),
 19581  	int16(-25), int16(-345),
 19582  	int16(-240), int16(120),
 19583  	int16(1261), int16(946),
 19584  	int16(166), int16(-277),
 19585  	int16(241), int16(167),
 19586  	int16(170), int16(429),
 19587  	int16(518), int16(714),
 19588  	int16(602), int16(254),
 19589  	int16(134), int16(92),
 19590  	int16(-152), int16(-324),
 19591  	int16(-394), int16(49),
 19592  	int16(-151), int16(-304),
 19593  	int16(-724), int16(-657),
 19594  	int16(-162), int16(-369),
 19595  	int16(-35), int16(3),
 19596  	int16(-2), int16(-312),
 19597  	int16(-200), int16(-92),
 19598  	int16(-227), int16(242),
 19599  	int16(628), int16(565),
 19600  	int16(-124), int16(1056),
 19601  	int16(770), int16(101),
 19602  	int16(-84), int16(-33),
 19603  	int16(4), int16(-192),
 19604  	int16(-272), int16(5),
 19605  	int16(-627), int16(-977),
 19606  	int16(419), int16(472),
 19607  	int16(53), int16(-103),
 19608  	int16(145), int16(322),
 19609  	int16(-95), int16(-31),
 19610  	int16(-100), int16(-303),
 19611  	int16(-560), int16(-1067),
 19612  	int16(-413), int16(714),
 19613  	int16(283), int16(2),
 19614  	int16(-223), int16(-367),
 19615  	int16(523), int16(360),
 19616  	int16(-38), int16(-115),
 19617  	int16(378), int16(-591),
 19618  	int16(-718), int16(448),
 19619  	int16(-481), int16(-274),
 19620  	int16(180), int16(-88),
 19621  	int16(-581), int16(-157),
 19622  	int16(-696), int16(-1265),
 19623  	int16(394), int16(-479),
 19624  	int16(-23), int16(124),
 19625  	int16(-43), int16(19),
 19626  	int16(-113), int16(-236),
 19627  	int16(-412), int16(-659),
 19628  	int16(-200), int16(2),
 19629  	int16(-69), int16(-342),
 19630  	int16(199), int16(55),
 19631  	int16(58), int16(-36),
 19632  	int16(-51), int16(-62),
 19633  	int16(507), int16(507),
 19634  	int16(427), int16(442),
 19635  	int16(36), int16(601),
 19636  	int16(-141), int16(68),
 19637  	int16(274), int16(274),
 19638  	int16(68), int16(-12),
 19639  	int16(-4), int16(71),
 19640  	int16(-193), int16(-464),
 19641  	int16(-425), int16(-383),
 19642  	int16(408), int16(203),
 19643  	int16(-337), int16(236),
 19644  	int16(410), int16(-59),
 19645  	int16(-25), int16(-341),
 19646  	int16(-449), int16(28),
 19647  	int16(-9), int16(90),
 19648  	int16(332), int16(-14),
 19649  	int16(-905), int16(96),
 19650  	int16(-540), int16(-242),
 19651  	int16(679), int16(-59),
 19652  	int16(192), int16(-24),
 19653  	int16(60), int16(-217),
 19654  	int16(5), int16(-37),
 19655  	int16(179), int16(-20),
 19656  	int16(311), int16(519),
 19657  	int16(274), int16(72),
 19658  	int16(-326), int16(-1030),
 19659  	int16(-262), int16(213),
 19660  	int16(380), int16(82),
 19661  	int16(328), int16(411),
 19662  	int16(-540), int16(574),
 19663  	int16(-283), int16(151),
 19664  	int16(181), int16(-402),
 19665  	int16(-278), int16(-240),
 19666  	int16(-110), int16(-227),
 19667  	int16(-264), int16(-89),
 19668  	int16(-250), int16(-259),
 19669  	int16(-27), int16(106),
 19670  	int16(-239), int16(-98),
 19671  	int16(-390), int16(118),
 19672  	int16(61), int16(104),
 19673  	int16(294), int16(532),
 19674  	int16(92), int16(-13),
 19675  	int16(60), int16(-233),
 19676  	int16(335), int16(541),
 19677  	int16(307), int16(-26),
 19678  	int16(-110), int16(-91),
 19679  	int16(-231), int16(-460),
 19680  	int16(170), int16(201),
 19681  	int16(96), int16(-372),
 19682  	int16(132), int16(435),
 19683  	int16(-302), int16(216),
 19684  	int16(-279), int16(-41),
 19685  	int16(74), int16(190),
 19686  	int16(368), int16(273),
 19687  	int16(-186), int16(-608),
 19688  	int16(-157), int16(159),
 19689  	int16(12), int16(278),
 19690  	int16(245), int16(307),
 19691  	int16(25), int16(-187),
 19692  	int16(-16), int16(55),
 19693  	int16(30), int16(-163),
 19694  	int16(548), int16(-307),
 19695  	int16(106), int16(-5),
 19696  	int16(27), int16(330),
 19697  	int16(-416), int16(475),
 19698  	int16(438), int16(-235),
 19699  	int16(104), int16(137),
 19700  	int16(21), int16(-5),
 19701  	int16(-300), int16(-468),
 19702  	int16(521), int16(-347),
 19703  	int16(170), int16(-200),
 19704  	int16(-219), int16(308),
 19705  	int16(-122), int16(-133),
 19706  	int16(219), int16(-16),
 19707  	int16(359), int16(412),
 19708  	int16(-89), int16(-111),
 19709  	int16(48), int16(322),
 19710  	int16(142), int16(177),
 19711  	int16(-286), int16(-127),
 19712  	int16(-39), int16(-63),
 19713  	int16(-42), int16(-451),
 19714  	int16(160), int16(308),
 19715  	int16(-57), int16(193),
 19716  	int16(-48), int16(74),
 19717  	int16(-346), int16(59),
 19718  	int16(-27), int16(27),
 19719  	int16(-469), int16(-277),
 19720  	int16(-344), int16(282),
 19721  	int16(262), int16(122),
 19722  	int16(171), int16(-249),
 19723  	int16(27), int16(258),
 19724  	int16(188), int16(-3),
 19725  	int16(67), int16(-206),
 19726  	int16(-284), int16(291),
 19727  	int16(-117), int16(-88),
 19728  	int16(-477), int16(375),
 19729  	int16(50), int16(106),
 19730  	int16(99), int16(-182),
 19731  	int16(438), int16(-376),
 19732  	int16(-401), int16(-49),
 19733  	int16(119), int16(-23),
 19734  	int16(-10), int16(-48),
 19735  	int16(-116), int16(-200),
 19736  	int16(-310), int16(121),
 19737  	int16(73), int16(7),
 19738  	int16(237), int16(-226),
 19739  	int16(139), int16(-456),
 19740  	int16(397), int16(35),
 19741  	int16(3), int16(-108),
 19742  	int16(323), int16(-75),
 19743  	int16(332), int16(198),
 19744  	int16(-99), int16(-21),
 19745  } /* SKP_Silk_tables_NLSF_CB1_10.c:195:17 */
 19746  
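// Per-stage descriptors for CB1, order 10 (6 MSVQ stages); pointer fields are zero here and
// presumably populated at init.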
 19747  var SKP_Silk_NLSF_CB1_10_Stage_info = [6]SKP_Silk_NLSF_CBS{
 19748  	{FnVectors: 32, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19749  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19750  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19751  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19752  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19753  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19754  } /* SKP_Silk_tables_NLSF_CB1_10.c:559:25 */
 19755  
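// Top-level descriptor for NLSF codebook CB1, order 10: 6 MSVQ stages.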
 19756  var SKP_Silk_NLSF_CB1_10 = SKP_Silk_NLSF_CB_struct{
 19757  	FnStages:       6,
 19758  	FCBStages:      0,
 19759  	FNDeltaMin_Q15: 0,
 19760  	FCDF:           0,
 19761  	FStartPtr:      0,
 19762  	FMiddleIx:      0,
 19763  } /* SKP_Silk_tables_NLSF_CB1_10.c:569:31 */
 19764  
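// Concatenated per-stage CDFs for the order-16 NLSF MSVQ codebook CB1 (10 stages, 114 entries in total).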
 19765  var SKP_Silk_NLSF_MSVQ_CB1_16_CDF = [114]uint16{
 19766  	uint16(0),
 19767  	uint16(19099),
 19768  	uint16(26957),
 19769  	uint16(30639),
 19770  	uint16(34242),
 19771  	uint16(37546),
 19772  	uint16(40447),
 19773  	uint16(43287),
 19774  	uint16(46005),
 19775  	uint16(48445),
 19776  	uint16(49865),
 19777  	uint16(51284),
 19778  	uint16(52673),
 19779  	uint16(53975),
 19780  	uint16(55221),
 19781  	uint16(56441),
 19782  	uint16(57267),
 19783  	uint16(58025),
 19784  	uint16(58648),
 19785  	uint16(59232),
 19786  	uint16(59768),
 19787  	uint16(60248),
 19788  	uint16(60729),
 19789  	uint16(61210),
 19790  	uint16(61690),
 19791  	uint16(62171),
 19792  	uint16(62651),
 19793  	uint16(63132),
 19794  	uint16(63613),
 19795  	uint16(64093),
 19796  	uint16(64574),
 19797  	uint16(65054),
 19798  	uint16(65535),
 19799  	uint16(0),
 19800  	uint16(28808),
 19801  	uint16(38775),
 19802  	uint16(46801),
 19803  	uint16(51785),
 19804  	uint16(55886),
 19805  	uint16(59410),
 19806  	uint16(62572),
 19807  	uint16(65535),
 19808  	uint16(0),
 19809  	uint16(27376),
 19810  	uint16(38639),
 19811  	uint16(45052),
 19812  	uint16(51465),
 19813  	uint16(55448),
 19814  	uint16(59021),
 19815  	uint16(62594),
 19816  	uint16(65535),
 19817  	uint16(0),
 19818  	uint16(33403),
 19819  	uint16(39569),
 19820  	uint16(45102),
 19821  	uint16(49961),
 19822  	uint16(54047),
 19823  	uint16(57959),
 19824  	uint16(61788),
 19825  	uint16(65535),
 19826  	uint16(0),
 19827  	uint16(25851),
 19828  	uint16(43356),
 19829  	uint16(47828),
 19830  	uint16(52204),
 19831  	uint16(55964),
 19832  	uint16(59413),
 19833  	uint16(62507),
 19834  	uint16(65535),
 19835  	uint16(0),
 19836  	uint16(34277),
 19837  	uint16(40337),
 19838  	uint16(45432),
 19839  	uint16(50311),
 19840  	uint16(54326),
 19841  	uint16(58171),
 19842  	uint16(61853),
 19843  	uint16(65535),
 19844  	uint16(0),
 19845  	uint16(33538),
 19846  	uint16(39865),
 19847  	uint16(45302),
 19848  	uint16(50076),
 19849  	uint16(54549),
 19850  	uint16(58478),
 19851  	uint16(62159),
 19852  	uint16(65535),
 19853  	uint16(0),
 19854  	uint16(27445),
 19855  	uint16(35258),
 19856  	uint16(40665),
 19857  	uint16(46072),
 19858  	uint16(51362),
 19859  	uint16(56540),
 19860  	uint16(61086),
 19861  	uint16(65535),
 19862  	uint16(0),
 19863  	uint16(22080),
 19864  	uint16(30779),
 19865  	uint16(37065),
 19866  	uint16(43085),
 19867  	uint16(48849),
 19868  	uint16(54613),
 19869  	uint16(60133),
 19870  	uint16(65535),
 19871  	uint16(0),
 19872  	uint16(13417),
 19873  	uint16(21748),
 19874  	uint16(30078),
 19875  	uint16(38231),
 19876  	uint16(46383),
 19877  	uint16(53091),
 19878  	uint16(59515),
 19879  	uint16(65535),
 19880  } /* SKP_Silk_tables_NLSF_CB1_16.c:38:18 */
 19881  
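// Per-stage start pointers into the order-16 CB1 CDF table; zero here, presumably set at init.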
 19882  var SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr = [10]uintptr{
 19883  	0,
 19884  	0,
 19885  	0,
 19886  	0,
 19887  	0,
 19888  	0,
 19889  	0,
 19890  	0,
 19891  	0,
 19892  	0,
 19893  } /* SKP_Silk_tables_NLSF_CB1_16.c:156:18 */
 19894  
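// Middle index of each order-16 CB1 stage CDF, the initial search position for the range decoder.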
 19895  var SKP_Silk_NLSF_MSVQ_CB1_16_CDF_middle_idx = [10]int32{
 19896  	5,
 19897  	2,
 19898  	2,
 19899  	2,
 19900  	2,
 19901  	2,
 19902  	2,
 19903  	3,
 19904  	3,
 19905  	4,
 19906  } /* SKP_Silk_tables_NLSF_CB1_16.c:170:15 */
 19907  
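// Codeword rates in Q5 for the order-16 CB1 vectors (32 + 9*8 = 104 entries).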
 19908  var SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5 = [104]int16{
 19909  	int16(57), int16(98),
 19910  	int16(133), int16(134),
 19911  	int16(138), int16(144),
 19912  	int16(145), int16(147),
 19913  	int16(152), int16(177),
 19914  	int16(177), int16(178),
 19915  	int16(181), int16(183),
 19916  	int16(184), int16(202),
 19917  	int16(206), int16(215),
 19918  	int16(218), int16(222),
 19919  	int16(227), int16(227),
 19920  	int16(227), int16(227),
 19921  	int16(227), int16(227),
 19922  	int16(227), int16(227),
 19923  	int16(227), int16(227),
 19924  	int16(227), int16(227),
 19925  	int16(38), int16(87),
 19926  	int16(97), int16(119),
 19927  	int16(128), int16(135),
 19928  	int16(140), int16(143),
 19929  	int16(40), int16(81),
 19930  	int16(107), int16(107),
 19931  	int16(129), int16(134),
 19932  	int16(134), int16(143),
 19933  	int16(31), int16(109),
 19934  	int16(114), int16(120),
 19935  	int16(128), int16(130),
 19936  	int16(131), int16(132),
 19937  	int16(43), int16(61),
 19938  	int16(124), int16(125),
 19939  	int16(132), int16(136),
 19940  	int16(141), int16(142),
 19941  	int16(30), int16(110),
 19942  	int16(118), int16(120),
 19943  	int16(129), int16(131),
 19944  	int16(133), int16(133),
 19945  	int16(31), int16(108),
 19946  	int16(115), int16(121),
 19947  	int16(124), int16(130),
 19948  	int16(133), int16(137),
 19949  	int16(40), int16(98),
 19950  	int16(115), int16(115),
 19951  	int16(116), int16(117),
 19952  	int16(123), int16(124),
 19953  	int16(50), int16(93),
 19954  	int16(108), int16(110),
 19955  	int16(112), int16(112),
 19956  	int16(114), int16(115),
 19957  	int16(73), int16(95),
 19958  	int16(95), int16(96),
 19959  	int16(96), int16(105),
 19960  	int16(107), int16(110),
 19961  } /* SKP_Silk_tables_NLSF_CB1_16.c:184:17 */
 19962  
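// Minimum NLSF spacing in Q15 for the order-16 codebook (order + 1 = 17 entries).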
 19963  var SKP_Silk_NLSF_MSVQ_CB1_16_ndelta_min_Q15 = [17]int32{
 19964  	148,
 19965  	3,
 19966  	60,
 19967  	68,
 19968  	117,
 19969  	86,
 19970  	121,
 19971  	124,
 19972  	152,
 19973  	153,
 19974  	207,
 19975  	151,
 19976  	225,
 19977  	239,
 19978  	126,
 19979  	183,
 19980  	792,
 19981  } /* SKP_Silk_tables_NLSF_CB1_16.c:240:15 */
 19982  
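// Codebook vectors in Q15 for CB1, order 16: 104 vectors x 16 coefficients = 1664 values;
// as with the order-10 table, the first stage holds full vectors and later stages hold corrections.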
 19983  var SKP_Silk_NLSF_MSVQ_CB1_16_Q15 = [1664]int16{
 19984  	int16(1309), int16(3060), int16(5071), int16(6996),
 19985  	int16(9028), int16(10938), int16(12934), int16(14891),
 19986  	int16(16933), int16(18854), int16(20792), int16(22764),
 19987  	int16(24753), int16(26659), int16(28626), int16(30501),
 19988  	int16(1264), int16(2745), int16(4610), int16(6408),
 19989  	int16(8286), int16(10043), int16(12084), int16(14108),
 19990  	int16(16118), int16(18163), int16(20095), int16(22164),
 19991  	int16(24264), int16(26316), int16(28329), int16(30251),
 19992  	int16(1044), int16(2080), int16(3672), int16(5179),
 19993  	int16(7140), int16(9100), int16(11070), int16(13065),
 19994  	int16(15423), int16(17790), int16(19931), int16(22101),
 19995  	int16(24290), int16(26361), int16(28499), int16(30418),
 19996  	int16(1131), int16(2476), int16(4478), int16(6149),
 19997  	int16(7902), int16(9875), int16(11938), int16(13809),
 19998  	int16(15869), int16(17730), int16(19948), int16(21707),
 19999  	int16(23761), int16(25535), int16(27426), int16(28917),
 20000  	int16(1040), int16(2004), int16(4026), int16(6100),
 20001  	int16(8432), int16(10494), int16(12610), int16(14694),
 20002  	int16(16797), int16(18775), int16(20799), int16(22782),
 20003  	int16(24772), int16(26682), int16(28631), int16(30516),
 20004  	int16(2310), int16(3812), int16(5913), int16(7933),
 20005  	int16(10033), int16(11881), int16(13885), int16(15798),
 20006  	int16(17751), int16(19576), int16(21482), int16(23276),
 20007  	int16(25157), int16(27010), int16(28833), int16(30623),
 20008  	int16(1254), int16(2847), int16(5013), int16(6781),
 20009  	int16(8626), int16(10370), int16(12726), int16(14633),
 20010  	int16(16281), int16(17852), int16(19870), int16(21472),
 20011  	int16(23002), int16(24629), int16(26710), int16(27960),
 20012  	int16(1468), int16(3059), int16(4987), int16(7026),
 20013  	int16(8741), int16(10412), int16(12281), int16(14020),
 20014  	int16(15970), int16(17723), int16(19640), int16(21522),
 20015  	int16(23472), int16(25661), int16(27986), int16(30225),
 20016  	int16(2171), int16(3566), int16(5605), int16(7384),
 20017  	int16(9404), int16(11220), int16(13030), int16(14758),
 20018  	int16(16687), int16(18417), int16(20346), int16(22091),
 20019  	int16(24055), int16(26212), int16(28356), int16(30397),
 20020  	int16(2409), int16(4676), int16(7543), int16(9786),
 20021  	int16(11419), int16(12935), int16(14368), int16(15653),
 20022  	int16(17366), int16(18943), int16(20762), int16(22477),
 20023  	int16(24440), int16(26327), int16(28284), int16(30242),
 20024  	int16(2354), int16(4222), int16(6820), int16(9107),
 20025  	int16(11596), int16(13934), int16(15973), int16(17682),
 20026  	int16(19158), int16(20517), int16(21991), int16(23420),
 20027  	int16(25178), int16(26936), int16(28794), int16(30527),
 20028  	int16(1323), int16(2414), int16(4184), int16(6039),
 20029  	int16(7534), int16(9398), int16(11099), int16(13097),
 20030  	int16(14799), int16(16451), int16(18434), int16(20887),
 20031  	int16(23490), int16(25838), int16(28046), int16(30225),
 20032  	int16(1361), int16(3243), int16(6048), int16(8511),
 20033  	int16(11001), int16(13145), int16(15073), int16(16608),
 20034  	int16(18126), int16(19381), int16(20912), int16(22607),
 20035  	int16(24660), int16(26668), int16(28663), int16(30566),
 20036  	int16(1216), int16(2648), int16(5901), int16(8422),
 20037  	int16(10037), int16(11425), int16(12973), int16(14603),
 20038  	int16(16686), int16(18600), int16(20555), int16(22415),
 20039  	int16(24450), int16(26280), int16(28206), int16(30077),
 20040  	int16(2417), int16(4048), int16(6316), int16(8433),
 20041  	int16(10510), int16(12757), int16(15072), int16(17295),
 20042  	int16(19573), int16(21503), int16(23329), int16(24782),
 20043  	int16(26235), int16(27689), int16(29214), int16(30819),
 20044  	int16(1012), int16(2345), int16(4991), int16(7377),
 20045  	int16(9465), int16(11916), int16(14296), int16(16566),
 20046  	int16(18672), int16(20544), int16(22292), int16(23838),
 20047  	int16(25415), int16(27050), int16(28848), int16(30551),
 20048  	int16(1937), int16(3693), int16(6267), int16(8019),
 20049  	int16(10372), int16(12194), int16(14287), int16(15657),
 20050  	int16(17431), int16(18864), int16(20769), int16(22206),
 20051  	int16(24037), int16(25463), int16(27383), int16(28602),
 20052  	int16(1969), int16(3305), int16(5017), int16(6726),
 20053  	int16(8375), int16(9993), int16(11634), int16(13280),
 20054  	int16(15078), int16(16751), int16(18464), int16(20119),
 20055  	int16(21959), int16(23858), int16(26224), int16(29298),
 20056  	int16(1198), int16(2647), int16(5428), int16(7423),
 20057  	int16(9775), int16(12155), int16(14665), int16(16344),
 20058  	int16(18121), int16(19790), int16(21557), int16(22847),
 20059  	int16(24484), int16(25742), int16(27639), int16(28711),
 20060  	int16(1636), int16(3353), int16(5447), int16(7597),
 20061  	int16(9837), int16(11647), int16(13964), int16(16019),
 20062  	int16(17862), int16(20116), int16(22319), int16(24037),
 20063  	int16(25966), int16(28086), int16(29914), int16(31294),
 20064  	int16(2676), int16(4105), int16(6378), int16(8223),
 20065  	int16(10058), int16(11549), int16(13072), int16(14453),
 20066  	int16(15956), int16(17355), int16(18931), int16(20402),
 20067  	int16(22183), int16(23884), int16(25717), int16(27723),
 20068  	int16(1373), int16(2593), int16(4449), int16(5633),
 20069  	int16(7300), int16(8425), int16(9474), int16(10818),
 20070  	int16(12769), int16(15722), int16(19002), int16(21429),
 20071  	int16(23682), int16(25924), int16(28135), int16(30333),
 20072  	int16(1596), int16(3183), int16(5378), int16(7164),
 20073  	int16(8670), int16(10105), int16(11470), int16(12834),
 20074  	int16(13991), int16(15042), int16(16642), int16(17903),
 20075  	int16(20759), int16(25283), int16(27770), int16(30240),
 20076  	int16(2037), int16(3987), int16(6237), int16(8117),
 20077  	int16(9954), int16(12245), int16(14217), int16(15892),
 20078  	int16(17775), int16(20114), int16(22314), int16(25942),
 20079  	int16(26305), int16(26483), int16(26796), int16(28561),
 20080  	int16(2181), int16(3858), int16(5760), int16(7924),
 20081  	int16(10041), int16(11577), int16(13769), int16(15700),
 20082  	int16(17429), int16(19879), int16(23583), int16(24538),
 20083  	int16(25212), int16(25693), int16(28688), int16(30507),
 20084  	int16(1992), int16(3882), int16(6474), int16(7883),
 20085  	int16(9381), int16(12672), int16(14340), int16(15701),
 20086  	int16(16658), int16(17832), int16(20850), int16(22885),
 20087  	int16(24677), int16(26457), int16(28491), int16(30460),
 20088  	int16(2391), int16(3988), int16(5448), int16(7432),
 20089  	int16(11014), int16(12579), int16(13140), int16(14146),
 20090  	int16(15898), int16(18592), int16(21104), int16(22993),
 20091  	int16(24673), int16(27186), int16(28142), int16(29612),
 20092  	int16(1713), int16(5102), int16(6989), int16(7798),
 20093  	int16(8670), int16(10110), int16(12746), int16(14881),
 20094  	int16(16709), int16(18407), int16(20126), int16(22107),
 20095  	int16(24181), int16(26198), int16(28237), int16(30137),
 20096  	int16(1612), int16(3617), int16(6148), int16(8359),
 20097  	int16(9576), int16(11528), int16(14936), int16(17809),
 20098  	int16(18287), int16(18729), int16(19001), int16(21111),
 20099  	int16(24631), int16(26596), int16(28740), int16(30643),
 20100  	int16(2266), int16(4168), int16(7862), int16(9546),
 20101  	int16(9618), int16(9703), int16(10134), int16(13897),
 20102  	int16(16265), int16(18432), int16(20587), int16(22605),
 20103  	int16(24754), int16(26994), int16(29125), int16(30840),
 20104  	int16(1840), int16(3917), int16(6272), int16(7809),
 20105  	int16(9714), int16(11438), int16(13767), int16(15799),
 20106  	int16(19244), int16(21972), int16(22980), int16(23180),
 20107  	int16(23723), int16(25650), int16(29117), int16(31085),
 20108  	int16(1458), int16(3612), int16(6008), int16(7488),
 20109  	int16(9827), int16(11893), int16(14086), int16(15734),
 20110  	int16(17440), int16(19535), int16(22424), int16(24767),
 20111  	int16(29246), int16(29928), int16(30516), int16(30947),
 20112  	int16(-102), int16(-121), int16(-31), int16(-6),
 20113  	int16(5), int16(-2), int16(8), int16(-18),
 20114  	int16(-4), int16(6), int16(14), int16(-2),
 20115  	int16(-12), int16(-16), int16(-12), int16(-60),
 20116  	int16(-126), int16(-353), int16(-574), int16(-677),
 20117  	int16(-657), int16(-617), int16(-498), int16(-393),
 20118  	int16(-348), int16(-277), int16(-225), int16(-164),
 20119  	int16(-102), int16(-70), int16(-31), int16(33),
 20120  	int16(4), int16(379), int16(387), int16(551),
 20121  	int16(605), int16(620), int16(532), int16(482),
 20122  	int16(442), int16(454), int16(385), int16(347),
 20123  	int16(322), int16(299), int16(266), int16(200),
 20124  	int16(1168), int16(951), int16(672), int16(246),
 20125  	int16(60), int16(-161), int16(-259), int16(-234),
 20126  	int16(-253), int16(-282), int16(-203), int16(-187),
 20127  	int16(-155), int16(-176), int16(-198), int16(-178),
 20128  	int16(10), int16(170), int16(393), int16(609),
 20129  	int16(555), int16(208), int16(-330), int16(-571),
 20130  	int16(-769), int16(-633), int16(-319), int16(-43),
 20131  	int16(95), int16(105), int16(106), int16(116),
 20132  	int16(-152), int16(-140), int16(-125), int16(5),
 20133  	int16(173), int16(274), int16(264), int16(331),
 20134  	int16(-37), int16(-293), int16(-609), int16(-786),
 20135  	int16(-959), int16(-814), int16(-645), int16(-238),
 20136  	int16(-91), int16(36), int16(-11), int16(-101),
 20137  	int16(-279), int16(-227), int16(-40), int16(90),
 20138  	int16(530), int16(677), int16(890), int16(1104),
 20139  	int16(999), int16(835), int16(564), int16(295),
 20140  	int16(-280), int16(-364), int16(-340), int16(-331),
 20141  	int16(-284), int16(288), int16(761), int16(880),
 20142  	int16(988), int16(627), int16(146), int16(-226),
 20143  	int16(-203), int16(-181), int16(-142), int16(39),
 20144  	int16(24), int16(-26), int16(-107), int16(-92),
 20145  	int16(-161), int16(-135), int16(-131), int16(-88),
 20146  	int16(-160), int16(-156), int16(-75), int16(-43),
 20147  	int16(-36), int16(-6), int16(-33), int16(33),
 20148  	int16(-324), int16(-415), int16(-108), int16(124),
 20149  	int16(157), int16(191), int16(203), int16(197),
 20150  	int16(144), int16(109), int16(152), int16(176),
 20151  	int16(190), int16(122), int16(101), int16(159),
 20152  	int16(663), int16(668), int16(480), int16(400),
 20153  	int16(379), int16(444), int16(446), int16(458),
 20154  	int16(343), int16(351), int16(310), int16(228),
 20155  	int16(133), int16(44), int16(75), int16(63),
 20156  	int16(-84), int16(39), int16(-29), int16(35),
 20157  	int16(-94), int16(-233), int16(-261), int16(-354),
 20158  	int16(77), int16(262), int16(-24), int16(-145),
 20159  	int16(-333), int16(-409), int16(-404), int16(-597),
 20160  	int16(-488), int16(-300), int16(910), int16(592),
 20161  	int16(412), int16(120), int16(130), int16(-51),
 20162  	int16(-37), int16(-77), int16(-172), int16(-181),
 20163  	int16(-159), int16(-148), int16(-72), int16(-62),
 20164  	int16(510), int16(516), int16(113), int16(-585),
 20165  	int16(-1075), int16(-957), int16(-417), int16(-195),
 20166  	int16(9), int16(7), int16(-88), int16(-173),
 20167  	int16(-91), int16(54), int16(98), int16(95),
 20168  	int16(-28), int16(197), int16(-527), int16(-621),
 20169  	int16(157), int16(122), int16(-168), int16(147),
 20170  	int16(309), int16(300), int16(336), int16(315),
 20171  	int16(396), int16(408), int16(376), int16(106),
 20172  	int16(-162), int16(-170), int16(-315), int16(98),
 20173  	int16(821), int16(908), int16(570), int16(-33),
 20174  	int16(-312), int16(-568), int16(-572), int16(-378),
 20175  	int16(-107), int16(23), int16(156), int16(93),
 20176  	int16(-129), int16(-87), int16(20), int16(-72),
 20177  	int16(-37), int16(40), int16(21), int16(27),
 20178  	int16(48), int16(75), int16(77), int16(65),
 20179  	int16(46), int16(71), int16(66), int16(47),
 20180  	int16(136), int16(344), int16(236), int16(322),
 20181  	int16(170), int16(283), int16(269), int16(291),
 20182  	int16(162), int16(-43), int16(-204), int16(-259),
 20183  	int16(-240), int16(-305), int16(-350), int16(-312),
 20184  	int16(447), int16(348), int16(345), int16(257),
 20185  	int16(71), int16(-131), int16(-77), int16(-190),
 20186  	int16(-202), int16(-40), int16(35), int16(133),
 20187  	int16(261), int16(365), int16(438), int16(303),
 20188  	int16(-8), int16(22), int16(140), int16(137),
 20189  	int16(-300), int16(-641), int16(-764), int16(-268),
 20190  	int16(-23), int16(-25), int16(73), int16(-162),
 20191  	int16(-150), int16(-212), int16(-72), int16(6),
 20192  	int16(39), int16(78), int16(104), int16(-93),
 20193  	int16(-308), int16(-136), int16(117), int16(-71),
 20194  	int16(-513), int16(-820), int16(-700), int16(-450),
 20195  	int16(-161), int16(-23), int16(29), int16(78),
 20196  	int16(337), int16(106), int16(-406), int16(-782),
 20197  	int16(-112), int16(233), int16(383), int16(62),
 20198  	int16(-126), int16(6), int16(-77), int16(-29),
 20199  	int16(-146), int16(-123), int16(-51), int16(-27),
 20200  	int16(-27), int16(-381), int16(-641), int16(402),
 20201  	int16(539), int16(8), int16(-207), int16(-366),
 20202  	int16(-36), int16(-27), int16(-204), int16(-227),
 20203  	int16(-237), int16(-189), int16(-64), int16(51),
 20204  	int16(-92), int16(-137), int16(-281), int16(62),
 20205  	int16(233), int16(92), int16(148), int16(294),
 20206  	int16(363), int16(416), int16(564), int16(625),
 20207  	int16(370), int16(-36), int16(-469), int16(-462),
 20208  	int16(102), int16(168), int16(32), int16(117),
 20209  	int16(-21), int16(97), int16(139), int16(89),
 20210  	int16(104), int16(35), int16(4), int16(82),
 20211  	int16(66), int16(58), int16(73), int16(93),
 20212  	int16(-76), int16(-320), int16(-236), int16(-189),
 20213  	int16(-203), int16(-142), int16(-27), int16(-73),
 20214  	int16(9), int16(-9), int16(-25), int16(12),
 20215  	int16(-15), int16(4), int16(4), int16(-50),
 20216  	int16(314), int16(180), int16(162), int16(-49),
 20217  	int16(199), int16(-108), int16(-227), int16(-66),
 20218  	int16(-447), int16(-67), int16(-264), int16(-394),
 20219  	int16(5), int16(55), int16(-133), int16(-176),
 20220  	int16(-116), int16(-241), int16(272), int16(109),
 20221  	int16(282), int16(262), int16(192), int16(-64),
 20222  	int16(-392), int16(-514), int16(156), int16(203),
 20223  	int16(154), int16(72), int16(-34), int16(-160),
 20224  	int16(-73), int16(3), int16(-33), int16(-431),
 20225  	int16(321), int16(18), int16(-567), int16(-590),
 20226  	int16(-108), int16(88), int16(66), int16(51),
 20227  	int16(-31), int16(-193), int16(-46), int16(65),
 20228  	int16(-29), int16(-23), int16(215), int16(-31),
 20229  	int16(101), int16(-113), int16(32), int16(304),
 20230  	int16(88), int16(320), int16(448), int16(5),
 20231  	int16(-439), int16(-562), int16(-508), int16(-135),
 20232  	int16(-13), int16(-171), int16(-8), int16(182),
 20233  	int16(-99), int16(-181), int16(-149), int16(376),
 20234  	int16(476), int16(64), int16(-396), int16(-652),
 20235  	int16(-150), int16(176), int16(222), int16(65),
 20236  	int16(-590), int16(719), int16(271), int16(399),
 20237  	int16(245), int16(72), int16(-156), int16(-152),
 20238  	int16(-176), int16(59), int16(94), int16(125),
 20239  	int16(-9), int16(-7), int16(9), int16(1),
 20240  	int16(-61), int16(-116), int16(-82), int16(1),
 20241  	int16(79), int16(22), int16(-44), int16(-15),
 20242  	int16(-48), int16(-65), int16(-62), int16(-101),
 20243  	int16(-102), int16(-54), int16(-70), int16(-78),
 20244  	int16(-80), int16(-25), int16(398), int16(71),
 20245  	int16(139), int16(38), int16(90), int16(194),
 20246  	int16(222), int16(249), int16(165), int16(94),
 20247  	int16(221), int16(262), int16(163), int16(91),
 20248  	int16(-206), int16(573), int16(200), int16(-287),
 20249  	int16(-147), int16(5), int16(-18), int16(-85),
 20250  	int16(-74), int16(-125), int16(-87), int16(85),
 20251  	int16(141), int16(4), int16(-4), int16(28),
 20252  	int16(234), int16(48), int16(-150), int16(-111),
 20253  	int16(-506), int16(237), int16(-209), int16(345),
 20254  	int16(94), int16(-124), int16(77), int16(121),
 20255  	int16(143), int16(12), int16(-80), int16(-48),
 20256  	int16(191), int16(144), int16(-93), int16(-65),
 20257  	int16(-151), int16(-643), int16(435), int16(106),
 20258  	int16(87), int16(7), int16(65), int16(102),
 20259  	int16(94), int16(68), int16(5), int16(99),
 20260  	int16(222), int16(93), int16(94), int16(355),
 20261  	int16(-13), int16(-89), int16(-228), int16(-503),
 20262  	int16(287), int16(109), int16(108), int16(449),
 20263  	int16(253), int16(-29), int16(-109), int16(-116),
 20264  	int16(15), int16(-73), int16(-20), int16(131),
 20265  	int16(-147), int16(72), int16(59), int16(-150),
 20266  	int16(-594), int16(273), int16(316), int16(132),
 20267  	int16(199), int16(106), int16(198), int16(212),
 20268  	int16(220), int16(82), int16(45), int16(-13),
 20269  	int16(223), int16(137), int16(270), int16(38),
 20270  	int16(252), int16(135), int16(-177), int16(-207),
 20271  	int16(-360), int16(-102), int16(403), int16(406),
 20272  	int16(-14), int16(83), int16(64), int16(51),
 20273  	int16(-7), int16(-99), int16(-97), int16(-88),
 20274  	int16(-124), int16(-65), int16(42), int16(32),
 20275  	int16(28), int16(29), int16(12), int16(20),
 20276  	int16(119), int16(-26), int16(-212), int16(-201),
 20277  	int16(373), int16(251), int16(141), int16(103),
 20278  	int16(36), int16(-52), int16(66), int16(18),
 20279  	int16(-6), int16(-95), int16(-196), int16(5),
 20280  	int16(98), int16(-85), int16(-108), int16(218),
 20281  	int16(-164), int16(20), int16(356), int16(172),
 20282  	int16(37), int16(266), int16(23), int16(112),
 20283  	int16(-24), int16(-99), int16(-92), int16(-178),
 20284  	int16(29), int16(-278), int16(388), int16(-60),
 20285  	int16(-220), int16(300), int16(-13), int16(154),
 20286  	int16(191), int16(15), int16(-37), int16(-110),
 20287  	int16(-153), int16(-150), int16(-114), int16(-7),
 20288  	int16(-94), int16(-31), int16(-62), int16(-177),
 20289  	int16(4), int16(-70), int16(35), int16(453),
 20290  	int16(147), int16(-247), int16(-328), int16(101),
 20291  	int16(20), int16(-114), int16(147), int16(108),
 20292  	int16(-119), int16(-109), int16(-102), int16(-238),
 20293  	int16(55), int16(-102), int16(173), int16(-89),
 20294  	int16(129), int16(138), int16(-330), int16(-160),
 20295  	int16(485), int16(154), int16(-59), int16(-170),
 20296  	int16(-20), int16(-34), int16(-261), int16(-40),
 20297  	int16(-129), int16(77), int16(-84), int16(69),
 20298  	int16(83), int16(160), int16(169), int16(63),
 20299  	int16(-516), int16(30), int16(336), int16(52),
 20300  	int16(-0), int16(-52), int16(-124), int16(158),
 20301  	int16(19), int16(197), int16(-10), int16(-375),
 20302  	int16(405), int16(285), int16(114), int16(-395),
 20303  	int16(-47), int16(196), int16(62), int16(87),
 20304  	int16(-106), int16(-65), int16(-75), int16(-69),
 20305  	int16(-13), int16(34), int16(99), int16(59),
 20306  	int16(83), int16(98), int16(44), int16(0),
 20307  	int16(24), int16(18), int16(17), int16(70),
 20308  	int16(-22), int16(194), int16(208), int16(144),
 20309  	int16(-79), int16(-15), int16(32), int16(-104),
 20310  	int16(-28), int16(-105), int16(-186), int16(-212),
 20311  	int16(-228), int16(-79), int16(-76), int16(51),
 20312  	int16(-71), int16(72), int16(118), int16(-34),
 20313  	int16(-3), int16(-171), int16(5), int16(2),
 20314  	int16(-108), int16(-125), int16(62), int16(-58),
 20315  	int16(58), int16(-121), int16(73), int16(-466),
 20316  	int16(92), int16(63), int16(-94), int16(-78),
 20317  	int16(-76), int16(212), int16(36), int16(-225),
 20318  	int16(-71), int16(-354), int16(152), int16(143),
 20319  	int16(-79), int16(-246), int16(-51), int16(-31),
 20320  	int16(-6), int16(-270), int16(240), int16(210),
 20321  	int16(30), int16(-157), int16(-231), int16(74),
 20322  	int16(-146), int16(88), int16(-273), int16(156),
 20323  	int16(92), int16(56), int16(71), int16(2),
 20324  	int16(318), int16(164), int16(32), int16(-110),
 20325  	int16(-35), int16(-41), int16(-95), int16(-106),
 20326  	int16(11), int16(132), int16(-68), int16(55),
 20327  	int16(123), int16(-83), int16(-149), int16(212),
 20328  	int16(132), int16(0), int16(-194), int16(55),
 20329  	int16(206), int16(-108), int16(-353), int16(289),
 20330  	int16(-195), int16(1), int16(233), int16(-22),
 20331  	int16(-60), int16(20), int16(26), int16(68),
 20332  	int16(166), int16(27), int16(-58), int16(130),
 20333  	int16(112), int16(107), int16(27), int16(-165),
 20334  	int16(115), int16(-93), int16(-37), int16(38),
 20335  	int16(83), int16(483), int16(65), int16(-229),
 20336  	int16(-13), int16(157), int16(85), int16(50),
 20337  	int16(136), int16(10), int16(32), int16(83),
 20338  	int16(82), int16(55), int16(5), int16(-9),
 20339  	int16(-52), int16(-78), int16(-81), int16(-51),
 20340  	int16(40), int16(18), int16(-127), int16(-224),
 20341  	int16(-41), int16(53), int16(-210), int16(-113),
 20342  	int16(24), int16(-17), int16(-187), int16(-89),
 20343  	int16(8), int16(121), int16(83), int16(77),
 20344  	int16(91), int16(-74), int16(-35), int16(-112),
 20345  	int16(-161), int16(-173), int16(102), int16(132),
 20346  	int16(-125), int16(-61), int16(103), int16(-260),
 20347  	int16(52), int16(166), int16(-32), int16(-156),
 20348  	int16(-87), int16(-56), int16(60), int16(-70),
 20349  	int16(-124), int16(242), int16(114), int16(-251),
 20350  	int16(-166), int16(201), int16(127), int16(28),
 20351  	int16(-11), int16(23), int16(-80), int16(-115),
 20352  	int16(-20), int16(-51), int16(-348), int16(340),
 20353  	int16(-34), int16(133), int16(13), int16(92),
 20354  	int16(-124), int16(-136), int16(-120), int16(-26),
 20355  	int16(-6), int16(17), int16(28), int16(21),
 20356  	int16(120), int16(-168), int16(160), int16(-35),
 20357  	int16(115), int16(28), int16(9), int16(7),
 20358  	int16(-56), int16(39), int16(156), int16(256),
 20359  	int16(-18), int16(1), int16(277), int16(82),
 20360  	int16(-70), int16(-144), int16(-88), int16(-13),
 20361  	int16(-59), int16(-157), int16(8), int16(-134),
 20362  	int16(21), int16(-40), int16(58), int16(-21),
 20363  	int16(194), int16(-276), int16(97), int16(279),
 20364  	int16(-56), int16(-140), int16(125), int16(57),
 20365  	int16(-184), int16(-204), int16(-70), int16(-2),
 20366  	int16(128), int16(-202), int16(-78), int16(230),
 20367  	int16(-23), int16(161), int16(-102), int16(1),
 20368  	int16(1), int16(180), int16(-31), int16(-86),
 20369  	int16(-167), int16(-57), int16(-60), int16(27),
 20370  	int16(-13), int16(99), int16(108), int16(111),
 20371  	int16(76), int16(69), int16(34), int16(-21),
 20372  	int16(53), int16(38), int16(34), int16(78),
 20373  	int16(73), int16(219), int16(51), int16(15),
 20374  	int16(-72), int16(-103), int16(-207), int16(30),
 20375  	int16(213), int16(-14), int16(31), int16(-94),
 20376  	int16(-40), int16(-144), int16(67), int16(4),
 20377  	int16(105), int16(59), int16(-240), int16(25),
 20378  	int16(244), int16(69), int16(58), int16(23),
 20379  	int16(-24), int16(-5), int16(-15), int16(-133),
 20380  	int16(-71), int16(-67), int16(181), int16(29),
 20381  	int16(-45), int16(121), int16(96), int16(51),
 20382  	int16(-72), int16(-53), int16(56), int16(-153),
 20383  	int16(-27), int16(85), int16(183), int16(211),
 20384  	int16(105), int16(-34), int16(-46), int16(43),
 20385  	int16(-72), int16(-93), int16(36), int16(-128),
 20386  	int16(29), int16(111), int16(-95), int16(-156),
 20387  	int16(-179), int16(-235), int16(21), int16(-39),
 20388  	int16(-71), int16(-33), int16(-61), int16(-252),
 20389  	int16(230), int16(-131), int16(157), int16(-21),
 20390  	int16(-85), int16(-28), int16(-123), int16(80),
 20391  	int16(-160), int16(63), int16(47), int16(-6),
 20392  	int16(-49), int16(-96), int16(-19), int16(17),
 20393  	int16(-58), int16(17), int16(-0), int16(-13),
 20394  	int16(-170), int16(25), int16(-35), int16(59),
 20395  	int16(10), int16(-31), int16(-413), int16(81),
 20396  	int16(62), int16(18), int16(-164), int16(245),
 20397  	int16(92), int16(-165), int16(42), int16(26),
 20398  	int16(126), int16(-248), int16(193), int16(-55),
 20399  	int16(16), int16(39), int16(14), int16(50),
 20400  } /* SKP_Silk_tables_NLSF_CB1_16.c:261:17 */
 20401  
 20402  var SKP_Silk_NLSF_CB1_16_Stage_info = [10]SKP_Silk_NLSF_CBS{
 20403  	{FnVectors: 32, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20404  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20405  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20406  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20407  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20408  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20409  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20410  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20411  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20412  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20413  } /* SKP_Silk_tables_NLSF_CB1_16.c:681:25 */
 20414  
 20415  var SKP_Silk_NLSF_CB1_16 = SKP_Silk_NLSF_CB_struct{
 20416  	FnStages:       10,
 20417  	FCBStages:      0,
 20418  	FNDeltaMin_Q15: 0,
 20419  	FCDF:           0,
 20420  	FStartPtr:      0,
 20421  	FMiddleIx:      0,
 20422  } /* SKP_Silk_tables_NLSF_CB1_16.c:695:31 */
 20423  
 20424  /* Piece-wise linear mapping from bitrate in bits per second to coding quality in dB SNR */
 20425  var TargetRate_table_NB = [8]int32{
 20426  	0, 8000, 9000, 11000, 13000, 16000, 22000, 100000,
 20427  } /* SKP_Silk_tables_other.c:37:17 */
 20428  var TargetRate_table_MB = [8]int32{
 20429  	0, 10000, 12000, 14000, 17000, 21000, 28000, 100000,
 20430  } /* SKP_Silk_tables_other.c:40:17 */
 20431  var TargetRate_table_WB = [8]int32{
 20432  	0, 11000, 14000, 17000, 21000, 26000, 36000, 100000,
 20433  } /* SKP_Silk_tables_other.c:43:17 */
 20434  var TargetRate_table_SWB = [8]int32{
 20435  	0, 13000, 16000, 19000, 25000, 32000, 46000, 100000,
 20436  } /* SKP_Silk_tables_other.c:46:17 */
 20437  var SNR_table_Q1 = [8]int32{
 20438  	19, 31, 35, 39, 43, 47, 54, 64,
 20439  } /* SKP_Silk_tables_other.c:49:17 */
 20440  
 20441  var SNR_table_one_bit_per_sample_Q7 = [4]int32{
 20442  	1984, 2240, 2408, 2708,
 20443  } /* SKP_Silk_tables_other.c:53:17 */
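// Illustrative sketch, not part of the generated translation: the tables above are
// the break points of the piece-wise linear mapping from target bitrate (in bits
// per second) to coding quality (SNR in dB, stored in Q1). The helper below is a
// hypothetical example of how such a mapping can be evaluated; the encoder's own
// fixed-point conventions and clamping may differ.
func exampleTargetRateToSNR_Q7(targetRateBps int32, rateTable *[8]int32) int32 {
	for k := 1; k < 8; k++ {
		if targetRateBps <= rateTable[k] {
			// Fractional position inside the interval, in Q6.
			fracQ6 := ((targetRateBps - rateTable[k-1]) << 6) / (rateTable[k] - rateTable[k-1])
			// Interpolate the Q1 SNR values and return the result in Q7.
			return (SNR_table_Q1[k-1] << 6) + fracQ6*(SNR_table_Q1[k]-SNR_table_Q1[k-1])
		}
	}
	// Above the last break point: clamp to the highest SNR entry.
	return SNR_table_Q1[7] << 6
}

// For example, exampleTargetRateToSNR_Q7(12000, &TargetRate_table_WB) would
// interpolate between the 11000 and 14000 break points of the wideband table.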
 20444  
 20445  /* Filter coefficients for HP filter, implemented as cascaded biquad (second-order) filters */
 20446  var SKP_Silk_SWB_detect_B_HP_Q13 = [3][3]int16{
 20447  	//{400, -550, 400}, {400, 130, 400}, {400, 390, 400}
 20448  	{int16(575), int16(-948), int16(575)}, {int16(575), int16(-221), int16(575)}, {int16(575), int16(104), int16(575)},
 20449  } /* SKP_Silk_tables_other.c:58:17 */
 20450  var SKP_Silk_SWB_detect_A_HP_Q13 = [3][2]int16{
 20451  	{int16(14613), int16(6868)}, {int16(12883), int16(7337)}, {int16(11586), int16(7911)},
 20452  	//{14880, 6900}, {14400, 7300}, {13700, 7800}
 20453  } /* SKP_Silk_tables_other.c:62:17 */
 20454  
 20455  /* Decoder high-pass filter coefficients for 24 kHz sampling, -6 dB @ 44 Hz */
 20456  var SKP_Silk_Dec_A_HP_24 = [2]int16{int16(-16220), int16(8030)}              /* SKP_Silk_tables_other.c:68:17 */ // second order AR coefs, Q13
 20457  var SKP_Silk_Dec_B_HP_24 = [3]int16{int16(8000), int16(-16000), int16(8000)} /* SKP_Silk_tables_other.c:69:17 */ // second order MA coefs, Q13
 20458  
 20459  /* Decoder high-pass filter coefficients for 16 kHz sampling, -6 dB @ 46 Hz */
 20460  var SKP_Silk_Dec_A_HP_16 = [2]int16{int16(-16127), int16(7940)}              /* SKP_Silk_tables_other.c:72:17 */ // second order AR coefs, Q13
 20461  var SKP_Silk_Dec_B_HP_16 = [3]int16{int16(8000), int16(-16000), int16(8000)} /* SKP_Silk_tables_other.c:73:17 */ // second order MA coefs, Q13
 20462  
 20463  /* Decoder high-pass filter coefficients for 12 kHz sampling, -6 dB @ 44 Hz */
 20464  var SKP_Silk_Dec_A_HP_12 = [2]int16{int16(-16043), int16(7859)}              /* SKP_Silk_tables_other.c:76:17 */ // second order AR coefs, Q13
 20465  var SKP_Silk_Dec_B_HP_12 = [3]int16{int16(8000), int16(-16000), int16(8000)} /* SKP_Silk_tables_other.c:77:17 */ // second order MA coefs, Q13
 20466  
 20467  /* Decoder high-pass filter coefficients for 8 kHz sampling, -6 dB @ 43 Hz */
 20468  var SKP_Silk_Dec_A_HP_8 = [2]int16{int16(-15885), int16(7710)}              /* SKP_Silk_tables_other.c:80:17 */ // second order AR coefs, Q13
 20469  var SKP_Silk_Dec_B_HP_8 = [3]int16{int16(8000), int16(-16000), int16(8000)} /* SKP_Silk_tables_other.c:81:17 */ // second order MA coefs, Q13
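// Illustrative sketch, not part of the generated translation: the decoder high-pass
// filters above are second-order (biquad) filters with Q13 coefficients; the B
// arrays hold the MA taps and the A arrays hold the AR taps a1, a2 (the leading 1
// is implied). The hypothetical helper below runs a plain direct-form I recursion;
// the library's own biquad routine keeps its state differently and saturates, so
// this only shows the arithmetic.
func exampleBiquadQ13(in []int16, B *[3]int16, A *[2]int16) []int16 {
	out := make([]int16, len(in))
	var x1, x2, y1, y2 int64 // previous inputs and outputs
	for n, s := range in {
		x0 := int64(s)
		// y[n] = (B0*x[n] + B1*x[n-1] + B2*x[n-2] - A0*y[n-1] - A1*y[n-2]) >> 13
		acc := int64(B[0])*x0 + int64(B[1])*x1 + int64(B[2])*x2 -
			int64(A[0])*y1 - int64(A[1])*y2
		y0 := acc >> 13
		out[n] = int16(y0) // a real implementation would saturate here
		x2, x1 = x1, x0
		y2, y1 = y1, y0
	}
	return out
}

// e.g. exampleBiquadQ13(pcm, &SKP_Silk_Dec_B_HP_16, &SKP_Silk_Dec_A_HP_16) applies
// the 16 kHz decoder high-pass filter to a slice of samples.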
 20470  
 20471  /* table for LSB coding */
 20472  var SKP_Silk_lsb_CDF = [3]uint16{uint16(0), uint16(40000), uint16(65535)} /* SKP_Silk_tables_other.c:84:18 */
 20473  
 20474  /* tables for LTPScale */
 20475  var SKP_Silk_LTPscale_CDF = [4]uint16{uint16(0), uint16(32000), uint16(48000), uint16(65535)} /* SKP_Silk_tables_other.c:87:18 */
 20476  var SKP_Silk_LTPscale_offset int32 = 2                                                        /* SKP_Silk_tables_other.c:88:18 */
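// Illustrative sketch, not part of the generated translation: the uint16 tables in
// this group are cumulative distribution functions over the 16-bit range (each
// starts at 0 and ends at 65535). Decoding a symbol amounts to finding which CDF
// interval contains the value produced by the range decoder; the accompanying
// *_offset constants appear to give the decoder a starting index near the most
// probable symbol, which affects only search speed, not the result.
func exampleSymbolFromCDF(cdf []uint16, target uint16) int {
	// Return i such that cdf[i] <= target < cdf[i+1].
	for i := 0; i+1 < len(cdf); i++ {
		if target < cdf[i+1] {
			return i
		}
	}
	return len(cdf) - 2
}

// For example, with SKP_Silk_LTPscale_CDF above, targets below 32000 map to
// symbol 0, targets in [32000, 48000) to symbol 1, and the rest to symbol 2.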
 20477  
 20478  /* tables for VAD flag */
 20479  var SKP_Silk_vadflag_CDF = [3]uint16{uint16(0), uint16(22000), uint16(65535)} /* SKP_Silk_tables_other.c:91:18 */ // 66% for speech, 33% for no speech
 20480  var SKP_Silk_vadflag_offset int32 = 1                                         /* SKP_Silk_tables_other.c:92:18 */
 20481  
 20482  /* tables for sampling rate */
 20483  var SKP_Silk_SamplingRates_table = [4]int32{8, 12, 16, 24}                                                        /* SKP_Silk_tables_other.c:95:18 */
 20484  var SKP_Silk_SamplingRates_CDF = [5]uint16{uint16(0), uint16(16000), uint16(32000), uint16(48000), uint16(65535)} /* SKP_Silk_tables_other.c:96:18 */
 20485  var SKP_Silk_SamplingRates_offset int32 = 2                                                                       /* SKP_Silk_tables_other.c:97:18 */
 20486  
 20487  /* tables for NLSF interpolation factor */
 20488  var SKP_Silk_NLSF_interpolation_factor_CDF = [6]uint16{uint16(0), uint16(3706), uint16(8703), uint16(19226), uint16(30926), uint16(65535)} /* SKP_Silk_tables_other.c:100:18 */
 20489  var SKP_Silk_NLSF_interpolation_factor_offset int32 = 4                                                                                    /* SKP_Silk_tables_other.c:101:18 */
 20490  
 20491  /* Table for frame termination indication */
 20492  var SKP_Silk_FrameTermination_CDF = [5]uint16{uint16(0), uint16(20000), uint16(45000), uint16(56000), uint16(65535)} /* SKP_Silk_tables_other.c:104:18 */
 20493  var SKP_Silk_FrameTermination_offset int32 = 2                                                                       /* SKP_Silk_tables_other.c:105:18 */
 20494  
 20495  /* Table for random seed */
 20496  var SKP_Silk_Seed_CDF = [5]uint16{uint16(0), uint16(16384), uint16(32768), uint16(49152), uint16(65535)} /* SKP_Silk_tables_other.c:108:18 */
 20497  var SKP_Silk_Seed_offset int32 = 2                                                                       /* SKP_Silk_tables_other.c:109:18 */
 20498  
 20499  /* Quantization offsets */
 20500  var SKP_Silk_Quantization_Offsets_Q10 = [2][2]int16{
 20501  	{int16(32), int16(100)}, {int16(100), int16(256)},
 20502  } /* SKP_Silk_tables_other.c:112:18 */
 20503  
 20504  /* Table for LTPScale */
 20505  var SKP_Silk_LTPScales_table_Q14 = [3]int16{int16(15565), int16(11469), int16(8192)} /* SKP_Silk_tables_other.c:117:17 */
 20506  
 20507  /*  Elliptic/Cauer filters designed with 0.1 dB passband ripple,
 20508      80 dB minimum stopband attenuation, and
 20509      [0.95 : 0.15 : 0.35] normalized cut off frequencies. */
 20510  
 20511  /* Interpolation points for filter coefficients used in the bandwidth transition smoother */
 20512  var SKP_Silk_Transition_LP_B_Q28 = [5][3]int32{
 20513  	{250767114, 501534038, 250767114},
 20514  	{209867381, 419732057, 209867381},
 20515  	{170987846, 341967853, 170987846},
 20516  	{131531482, 263046905, 131531482},
 20517  	{89306658, 178584282, 89306658},
 20518  } /* SKP_Silk_tables_other.c:125:17 */
 20519  
 20520  /* Interpolation points for filter coefficients used in the bandwidth transition smoother */
 20521  var SKP_Silk_Transition_LP_A_Q28 = [5][2]int32{
 20522  	{506393414, 239854379},
 20523  	{411067935, 169683996},
 20524  	{306733530, 116694253},
 20525  	{185807084, 77959395},
 20526  	{35497197, 57401098},
 20527  } /* SKP_Silk_tables_other.c:135:17 */
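// Illustrative sketch, not part of the generated translation: the bandwidth
// transition smoother blends between two adjacent rows of the Q28 coefficient
// tables above. In this hypothetical helper, ind selects the lower row (0..3) and
// facQ16 is the fractional position between rows ind and ind+1 in Q16; the
// library's own interpolation and rounding may differ in detail.
func exampleInterpolateLPCoefs(ind int, facQ16 int32) (B [3]int32, A [2]int32) {
	for k := 0; k < 3; k++ {
		d := SKP_Silk_Transition_LP_B_Q28[ind+1][k] - SKP_Silk_Transition_LP_B_Q28[ind][k]
		B[k] = SKP_Silk_Transition_LP_B_Q28[ind][k] + int32((int64(d)*int64(facQ16))>>16)
	}
	for k := 0; k < 2; k++ {
		d := SKP_Silk_Transition_LP_A_Q28[ind+1][k] - SKP_Silk_Transition_LP_A_Q28[ind][k]
		A[k] = SKP_Silk_Transition_LP_A_Q28[ind][k] + int32((int64(d)*int64(facQ16))>>16)
	}
	return B, A
}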
 20528  
 20529  var SKP_Silk_pitch_lag_NB_CDF = [130]uint16{
 20530  	uint16(0), uint16(194), uint16(395), uint16(608), uint16(841), uint16(1099), uint16(1391), uint16(1724),
 20531  	uint16(2105), uint16(2544), uint16(3047), uint16(3624), uint16(4282), uint16(5027), uint16(5865), uint16(6799),
 20532  	uint16(7833), uint16(8965), uint16(10193), uint16(11510), uint16(12910), uint16(14379), uint16(15905), uint16(17473),
 20533  	uint16(19065), uint16(20664), uint16(22252), uint16(23814), uint16(25335), uint16(26802), uint16(28206), uint16(29541),
 20534  	uint16(30803), uint16(31992), uint16(33110), uint16(34163), uint16(35156), uint16(36098), uint16(36997), uint16(37861),
 20535  	uint16(38698), uint16(39515), uint16(40319), uint16(41115), uint16(41906), uint16(42696), uint16(43485), uint16(44273),
 20536  	uint16(45061), uint16(45847), uint16(46630), uint16(47406), uint16(48175), uint16(48933), uint16(49679), uint16(50411),
 20537  	uint16(51126), uint16(51824), uint16(52502), uint16(53161), uint16(53799), uint16(54416), uint16(55011), uint16(55584),
 20538  	uint16(56136), uint16(56666), uint16(57174), uint16(57661), uint16(58126), uint16(58570), uint16(58993), uint16(59394),
 20539  	uint16(59775), uint16(60134), uint16(60472), uint16(60790), uint16(61087), uint16(61363), uint16(61620), uint16(61856),
 20540  	uint16(62075), uint16(62275), uint16(62458), uint16(62625), uint16(62778), uint16(62918), uint16(63045), uint16(63162),
 20541  	uint16(63269), uint16(63368), uint16(63459), uint16(63544), uint16(63623), uint16(63698), uint16(63769), uint16(63836),
 20542  	uint16(63901), uint16(63963), uint16(64023), uint16(64081), uint16(64138), uint16(64194), uint16(64248), uint16(64301),
 20543  	uint16(64354), uint16(64406), uint16(64457), uint16(64508), uint16(64558), uint16(64608), uint16(64657), uint16(64706),
 20544  	uint16(64754), uint16(64803), uint16(64851), uint16(64899), uint16(64946), uint16(64994), uint16(65041), uint16(65088),
 20545  	uint16(65135), uint16(65181), uint16(65227), uint16(65272), uint16(65317), uint16(65361), uint16(65405), uint16(65449),
 20546  	uint16(65492), uint16(65535),
 20547  } /* SKP_Silk_tables_pitch_lag.c:30:18 */
 20548  
 20549  var SKP_Silk_pitch_lag_NB_CDF_offset int32 = 43 /* SKP_Silk_tables_pitch_lag.c:50:15 */
 20550  
 20551  var SKP_Silk_pitch_contour_NB_CDF = [12]uint16{
 20552  	uint16(0), uint16(14445), uint16(18587), uint16(25628), uint16(30013), uint16(34859), uint16(40597), uint16(48426),
 20553  	uint16(54460), uint16(59033), uint16(62990), uint16(65535),
 20554  } /* SKP_Silk_tables_pitch_lag.c:52:18 */
 20555  
 20556  var SKP_Silk_pitch_contour_NB_CDF_offset int32 = 5 /* SKP_Silk_tables_pitch_lag.c:57:15 */
 20557  
 20558  var SKP_Silk_pitch_lag_MB_CDF = [194]uint16{
 20559  	uint16(0), uint16(132), uint16(266), uint16(402), uint16(542), uint16(686), uint16(838), uint16(997),
 20560  	uint16(1167), uint16(1349), uint16(1546), uint16(1760), uint16(1993), uint16(2248), uint16(2528), uint16(2835),
 20561  	uint16(3173), uint16(3544), uint16(3951), uint16(4397), uint16(4882), uint16(5411), uint16(5984), uint16(6604),
 20562  	uint16(7270), uint16(7984), uint16(8745), uint16(9552), uint16(10405), uint16(11300), uint16(12235), uint16(13206),
 20563  	uint16(14209), uint16(15239), uint16(16289), uint16(17355), uint16(18430), uint16(19507), uint16(20579), uint16(21642),
 20564  	uint16(22688), uint16(23712), uint16(24710), uint16(25677), uint16(26610), uint16(27507), uint16(28366), uint16(29188),
 20565  	uint16(29971), uint16(30717), uint16(31427), uint16(32104), uint16(32751), uint16(33370), uint16(33964), uint16(34537),
 20566  	uint16(35091), uint16(35630), uint16(36157), uint16(36675), uint16(37186), uint16(37692), uint16(38195), uint16(38697),
 20567  	uint16(39199), uint16(39701), uint16(40206), uint16(40713), uint16(41222), uint16(41733), uint16(42247), uint16(42761),
 20568  	uint16(43277), uint16(43793), uint16(44309), uint16(44824), uint16(45336), uint16(45845), uint16(46351), uint16(46851),
 20569  	uint16(47347), uint16(47836), uint16(48319), uint16(48795), uint16(49264), uint16(49724), uint16(50177), uint16(50621),
 20570  	uint16(51057), uint16(51484), uint16(51902), uint16(52312), uint16(52714), uint16(53106), uint16(53490), uint16(53866),
 20571  	uint16(54233), uint16(54592), uint16(54942), uint16(55284), uint16(55618), uint16(55944), uint16(56261), uint16(56571),
 20572  	uint16(56873), uint16(57167), uint16(57453), uint16(57731), uint16(58001), uint16(58263), uint16(58516), uint16(58762),
 20573  	uint16(58998), uint16(59226), uint16(59446), uint16(59656), uint16(59857), uint16(60050), uint16(60233), uint16(60408),
 20574  	uint16(60574), uint16(60732), uint16(60882), uint16(61024), uint16(61159), uint16(61288), uint16(61410), uint16(61526),
 20575  	uint16(61636), uint16(61742), uint16(61843), uint16(61940), uint16(62033), uint16(62123), uint16(62210), uint16(62293),
 20576  	uint16(62374), uint16(62452), uint16(62528), uint16(62602), uint16(62674), uint16(62744), uint16(62812), uint16(62879),
 20577  	uint16(62945), uint16(63009), uint16(63072), uint16(63135), uint16(63196), uint16(63256), uint16(63316), uint16(63375),
 20578  	uint16(63434), uint16(63491), uint16(63549), uint16(63605), uint16(63661), uint16(63717), uint16(63772), uint16(63827),
 20579  	uint16(63881), uint16(63935), uint16(63988), uint16(64041), uint16(64094), uint16(64147), uint16(64199), uint16(64252),
 20580  	uint16(64304), uint16(64356), uint16(64409), uint16(64461), uint16(64513), uint16(64565), uint16(64617), uint16(64669),
 20581  	uint16(64721), uint16(64773), uint16(64824), uint16(64875), uint16(64925), uint16(64975), uint16(65024), uint16(65072),
 20582  	uint16(65121), uint16(65168), uint16(65215), uint16(65262), uint16(65308), uint16(65354), uint16(65399), uint16(65445),
 20583  	uint16(65490), uint16(65535),
 20584  } /* SKP_Silk_tables_pitch_lag.c:59:18 */
 20585  
 20586  var SKP_Silk_pitch_lag_MB_CDF_offset int32 = 64 /* SKP_Silk_tables_pitch_lag.c:87:15 */
 20587  
 20588  var SKP_Silk_pitch_lag_WB_CDF = [258]uint16{
 20589  	uint16(0), uint16(106), uint16(213), uint16(321), uint16(429), uint16(539), uint16(651), uint16(766),
 20590  	uint16(884), uint16(1005), uint16(1132), uint16(1264), uint16(1403), uint16(1549), uint16(1705), uint16(1870),
 20591  	uint16(2047), uint16(2236), uint16(2439), uint16(2658), uint16(2893), uint16(3147), uint16(3420), uint16(3714),
 20592  	uint16(4030), uint16(4370), uint16(4736), uint16(5127), uint16(5546), uint16(5993), uint16(6470), uint16(6978),
 20593  	uint16(7516), uint16(8086), uint16(8687), uint16(9320), uint16(9985), uint16(10680), uint16(11405), uint16(12158),
 20594  	uint16(12938), uint16(13744), uint16(14572), uint16(15420), uint16(16286), uint16(17166), uint16(18057), uint16(18955),
 20595  	uint16(19857), uint16(20759), uint16(21657), uint16(22547), uint16(23427), uint16(24293), uint16(25141), uint16(25969),
 20596  	uint16(26774), uint16(27555), uint16(28310), uint16(29037), uint16(29736), uint16(30406), uint16(31048), uint16(31662),
 20597  	uint16(32248), uint16(32808), uint16(33343), uint16(33855), uint16(34345), uint16(34815), uint16(35268), uint16(35704),
 20598  	uint16(36127), uint16(36537), uint16(36938), uint16(37330), uint16(37715), uint16(38095), uint16(38471), uint16(38844),
 20599  	uint16(39216), uint16(39588), uint16(39959), uint16(40332), uint16(40707), uint16(41084), uint16(41463), uint16(41844),
 20600  	uint16(42229), uint16(42615), uint16(43005), uint16(43397), uint16(43791), uint16(44186), uint16(44583), uint16(44982),
 20601  	uint16(45381), uint16(45780), uint16(46179), uint16(46578), uint16(46975), uint16(47371), uint16(47765), uint16(48156),
 20602  	uint16(48545), uint16(48930), uint16(49312), uint16(49690), uint16(50064), uint16(50433), uint16(50798), uint16(51158),
 20603  	uint16(51513), uint16(51862), uint16(52206), uint16(52544), uint16(52877), uint16(53204), uint16(53526), uint16(53842),
 20604  	uint16(54152), uint16(54457), uint16(54756), uint16(55050), uint16(55338), uint16(55621), uint16(55898), uint16(56170),
 20605  	uint16(56436), uint16(56697), uint16(56953), uint16(57204), uint16(57449), uint16(57689), uint16(57924), uint16(58154),
 20606  	uint16(58378), uint16(58598), uint16(58812), uint16(59022), uint16(59226), uint16(59426), uint16(59620), uint16(59810),
 20607  	uint16(59994), uint16(60173), uint16(60348), uint16(60517), uint16(60681), uint16(60840), uint16(60993), uint16(61141),
 20608  	uint16(61284), uint16(61421), uint16(61553), uint16(61679), uint16(61800), uint16(61916), uint16(62026), uint16(62131),
 20609  	uint16(62231), uint16(62326), uint16(62417), uint16(62503), uint16(62585), uint16(62663), uint16(62737), uint16(62807),
 20610  	uint16(62874), uint16(62938), uint16(62999), uint16(63057), uint16(63113), uint16(63166), uint16(63217), uint16(63266),
 20611  	uint16(63314), uint16(63359), uint16(63404), uint16(63446), uint16(63488), uint16(63528), uint16(63567), uint16(63605),
 20612  	uint16(63642), uint16(63678), uint16(63713), uint16(63748), uint16(63781), uint16(63815), uint16(63847), uint16(63879),
 20613  	uint16(63911), uint16(63942), uint16(63973), uint16(64003), uint16(64033), uint16(64063), uint16(64092), uint16(64121),
 20614  	uint16(64150), uint16(64179), uint16(64207), uint16(64235), uint16(64263), uint16(64291), uint16(64319), uint16(64347),
 20615  	uint16(64374), uint16(64401), uint16(64428), uint16(64455), uint16(64481), uint16(64508), uint16(64534), uint16(64560),
 20616  	uint16(64585), uint16(64610), uint16(64635), uint16(64660), uint16(64685), uint16(64710), uint16(64734), uint16(64758),
 20617  	uint16(64782), uint16(64807), uint16(64831), uint16(64855), uint16(64878), uint16(64902), uint16(64926), uint16(64950),
 20618  	uint16(64974), uint16(64998), uint16(65022), uint16(65045), uint16(65069), uint16(65093), uint16(65116), uint16(65139),
 20619  	uint16(65163), uint16(65186), uint16(65209), uint16(65231), uint16(65254), uint16(65276), uint16(65299), uint16(65321),
 20620  	uint16(65343), uint16(65364), uint16(65386), uint16(65408), uint16(65429), uint16(65450), uint16(65471), uint16(65493),
 20621  	uint16(65514), uint16(65535),
 20622  } /* SKP_Silk_tables_pitch_lag.c:89:18 */
 20623  
 20624  var SKP_Silk_pitch_lag_WB_CDF_offset int32 = 86 /* SKP_Silk_tables_pitch_lag.c:125:15 */
 20625  
 20626  var SKP_Silk_pitch_lag_SWB_CDF = [386]uint16{
 20627  	uint16(0), uint16(253), uint16(505), uint16(757), uint16(1008), uint16(1258), uint16(1507), uint16(1755),
 20628  	uint16(2003), uint16(2249), uint16(2494), uint16(2738), uint16(2982), uint16(3225), uint16(3469), uint16(3713),
 20629  	uint16(3957), uint16(4202), uint16(4449), uint16(4698), uint16(4949), uint16(5203), uint16(5460), uint16(5720),
 20630  	uint16(5983), uint16(6251), uint16(6522), uint16(6798), uint16(7077), uint16(7361), uint16(7650), uint16(7942),
 20631  	uint16(8238), uint16(8539), uint16(8843), uint16(9150), uint16(9461), uint16(9775), uint16(10092), uint16(10411),
 20632  	uint16(10733), uint16(11057), uint16(11383), uint16(11710), uint16(12039), uint16(12370), uint16(12701), uint16(13034),
 20633  	uint16(13368), uint16(13703), uint16(14040), uint16(14377), uint16(14716), uint16(15056), uint16(15398), uint16(15742),
 20634  	uint16(16087), uint16(16435), uint16(16785), uint16(17137), uint16(17492), uint16(17850), uint16(18212), uint16(18577),
 20635  	uint16(18946), uint16(19318), uint16(19695), uint16(20075), uint16(20460), uint16(20849), uint16(21243), uint16(21640),
 20636  	uint16(22041), uint16(22447), uint16(22856), uint16(23269), uint16(23684), uint16(24103), uint16(24524), uint16(24947),
 20637  	uint16(25372), uint16(25798), uint16(26225), uint16(26652), uint16(27079), uint16(27504), uint16(27929), uint16(28352),
 20638  	uint16(28773), uint16(29191), uint16(29606), uint16(30018), uint16(30427), uint16(30831), uint16(31231), uint16(31627),
 20639  	uint16(32018), uint16(32404), uint16(32786), uint16(33163), uint16(33535), uint16(33902), uint16(34264), uint16(34621),
 20640  	uint16(34973), uint16(35320), uint16(35663), uint16(36000), uint16(36333), uint16(36662), uint16(36985), uint16(37304),
 20641  	uint16(37619), uint16(37929), uint16(38234), uint16(38535), uint16(38831), uint16(39122), uint16(39409), uint16(39692),
 20642  	uint16(39970), uint16(40244), uint16(40513), uint16(40778), uint16(41039), uint16(41295), uint16(41548), uint16(41796),
 20643  	uint16(42041), uint16(42282), uint16(42520), uint16(42754), uint16(42985), uint16(43213), uint16(43438), uint16(43660),
 20644  	uint16(43880), uint16(44097), uint16(44312), uint16(44525), uint16(44736), uint16(44945), uint16(45153), uint16(45359),
 20645  	uint16(45565), uint16(45769), uint16(45972), uint16(46175), uint16(46377), uint16(46578), uint16(46780), uint16(46981),
 20646  	uint16(47182), uint16(47383), uint16(47585), uint16(47787), uint16(47989), uint16(48192), uint16(48395), uint16(48599),
 20647  	uint16(48804), uint16(49009), uint16(49215), uint16(49422), uint16(49630), uint16(49839), uint16(50049), uint16(50259),
 20648  	uint16(50470), uint16(50682), uint16(50894), uint16(51107), uint16(51320), uint16(51533), uint16(51747), uint16(51961),
 20649  	uint16(52175), uint16(52388), uint16(52601), uint16(52813), uint16(53025), uint16(53236), uint16(53446), uint16(53655),
 20650  	uint16(53863), uint16(54069), uint16(54274), uint16(54477), uint16(54679), uint16(54879), uint16(55078), uint16(55274),
 20651  	uint16(55469), uint16(55662), uint16(55853), uint16(56042), uint16(56230), uint16(56415), uint16(56598), uint16(56779),
 20652  	uint16(56959), uint16(57136), uint16(57311), uint16(57484), uint16(57654), uint16(57823), uint16(57989), uint16(58152),
 20653  	uint16(58314), uint16(58473), uint16(58629), uint16(58783), uint16(58935), uint16(59084), uint16(59230), uint16(59373),
 20654  	uint16(59514), uint16(59652), uint16(59787), uint16(59919), uint16(60048), uint16(60174), uint16(60297), uint16(60417),
 20655  	uint16(60533), uint16(60647), uint16(60757), uint16(60865), uint16(60969), uint16(61070), uint16(61167), uint16(61262),
 20656  	uint16(61353), uint16(61442), uint16(61527), uint16(61609), uint16(61689), uint16(61765), uint16(61839), uint16(61910),
 20657  	uint16(61979), uint16(62045), uint16(62109), uint16(62170), uint16(62230), uint16(62287), uint16(62343), uint16(62396),
 20658  	uint16(62448), uint16(62498), uint16(62547), uint16(62594), uint16(62640), uint16(62685), uint16(62728), uint16(62770),
 20659  	uint16(62811), uint16(62852), uint16(62891), uint16(62929), uint16(62967), uint16(63004), uint16(63040), uint16(63075),
 20660  	uint16(63110), uint16(63145), uint16(63178), uint16(63212), uint16(63244), uint16(63277), uint16(63308), uint16(63340),
 20661  	uint16(63371), uint16(63402), uint16(63432), uint16(63462), uint16(63491), uint16(63521), uint16(63550), uint16(63578),
 20662  	uint16(63607), uint16(63635), uint16(63663), uint16(63690), uint16(63718), uint16(63744), uint16(63771), uint16(63798),
 20663  	uint16(63824), uint16(63850), uint16(63875), uint16(63900), uint16(63925), uint16(63950), uint16(63975), uint16(63999),
 20664  	uint16(64023), uint16(64046), uint16(64069), uint16(64092), uint16(64115), uint16(64138), uint16(64160), uint16(64182),
 20665  	uint16(64204), uint16(64225), uint16(64247), uint16(64268), uint16(64289), uint16(64310), uint16(64330), uint16(64351),
 20666  	uint16(64371), uint16(64391), uint16(64411), uint16(64431), uint16(64450), uint16(64470), uint16(64489), uint16(64508),
 20667  	uint16(64527), uint16(64545), uint16(64564), uint16(64582), uint16(64600), uint16(64617), uint16(64635), uint16(64652),
 20668  	uint16(64669), uint16(64686), uint16(64702), uint16(64719), uint16(64735), uint16(64750), uint16(64766), uint16(64782),
 20669  	uint16(64797), uint16(64812), uint16(64827), uint16(64842), uint16(64857), uint16(64872), uint16(64886), uint16(64901),
 20670  	uint16(64915), uint16(64930), uint16(64944), uint16(64959), uint16(64974), uint16(64988), uint16(65003), uint16(65018),
 20671  	uint16(65033), uint16(65048), uint16(65063), uint16(65078), uint16(65094), uint16(65109), uint16(65125), uint16(65141),
 20672  	uint16(65157), uint16(65172), uint16(65188), uint16(65204), uint16(65220), uint16(65236), uint16(65252), uint16(65268),
 20673  	uint16(65283), uint16(65299), uint16(65314), uint16(65330), uint16(65345), uint16(65360), uint16(65375), uint16(65390),
 20674  	uint16(65405), uint16(65419), uint16(65434), uint16(65449), uint16(65463), uint16(65477), uint16(65492), uint16(65506),
 20675  	uint16(65521), uint16(65535),
 20676  } /* SKP_Silk_tables_pitch_lag.c:128:18 */
 20677  
 20678  var SKP_Silk_pitch_lag_SWB_CDF_offset int32 = 128 /* SKP_Silk_tables_pitch_lag.c:180:15 */
 20679  
 20680  var SKP_Silk_pitch_contour_CDF = [35]uint16{
 20681  	uint16(0), uint16(372), uint16(843), uint16(1315), uint16(1836), uint16(2644), uint16(3576), uint16(4719),
 20682  	uint16(6088), uint16(7621), uint16(9396), uint16(11509), uint16(14245), uint16(17618), uint16(20777), uint16(24294),
 20683  	uint16(27992), uint16(33116), uint16(40100), uint16(44329), uint16(47558), uint16(50679), uint16(53130), uint16(55557),
 20684  	uint16(57510), uint16(59022), uint16(60285), uint16(61345), uint16(62316), uint16(63140), uint16(63762), uint16(64321),
 20685  	uint16(64729), uint16(65099), uint16(65535),
 20686  } /* SKP_Silk_tables_pitch_lag.c:183:18 */
 20687  
 20688  var SKP_Silk_pitch_contour_CDF_offset int32 = 17 /* SKP_Silk_tables_pitch_lag.c:191:15 */
 20689  
 20690  var SKP_Silk_pitch_delta_CDF = [23]uint16{
 20691  	uint16(0), uint16(343), uint16(740), uint16(1249), uint16(1889), uint16(2733), uint16(3861), uint16(5396),
 20692  	uint16(7552), uint16(10890), uint16(16053), uint16(24152), uint16(30220), uint16(34680), uint16(37973), uint16(40405),
 20693  	uint16(42243), uint16(43708), uint16(44823), uint16(45773), uint16(46462), uint16(47055), uint16(65535),
 20694  } /* SKP_Silk_tables_pitch_lag.c:193:18 */
 20695  
 20696  var SKP_Silk_pitch_delta_CDF_offset int32 = 11 /* SKP_Silk_tables_pitch_lag.c:199:15 */
 20697  
 20698  var SKP_Silk_max_pulses_table = [4]int32{
 20699  	6, 8, 12, 18,
 20700  } /* SKP_Silk_tables_pulses_per_block.c:30:15 */
 20701  
 20702  var SKP_Silk_pulses_per_block_CDF = [10][21]uint16{
 20703  	{
 20704  		uint16(0), uint16(47113), uint16(61501), uint16(64590), uint16(65125), uint16(65277), uint16(65352), uint16(65407),
 20705  		uint16(65450), uint16(65474), uint16(65488), uint16(65501), uint16(65508), uint16(65514), uint16(65516), uint16(65520),
 20706  		uint16(65521), uint16(65523), uint16(65524), uint16(65526), uint16(65535),
 20707  	},
 20708  	{
 20709  		uint16(0), uint16(26368), uint16(47760), uint16(58803), uint16(63085), uint16(64567), uint16(65113), uint16(65333),
 20710  		uint16(65424), uint16(65474), uint16(65498), uint16(65511), uint16(65517), uint16(65520), uint16(65523), uint16(65525),
 20711  		uint16(65526), uint16(65528), uint16(65529), uint16(65530), uint16(65535),
 20712  	},
 20713  	{
 20714  		uint16(0), uint16(9601), uint16(28014), uint16(45877), uint16(57210), uint16(62560), uint16(64611), uint16(65260),
 20715  		uint16(65447), uint16(65500), uint16(65511), uint16(65519), uint16(65521), uint16(65525), uint16(65526), uint16(65529),
 20716  		uint16(65530), uint16(65531), uint16(65532), uint16(65534), uint16(65535),
 20717  	},
 20718  	{
 20719  		uint16(0), uint16(3351), uint16(12462), uint16(25972), uint16(39782), uint16(50686), uint16(57644), uint16(61525),
 20720  		uint16(63521), uint16(64506), uint16(65009), uint16(65255), uint16(65375), uint16(65441), uint16(65471), uint16(65488),
 20721  		uint16(65497), uint16(65505), uint16(65509), uint16(65512), uint16(65535),
 20722  	},
 20723  	{
 20724  		uint16(0), uint16(488), uint16(2944), uint16(9295), uint16(19712), uint16(32160), uint16(43976), uint16(53121),
 20725  		uint16(59144), uint16(62518), uint16(64213), uint16(65016), uint16(65346), uint16(65470), uint16(65511), uint16(65515),
 20726  		uint16(65525), uint16(65529), uint16(65531), uint16(65534), uint16(65535),
 20727  	},
 20728  	{
 20729  		uint16(0), uint16(17013), uint16(30405), uint16(40812), uint16(48142), uint16(53466), uint16(57166), uint16(59845),
 20730  		uint16(61650), uint16(62873), uint16(63684), uint16(64223), uint16(64575), uint16(64811), uint16(64959), uint16(65051),
 20731  		uint16(65111), uint16(65143), uint16(65165), uint16(65183), uint16(65535),
 20732  	},
 20733  	{
 20734  		uint16(0), uint16(2994), uint16(8323), uint16(15845), uint16(24196), uint16(32300), uint16(39340), uint16(45140),
 20735  		uint16(49813), uint16(53474), uint16(56349), uint16(58518), uint16(60167), uint16(61397), uint16(62313), uint16(62969),
 20736  		uint16(63410), uint16(63715), uint16(63906), uint16(64056), uint16(65535),
 20737  	},
 20738  	{
 20739  		uint16(0), uint16(88), uint16(721), uint16(2795), uint16(7542), uint16(14888), uint16(24420), uint16(34593),
 20740  		uint16(43912), uint16(51484), uint16(56962), uint16(60558), uint16(62760), uint16(64037), uint16(64716), uint16(65069),
 20741  		uint16(65262), uint16(65358), uint16(65398), uint16(65420), uint16(65535),
 20742  	},
 20743  	{
 20744  		uint16(0), uint16(287), uint16(789), uint16(2064), uint16(4398), uint16(8174), uint16(13534), uint16(20151),
 20745  		uint16(27347), uint16(34533), uint16(41295), uint16(47242), uint16(52070), uint16(55772), uint16(58458), uint16(60381),
 20746  		uint16(61679), uint16(62533), uint16(63109), uint16(63519), uint16(65535),
 20747  	},
 20748  	{
 20749  		uint16(0), uint16(1), uint16(3), uint16(91), uint16(4521), uint16(14708), uint16(28329), uint16(41955),
 20750  		uint16(52116), uint16(58375), uint16(61729), uint16(63534), uint16(64459), uint16(64924), uint16(65092), uint16(65164),
 20751  		uint16(65182), uint16(65198), uint16(65203), uint16(65211), uint16(65535),
 20752  	},
 20753  } /* SKP_Silk_tables_pulses_per_block.c:34:18 */
 20754  
 20755  var SKP_Silk_pulses_per_block_CDF_offset int32 = 6 /* SKP_Silk_tables_pulses_per_block.c:88:15 */
 20756  
 20757  var SKP_Silk_pulses_per_block_BITS_Q6 = [9][20]int16{
 20758  	{
 20759  		int16(30), int16(140), int16(282), int16(444), int16(560), int16(625), int16(654), int16(677),
 20760  		int16(731), int16(780), int16(787), int16(844), int16(859), int16(960), int16(896), int16(1024),
 20761  		int16(960), int16(1024), int16(960), int16(821),
 20762  	},
 20763  	{
 20764  		int16(84), int16(103), int16(164), int16(252), int16(350), int16(442), int16(526), int16(607),
 20765  		int16(663), int16(731), int16(787), int16(859), int16(923), int16(923), int16(960), int16(1024),
 20766  		int16(960), int16(1024), int16(1024), int16(875),
 20767  	},
 20768  	{
 20769  		int16(177), int16(117), int16(120), int16(162), int16(231), int16(320), int16(426), int16(541),
 20770  		int16(657), int16(803), int16(832), int16(960), int16(896), int16(1024), int16(923), int16(1024),
 20771  		int16(1024), int16(1024), int16(960), int16(1024),
 20772  	},
 20773  	{
 20774  		int16(275), int16(182), int16(146), int16(144), int16(166), int16(207), int16(261), int16(322),
 20775  		int16(388), int16(450), int16(516), int16(582), int16(637), int16(710), int16(762), int16(821),
 20776  		int16(832), int16(896), int16(923), int16(734),
 20777  	},
 20778  	{
 20779  		int16(452), int16(303), int16(216), int16(170), int16(153), int16(158), int16(182), int16(220),
 20780  		int16(274), int16(337), int16(406), int16(489), int16(579), int16(681), int16(896), int16(811),
 20781  		int16(896), int16(960), int16(923), int16(1024),
 20782  	},
 20783  	{
 20784  		int16(125), int16(147), int16(170), int16(202), int16(232), int16(265), int16(295), int16(332),
 20785  		int16(368), int16(406), int16(443), int16(483), int16(520), int16(563), int16(606), int16(646),
 20786  		int16(704), int16(739), int16(757), int16(483),
 20787  	},
 20788  	{
 20789  		int16(285), int16(232), int16(200), int16(190), int16(193), int16(206), int16(224), int16(244),
 20790  		int16(266), int16(289), int16(315), int16(340), int16(367), int16(394), int16(425), int16(462),
 20791  		int16(496), int16(539), int16(561), int16(350),
 20792  	},
 20793  	{
 20794  		int16(611), int16(428), int16(319), int16(242), int16(202), int16(178), int16(172), int16(180),
 20795  		int16(199), int16(229), int16(268), int16(313), int16(364), int16(422), int16(482), int16(538),
 20796  		int16(603), int16(683), int16(739), int16(586),
 20797  	},
 20798  	{
 20799  		int16(501), int16(450), int16(364), int16(308), int16(264), int16(231), int16(212), int16(204),
 20800  		int16(204), int16(210), int16(222), int16(241), int16(265), int16(295), int16(326), int16(362),
 20801  		int16(401), int16(437), int16(469), int16(321),
 20802  	},
 20803  } /* SKP_Silk_tables_pulses_per_block.c:91:17 */
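// Illustrative note, not part of the generated translation: the *_BITS_Q6 tables
// appear to hold approximate codeword lengths in bits as Q6 fixed point, so an
// entry of 322 corresponds to 322/64 ≈ 5.03 bits. A rate estimate is then formed
// by summing entries in Q6 and converting at the end, roughly as below.
func exampleBitsFromQ6(sumQ6 int32) float64 {
	return float64(sumQ6) / 64.0 // Q6 fixed point -> bits
}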
 20804  
 20805  var SKP_Silk_rate_levels_CDF = [2][10]uint16{
 20806  	{
 20807  		uint16(0), uint16(2005), uint16(12717), uint16(20281), uint16(31328), uint16(36234), uint16(45816), uint16(57753),
 20808  		uint16(63104), uint16(65535),
 20809  	},
 20810  	{
 20811  		uint16(0), uint16(8553), uint16(23489), uint16(36031), uint16(46295), uint16(53519), uint16(56519), uint16(59151),
 20812  		uint16(64185), uint16(65535),
 20813  	},
 20814  } /* SKP_Silk_tables_pulses_per_block.c:140:18 */
 20815  
 20816  var SKP_Silk_rate_levels_CDF_offset int32 = 4 /* SKP_Silk_tables_pulses_per_block.c:152:15 */
 20817  
 20818  var SKP_Silk_rate_levels_BITS_Q6 = [2][9]int16{
 20819  	{
 20820  		int16(322), int16(167), int16(199), int16(164), int16(239), int16(178), int16(157), int16(231),
 20821  		int16(304),
 20822  	},
 20823  	{
 20824  		int16(188), int16(137), int16(153), int16(171), int16(204), int16(285), int16(297), int16(237),
 20825  		int16(358),
 20826  	},
 20827  } /* SKP_Silk_tables_pulses_per_block.c:155:17 */
 20828  
 20829  var SKP_Silk_shell_code_table0 = [33]uint16{
 20830  	uint16(0), uint16(32748), uint16(65535), uint16(0), uint16(9505), uint16(56230), uint16(65535), uint16(0),
 20831  	uint16(4093), uint16(32204), uint16(61720), uint16(65535), uint16(0), uint16(2285), uint16(16207), uint16(48750),
 20832  	uint16(63424), uint16(65535), uint16(0), uint16(1709), uint16(9446), uint16(32026), uint16(55752), uint16(63876),
 20833  	uint16(65535), uint16(0), uint16(1623), uint16(6986), uint16(21845), uint16(45381), uint16(59147), uint16(64186),
 20834  	uint16(65535),
 20835  } /* SKP_Silk_tables_pulses_per_block.c:167:18 */
 20836  
 20837  var SKP_Silk_shell_code_table1 = [52]uint16{
 20838  	uint16(0), uint16(32691), uint16(65535), uint16(0), uint16(12782), uint16(52752), uint16(65535), uint16(0),
 20839  	uint16(4847), uint16(32665), uint16(60899), uint16(65535), uint16(0), uint16(2500), uint16(17305), uint16(47989),
 20840  	uint16(63369), uint16(65535), uint16(0), uint16(1843), uint16(10329), uint16(32419), uint16(55433), uint16(64277),
 20841  	uint16(65535), uint16(0), uint16(1485), uint16(7062), uint16(21465), uint16(43414), uint16(59079), uint16(64623),
 20842  	uint16(65535), uint16(0), uint16(0), uint16(4841), uint16(14797), uint16(31799), uint16(49667), uint16(61309),
 20843  	uint16(65535), uint16(65535), uint16(0), uint16(0), uint16(0), uint16(8032), uint16(21695), uint16(41078),
 20844  	uint16(56317), uint16(65535), uint16(65535), uint16(65535),
 20845  } /* SKP_Silk_tables_pulses_per_block.c:175:18 */
 20846  
 20847  var SKP_Silk_shell_code_table2 = [102]uint16{
 20848  	uint16(0), uint16(32615), uint16(65535), uint16(0), uint16(14447), uint16(50912), uint16(65535), uint16(0),
 20849  	uint16(6301), uint16(32587), uint16(59361), uint16(65535), uint16(0), uint16(3038), uint16(18640), uint16(46809),
 20850  	uint16(62852), uint16(65535), uint16(0), uint16(1746), uint16(10524), uint16(32509), uint16(55273), uint16(64278),
 20851  	uint16(65535), uint16(0), uint16(1234), uint16(6360), uint16(21259), uint16(43712), uint16(59651), uint16(64805),
 20852  	uint16(65535), uint16(0), uint16(1020), uint16(4461), uint16(14030), uint16(32286), uint16(51249), uint16(61904),
 20853  	uint16(65100), uint16(65535), uint16(0), uint16(851), uint16(3435), uint16(10006), uint16(23241), uint16(40797),
 20854  	uint16(55444), uint16(63009), uint16(65252), uint16(65535), uint16(0), uint16(0), uint16(2075), uint16(7137),
 20855  	uint16(17119), uint16(31499), uint16(46982), uint16(58723), uint16(63976), uint16(65535), uint16(65535), uint16(0),
 20856  	uint16(0), uint16(0), uint16(3820), uint16(11572), uint16(23038), uint16(37789), uint16(51969), uint16(61243),
 20857  	uint16(65535), uint16(65535), uint16(65535), uint16(0), uint16(0), uint16(0), uint16(0), uint16(6882),
 20858  	uint16(16828), uint16(30444), uint16(44844), uint16(57365), uint16(65535), uint16(65535), uint16(65535), uint16(65535),
 20859  	uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(10093), uint16(22963), uint16(38779),
 20860  	uint16(54426), uint16(65535), uint16(65535), uint16(65535), uint16(65535), uint16(65535),
 20861  } /* SKP_Silk_tables_pulses_per_block.c:185:18 */
 20862  
 20863  var SKP_Silk_shell_code_table3 = [207]uint16{
 20864  	uint16(0), uint16(32324), uint16(65535), uint16(0), uint16(15328), uint16(49505), uint16(65535), uint16(0),
 20865  	uint16(7474), uint16(32344), uint16(57955), uint16(65535), uint16(0), uint16(3944), uint16(19450), uint16(45364),
 20866  	uint16(61873), uint16(65535), uint16(0), uint16(2338), uint16(11698), uint16(32435), uint16(53915), uint16(63734),
 20867  	uint16(65535), uint16(0), uint16(1506), uint16(7074), uint16(21778), uint16(42972), uint16(58861), uint16(64590),
 20868  	uint16(65535), uint16(0), uint16(1027), uint16(4490), uint16(14383), uint16(32264), uint16(50980), uint16(61712),
 20869  	uint16(65043), uint16(65535), uint16(0), uint16(760), uint16(3022), uint16(9696), uint16(23264), uint16(41465),
 20870  	uint16(56181), uint16(63253), uint16(65251), uint16(65535), uint16(0), uint16(579), uint16(2256), uint16(6873),
 20871  	uint16(16661), uint16(31951), uint16(48250), uint16(59403), uint16(64198), uint16(65360), uint16(65535), uint16(0),
 20872  	uint16(464), uint16(1783), uint16(5181), uint16(12269), uint16(24247), uint16(39877), uint16(53490), uint16(61502),
 20873  	uint16(64591), uint16(65410), uint16(65535), uint16(0), uint16(366), uint16(1332), uint16(3880), uint16(9273),
 20874  	uint16(18585), uint16(32014), uint16(45928), uint16(56659), uint16(62616), uint16(64899), uint16(65483), uint16(65535),
 20875  	uint16(0), uint16(286), uint16(1065), uint16(3089), uint16(6969), uint16(14148), uint16(24859), uint16(38274),
 20876  	uint16(50715), uint16(59078), uint16(63448), uint16(65091), uint16(65481), uint16(65535), uint16(0), uint16(0),
 20877  	uint16(482), uint16(2010), uint16(5302), uint16(10408), uint16(18988), uint16(30698), uint16(43634), uint16(54233),
 20878  	uint16(60828), uint16(64119), uint16(65288), uint16(65535), uint16(65535), uint16(0), uint16(0), uint16(0),
 20879  	uint16(1006), uint16(3531), uint16(7857), uint16(14832), uint16(24543), uint16(36272), uint16(47547), uint16(56883),
 20880  	uint16(62327), uint16(64746), uint16(65535), uint16(65535), uint16(65535), uint16(0), uint16(0), uint16(0),
 20881  	uint16(0), uint16(1863), uint16(4950), uint16(10730), uint16(19284), uint16(29397), uint16(41382), uint16(52335),
 20882  	uint16(59755), uint16(63834), uint16(65535), uint16(65535), uint16(65535), uint16(65535), uint16(0), uint16(0),
 20883  	uint16(0), uint16(0), uint16(0), uint16(2513), uint16(7290), uint16(14487), uint16(24275), uint16(35312),
 20884  	uint16(46240), uint16(55841), uint16(62007), uint16(65535), uint16(65535), uint16(65535), uint16(65535), uint16(65535),
 20885  	uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(3606), uint16(9573),
 20886  	uint16(18764), uint16(28667), uint16(40220), uint16(51290), uint16(59924), uint16(65535), uint16(65535), uint16(65535),
 20887  	uint16(65535), uint16(65535), uint16(65535), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0),
 20888  	uint16(0), uint16(0), uint16(4879), uint16(13091), uint16(23376), uint16(36061), uint16(49395), uint16(59315),
 20889  	uint16(65535), uint16(65535), uint16(65535), uint16(65535), uint16(65535), uint16(65535), uint16(65535),
 20890  } /* SKP_Silk_tables_pulses_per_block.c:201:18 */
 20891  
 20892  var SKP_Silk_shell_code_table_offsets = [19]uint16{
 20893  	uint16(0), uint16(0), uint16(3), uint16(7), uint16(12), uint16(18), uint16(25), uint16(33),
 20894  	uint16(42), uint16(52), uint16(63), uint16(75), uint16(88), uint16(102), uint16(117), uint16(133),
 20895  	uint16(150), uint16(168), uint16(187),
 20896  } /* SKP_Silk_tables_pulses_per_block.c:230:18 */
 20897  
 20898  var SKP_Silk_sign_CDF = [36]uint16{
 20899  	uint16(37840), uint16(36944), uint16(36251), uint16(35304),
 20900  	uint16(34715), uint16(35503), uint16(34529), uint16(34296),
 20901  	uint16(34016), uint16(47659), uint16(44945), uint16(42503),
 20902  	uint16(40235), uint16(38569), uint16(40254), uint16(37851),
 20903  	uint16(37243), uint16(36595), uint16(43410), uint16(44121),
 20904  	uint16(43127), uint16(40978), uint16(38845), uint16(40433),
 20905  	uint16(38252), uint16(37795), uint16(36637), uint16(59159),
 20906  	uint16(55630), uint16(51806), uint16(48073), uint16(45036),
 20907  	uint16(48416), uint16(43857), uint16(42678), uint16(41146),
 20908  } /* SKP_Silk_tables_sign.c:30:18 */
 20909  
 20910  var SKP_Silk_type_offset_CDF = [5]uint16{
 20911  	uint16(0), uint16(37522), uint16(41030), uint16(44212), uint16(65535),
 20912  } /* SKP_Silk_tables_type_offset.c:30:18 */
 20913  
 20914  var SKP_Silk_type_offset_CDF_offset int32 = 2 /* SKP_Silk_tables_type_offset.c:34:15 */
 20915  
 20916  var SKP_Silk_type_offset_joint_CDF = [4][5]uint16{
 20917  	{
 20918  		uint16(0), uint16(57686), uint16(61230), uint16(62358), uint16(65535),
 20919  	},
 20920  	{
 20921  		uint16(0), uint16(18346), uint16(40067), uint16(43659), uint16(65535),
 20922  	},
 20923  	{
 20924  		uint16(0), uint16(22694), uint16(24279), uint16(35507), uint16(65535),
 20925  	},
 20926  	{
 20927  		uint16(0), uint16(6067), uint16(7215), uint16(13010), uint16(65535),
 20928  	},
 20929  } /* SKP_Silk_tables_type_offset.c:37:18 */
 20930  
 20931  /***********************************************************************
 20932  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
 20933  Redistribution and use in source and binary forms, with or without
 20934  modification, (subject to the limitations in the disclaimer below)
 20935  are permitted provided that the following conditions are met:
 20936  - Redistributions of source code must retain the above copyright notice,
 20937  this list of conditions and the following disclaimer.
 20938  - Redistributions in binary form must reproduce the above copyright
 20939  notice, this list of conditions and the following disclaimer in the
 20940  documentation and/or other materials provided with the distribution.
 20941  - Neither the name of Skype Limited, nor the names of specific
 20942  contributors, may be used to endorse or promote products derived from
 20943  this software without specific prior written permission.
 20944  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
 20945  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 20946  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
 20947  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 20948  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 20949  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 20950  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 20951  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 20952  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 20953  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 20954  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 20955  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 20956  ***********************************************************************/
 20957  
 20958  /*******************/
 20959  /* Pitch estimator */
 20960  /*******************/
 20961  
 20962  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
 20963  
 20964  /* Bandwidth expansion for whitening filter in pitch analysis */
 20965  
 20966  /* Threshold used by pitch estimator for early escape */
 20967  
 20968  /*********************/
 20969  /* Linear prediction */
 20970  /*********************/
 20971  
 20972  /* LPC analysis defines: regularization and bandwidth expansion */
 20973  
 20974  /* LTP analysis defines */
 20975  
 20976  /* LTP quantization settings */
 20977  
 20978  /***********************/
 20979  /* High pass filtering */
 20980  /***********************/
 20981  
 20982  /* Smoothing parameters for low end of pitch frequency range estimation */
 20983  
 20984  /* Min and max values for low end of pitch frequency range estimation */
 20985  
 20986  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
 20987  
 20988  /***********/
 20989  /* Various */
 20990  /***********/
 20991  
 20992  /* Required speech activity for counting frame as active */
 20993  
 20994  /* Speech Activity LBRR enable threshold (needs tuning) */
 20995  
 20996  /*************************/
 20997  /* Perceptual parameters */
 20998  /*************************/
 20999  
 21000  /* reduction in coding SNR during low speech activity */
 21001  
 21002  /* factor for reducing quantization noise during voiced speech */
 21003  
 21004  /* factor for reducing quantization noise for unvoiced sparse signals */
 21005  
 21006  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
 21007  
 21008  /* warping control */
 21009  
 21010  /* fraction added to first autocorrelation value */
 21011  
 21012  /* noise shaping filter chirp factor */
 21013  
 21014  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
 21015  
 21016  /* gain reduction for fricatives */
 21017  
 21018  /* extra harmonic boosting (signal shaping) at low bitrates */
 21019  
 21020  /* extra harmonic boosting (signal shaping) for noisy input signals */
 21021  
 21022  /* harmonic noise shaping */
 21023  
 21024  /* extra harmonic noise shaping for high bitrates or noisy input */
 21025  
 21026  /* parameter for shaping noise towards higher frequencies */
 21027  
 21028  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
 21029  
 21030  /* parameter for applying a high-pass tilt to the input signal */
 21031  
 21032  /* parameter for extra high-pass tilt to the input signal at high rates */
 21033  
 21034  /* parameter for reducing noise at the very low frequencies */
 21035  
 21036  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
 21037  
 21038  /* noise floor to put a lower limit on the quantization step size */
 21039  
 21040  /* noise floor relative to active speech gain level */
 21041  
 21042  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
 21043  
 21044  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
 21045  
 21046  /* parameters defining the R/D tradeoff in the residual quantizer */
 21047  
 21048  /**********************************/
 21049  /* Initialization of the Silk VAD */
 21050  /**********************************/
 21051  func SKP_Silk_VAD_Init(tls *libc.TLS, psSilk_VAD uintptr) int32 { /* SKP_Silk_VAD.c:39:9: */
 21052  	var b int32
 21053  	var ret int32 = 0
 21054  
 21055  	/* reset state memory */
 21056  	libc.Xmemset(tls, psSilk_VAD, 0, uint64(unsafe.Sizeof(SKP_Silk_VAD_state{})))
 21057  
 21058  	/* init noise levels */
 21059  	/* Initialize array with approx pink noise levels (psd proportional to inverse of frequency) */
 21060  	for b = 0; b < 4; b++ {
 21061  		*(*int32)(unsafe.Pointer((psSilk_VAD + 92 /* &.NoiseLevelBias */) + uintptr(b)*4)) = SKP_max_32(tls, ((50) / (b + 1)), 1)
 21062  	}
 21063  
 21064  	/* Initialize state */
 21065  	for b = 0; b < 4; b++ {
 21066  		*(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(b)*4)) = ((100) * (*(*int32)(unsafe.Pointer((psSilk_VAD + 92 /* &.NoiseLevelBias */) + uintptr(b)*4))))
 21067  		*(*int32)(unsafe.Pointer((psSilk_VAD + 76 /* &.inv_NL */) + uintptr(b)*4)) = ((0x7FFFFFFF) / (*(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(b)*4))))
 21068  	}
 21069  	(*SKP_Silk_VAD_state)(unsafe.Pointer(psSilk_VAD)).Fcounter = 15
 21070  
 21071  	/* init smoothed energy-to-noise ratio */
 21072  	for b = 0; b < 4; b++ {
 21073  		*(*int32)(unsafe.Pointer((psSilk_VAD + 40 /* &.NrgRatioSmth_Q8 */) + uintptr(b)*4)) = (100 * 256) /* 100 * 256 --> 20 dB SNR */
 21074  	}
 21075  
 21076  	return ret
 21077  }
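
// Editor's note (not part of the ccgo-generated code): with the constants above, this
// leaves the state as NoiseLevelBias = {50, 25, 16, 12} (i.e. max(50/(b+1), 1) per band),
// NL = 100 * NoiseLevelBias = {5000, 2500, 1600, 1200}, inv_NL = 0x7FFFFFFF / NL,
// counter = 15, and NrgRatioSmth_Q8 = 25600 (100 in Q8, roughly a 20 dB energy-to-noise
// ratio) in every band.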
 21078  
 21079  /* Weighting factors for tilt measure */
 21080  var tiltWeights = [4]int32{30000, 6000, -12000, -12000} /* SKP_Silk_VAD.c:70:24 */
 21081  
 21082  /***************************************/
 21083  /* Get the speech activity level in Q8 */
 21084  /***************************************/
 21085  func SKP_Silk_VAD_GetSA_Q8(tls *libc.TLS, psSilk_VAD uintptr, pSA_Q8 uintptr, pSNR_dB_Q7 uintptr, pQuality_Q15 uintptr, pTilt_Q15 uintptr, pIn uintptr, framelength int32) int32 { /* SKP_Silk_VAD.c:75:9: */
 21086  	bp := tls.Alloc(4832)
 21087  	defer tls.Free(4832)
 21088  
 21089  	var SA_Q15 int32
 21090  	var input_tilt int32
 21091  	// var scratch [720]int32 at bp+1920, 2880
 21092  
 21093  	var decimated_framelength int32
 21094  	var dec_subframe_length int32
 21095  	var dec_subframe_offset int32
 21096  	var SNR_Q7 int32
 21097  	var i int32
 21098  	var b int32
 21099  	var s int32
 21100  	var sumSquared int32
 21101  	var smooth_coef_Q16 int32
 21102  	var HPstateTmp int16
 21103  	// var X [4][240]int16 at bp, 1920
 21104  
 21105  	// var Xnrg [4]int32 at bp+4800, 16
 21106  
 21107  	// var NrgToNoiseRatio_Q8 [4]int32 at bp+4816, 16
 21108  
 21109  	var speech_nrg int32
 21110  	var x_tmp int32
 21111  	var ret int32 = 0
 21112  
 21113  	/* Safety checks */
 21114  
 21115  	/***********************/
 21116  	/* Filter and Decimate */
 21117  	/***********************/
 21118  	/* 0-8 kHz to 0-4 kHz and 4-8 kHz */
 21119  	SKP_Silk_ana_filt_bank_1(tls, pIn, (psSilk_VAD /* &.AnaState */), (bp /* &X */), (bp /* &X */ + 3*480), (bp + 1920 /* &scratch */), framelength)
 21120  
 21121  	/* 0-4 kHz to 0-2 kHz and 2-4 kHz */
 21122  	SKP_Silk_ana_filt_bank_1(tls, (bp /* &X */), (psSilk_VAD + 8 /* &.AnaState1 */), (bp /* &X */), (bp /* &X */ + 2*480), (bp + 1920 /* &scratch */), ((framelength) >> (1)))
 21123  
 21124  	/* 0-2 kHz to 0-1 kHz and 1-2 kHz */
 21125  	SKP_Silk_ana_filt_bank_1(tls, (bp /* &X */), (psSilk_VAD + 16 /* &.AnaState2 */), (bp /* &X */), (bp /* &X */ + 1*480), (bp + 1920 /* &scratch */), ((framelength) >> (2)))
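	// Editor's note (not part of the ccgo-generated code): after these three splits,
	// X[0] holds the 0-1 kHz band and X[1] the 1-2 kHz band (framelength/8 samples each),
	// X[2] the 2-4 kHz band (framelength/4 samples) and X[3] the 4-8 kHz band
	// (framelength/2 samples); the rows are 480 bytes (240 int16 samples) apart.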
 21126  
 21127  	/*********************************************/
 21128  	/* HP filter on lowest band (differentiator) */
 21129  	/*********************************************/
 21130  	decimated_framelength = ((framelength) >> (3))
 21131  	*(*int16)(unsafe.Pointer((bp /* &X[0] */) + uintptr((decimated_framelength-1))*2)) = (int16((int32(*(*int16)(unsafe.Pointer((bp /* &X[0] */) + uintptr((decimated_framelength-1))*2)))) >> (1)))
 21132  	HPstateTmp = *(*int16)(unsafe.Pointer((bp /* &X[0] */) + uintptr((decimated_framelength-1))*2))
 21133  	for i = (decimated_framelength - 1); i > 0; i-- {
 21134  		*(*int16)(unsafe.Pointer((bp /* &X[0] */) + uintptr((i-1))*2)) = (int16((int32(*(*int16)(unsafe.Pointer((bp /* &X[0] */) + uintptr((i-1))*2)))) >> (1)))
 21135  		*(*int16)(unsafe.Pointer((bp /* &X */) + uintptr(i)*2)) -= int16((int32(*(*int16)(unsafe.Pointer((bp /* &X[0] */) + uintptr((i-1))*2)))))
 21136  	}
 21137  	*(*int16)(unsafe.Pointer((bp /* &X */))) -= int16((int32((*SKP_Silk_VAD_state)(unsafe.Pointer(psSilk_VAD)).FHPstate)))
 21138  	(*SKP_Silk_VAD_state)(unsafe.Pointer(psSilk_VAD)).FHPstate = HPstateTmp
 21139  
 21140  	/*************************************/
 21141  	/* Calculate the energy in each band */
 21142  	/*************************************/
 21143  	for b = 0; b < 4; b++ {
 21144  		/* Find the decimated framelength in the non-uniformly divided bands */
 21145  		decimated_framelength = ((framelength) >> (SKP_min_int(tls, (4 - b), (4 - 1))))
 21146  
 21147  		/* Split length into subframe lengths */
 21148  		dec_subframe_length = ((decimated_framelength) >> (2))
 21149  		dec_subframe_offset = 0
 21150  
 21151  		/* Compute energy per sub-frame */
 21152  		/* initialize with summed energy of last subframe */
 21153  		*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4)) = *(*int32)(unsafe.Pointer((psSilk_VAD + 24 /* &.XnrgSubfr */) + uintptr(b)*4))
 21154  		for s = 0; s < (int32(1) << 2); s++ {
 21155  			sumSquared = 0
 21156  			for i = 0; i < dec_subframe_length; i++ {
 21157  				/* The energy will be less than dec_subframe_length * ( SKP_int16_MIN / 8 ) ^ 2.            */
 21158  				/* Therefore we can accumulate with no risk of overflow (unless dec_subframe_length > 128)  */
 21159  				x_tmp = ((int32(*(*int16)(unsafe.Pointer((bp /* &X[0] */ + uintptr(b)*480) + uintptr((i+dec_subframe_offset))*2)))) >> (3))
 21160  				sumSquared = ((sumSquared) + ((int32(int16(x_tmp))) * (int32(int16(x_tmp)))))
 21161  
 21162  				/* Safety check */
 21163  
 21164  			}
 21165  
 21166  			/* Add/saturate summed energy of current subframe */
 21167  			if s < ((int32(1) << 2) - 1) {
 21168  				*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4)) = func() int32 {
 21169  					if ((uint32((*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4))) + (sumSquared))) & 0x80000000) != 0 {
 21170  						return 0x7FFFFFFF
 21171  					}
 21172  					return ((*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4))) + (sumSquared))
 21173  				}()
 21174  			} else {
 21175  				/* Look-ahead subframe */
 21176  				*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4)) = func() int32 {
 21177  					if ((uint32((*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4))) + ((sumSquared) >> (1)))) & 0x80000000) != 0 {
 21178  						return 0x7FFFFFFF
 21179  					}
 21180  					return ((*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4))) + ((sumSquared) >> (1)))
 21181  				}()
 21182  			}
 21183  
 21184  			dec_subframe_offset = dec_subframe_offset + (dec_subframe_length)
 21185  		}
 21186  		*(*int32)(unsafe.Pointer((psSilk_VAD + 24 /* &.XnrgSubfr */) + uintptr(b)*4)) = sumSquared
 21187  	}
 21188  
 21189  	/********************/
 21190  	/* Noise estimation */
 21191  	/********************/
 21192  	SKP_Silk_VAD_GetNoiseLevels(tls, (bp + 4800 /* &Xnrg */), psSilk_VAD)
 21193  
 21194  	/***********************************************/
 21195  	/* Signal-plus-noise to noise ratio estimation */
 21196  	/***********************************************/
 21197  	sumSquared = 0
 21198  	input_tilt = 0
 21199  	for b = 0; b < 4; b++ {
 21200  		speech_nrg = (*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4)) - *(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(b)*4)))
 21201  		if speech_nrg > 0 {
 21202  			/* Divide, with sufficient resolution */
 21203  			if (uint32(*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4))) & 0xFF800000) == uint32(0) {
 21204  				*(*int32)(unsafe.Pointer(bp + 4816 /* &NrgToNoiseRatio_Q8[0] */ + uintptr(b)*4)) = (((*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4))) << (8)) / (*(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(b)*4)) + 1))
 21205  			} else {
 21206  				*(*int32)(unsafe.Pointer(bp + 4816 /* &NrgToNoiseRatio_Q8[0] */ + uintptr(b)*4)) = ((*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4))) / (((*(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(b)*4))) >> (8)) + 1))
 21207  			}
 21208  
 21209  			/* Convert to log domain */
 21210  			SNR_Q7 = (SKP_Silk_lin2log(tls, *(*int32)(unsafe.Pointer(bp + 4816 /* &NrgToNoiseRatio_Q8[0] */ + uintptr(b)*4))) - (8 * 128))
 21211  
 21212  			/* Sum-of-squares */
 21213  			sumSquared = ((sumSquared) + ((int32(int16(SNR_Q7))) * (int32(int16(SNR_Q7))))) /* Q14 */
 21214  
 21215  			/* Tilt measure */
 21216  			if speech_nrg < (int32(1) << 20) {
 21217  				/* Scale down SNR value for small subband speech energies */
 21218  				SNR_Q7 = (((((SKP_Silk_SQRT_APPROX(tls, speech_nrg)) << (6)) >> 16) * (int32(int16(SNR_Q7)))) + (((((SKP_Silk_SQRT_APPROX(tls, speech_nrg)) << (6)) & 0x0000FFFF) * (int32(int16(SNR_Q7)))) >> 16))
 21219  			}
 21220  			input_tilt = ((input_tilt) + ((((tiltWeights[b]) >> 16) * (int32(int16(SNR_Q7)))) + ((((tiltWeights[b]) & 0x0000FFFF) * (int32(int16(SNR_Q7)))) >> 16)))
 21221  		} else {
 21222  			*(*int32)(unsafe.Pointer(bp + 4816 /* &NrgToNoiseRatio_Q8[0] */ + uintptr(b)*4)) = 256
 21223  		}
 21224  	}
 21225  
 21226  	/* Mean-of-squares */
 21227  	sumSquared = ((sumSquared) / (4)) /* Q14 */
 21228  
 21229  	/* Root-mean-square approximation, scale to dBs, and write to output pointer */
 21230  	*(*int32)(unsafe.Pointer(pSNR_dB_Q7)) = int32((int16(3 * SKP_Silk_SQRT_APPROX(tls, sumSquared)))) /* Q7 */
 21231  
 21232  	/*********************************/
 21233  	/* Speech Probability Estimation */
 21234  	/*********************************/
 21235  	SA_Q15 = SKP_Silk_sigm_Q15(tls, ((((int32((45000)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(pSNR_dB_Q7)))))) + ((((45000) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(pSNR_dB_Q7)))))) >> 16)) - 128))
 21236  
 21237  	/**************************/
 21238  	/* Frequency Tilt Measure */
 21239  	/**************************/
 21240  	*(*int32)(unsafe.Pointer(pTilt_Q15)) = ((SKP_Silk_sigm_Q15(tls, input_tilt) - 16384) << (1))
 21241  
 21242  	/**************************************************/
 21243  	/* Scale the sigmoid output based on power levels */
 21244  	/**************************************************/
 21245  	speech_nrg = 0
 21246  	for b = 0; b < 4; b++ {
 21247  		/* Accumulate signal-without-noise energies; higher frequency bands have more weight */
 21248  		speech_nrg = speech_nrg + ((b + 1) * ((*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4)) - *(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(b)*4))) >> (4)))
 21249  	}
 21250  
 21251  	/* Power scaling */
 21252  	if speech_nrg <= 0 {
 21253  		SA_Q15 = ((SA_Q15) >> (1))
 21254  	} else if speech_nrg < 32768 {
 21255  		/* square-root */
 21256  		speech_nrg = SKP_Silk_SQRT_APPROX(tls, ((speech_nrg) << (15)))
 21257  		SA_Q15 = ((((32768 + speech_nrg) >> 16) * (int32(int16(SA_Q15)))) + ((((32768 + speech_nrg) & 0x0000FFFF) * (int32(int16(SA_Q15)))) >> 16))
 21258  	}
 21259  
 21260  	/* Copy the resulting speech activity in Q8 to *pSA_Q8 */
 21261  	*(*int32)(unsafe.Pointer(pSA_Q8)) = SKP_min_int(tls, ((SA_Q15) >> (7)), 0xFF)
 21262  
 21263  	/***********************************/
 21264  	/* Energy Level and SNR estimation */
 21265  	/***********************************/
 21266  	/* Smoothing coefficient */
 21267  	smooth_coef_Q16 = (((int32((4096)) >> 16) * (int32((int16((((SA_Q15) >> 16) * (int32(int16(SA_Q15)))) + ((((SA_Q15) & 0x0000FFFF) * (int32(int16(SA_Q15)))) >> 16)))))) + ((((4096) & 0x0000FFFF) * (int32((int16((((SA_Q15) >> 16) * (int32(int16(SA_Q15)))) + ((((SA_Q15) & 0x0000FFFF) * (int32(int16(SA_Q15)))) >> 16)))))) >> 16))
 21268  	for b = 0; b < 4; b++ {
 21269  		/* compute smoothed energy-to-noise ratio per band */
 21270  		*(*int32)(unsafe.Pointer((psSilk_VAD + 40 /* &.NrgRatioSmth_Q8 */) + uintptr(b)*4)) = ((*(*int32)(unsafe.Pointer((psSilk_VAD + 40 /* &.NrgRatioSmth_Q8 */) + uintptr(b)*4))) + ((((*(*int32)(unsafe.Pointer(bp + 4816 /* &NrgToNoiseRatio_Q8[0] */ + uintptr(b)*4)) - *(*int32)(unsafe.Pointer((psSilk_VAD + 40 /* &.NrgRatioSmth_Q8 */) + uintptr(b)*4))) >> 16) * (int32(int16(smooth_coef_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 4816 /* &NrgToNoiseRatio_Q8[0] */ + uintptr(b)*4)) - *(*int32)(unsafe.Pointer((psSilk_VAD + 40 /* &.NrgRatioSmth_Q8 */) + uintptr(b)*4))) & 0x0000FFFF) * (int32(int16(smooth_coef_Q16)))) >> 16)))
 21271  
 21272  		/* signal to noise ratio in dB per band */
 21273  		SNR_Q7 = (3 * (SKP_Silk_lin2log(tls, *(*int32)(unsafe.Pointer((psSilk_VAD + 40 /* &.NrgRatioSmth_Q8 */) + uintptr(b)*4))) - (8 * 128)))
 21274  		/* quality = sigmoid( 0.25 * ( SNR_dB - 16 ) ); */
 21275  		*(*int32)(unsafe.Pointer(pQuality_Q15 + uintptr(b)*4)) = SKP_Silk_sigm_Q15(tls, ((SNR_Q7 - (16 * 128)) >> (4)))
 21276  	}
 21277  
 21278  	return ret
 21279  }
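
// Editor's note (not part of the ccgo-generated code): in outline, the function above
// splits the input into four octave bands (0-1, 1-2, 2-4 and 4-8 kHz), high-pass filters
// the lowest band, sums each band's energy over four sub-frames, updates the per-band
// noise levels via SKP_Silk_VAD_GetNoiseLevels, converts the per-band
// (signal+noise)-to-noise ratios to dB and combines them into the overall SNR written to
// *pSNR_dB_Q7, maps that SNR through a sigmoid to a speech-activity probability, derives
// a spectral-tilt measure from the SNR-weighted tiltWeights, attenuates the activity when
// the total above-noise energy is small, writes the result to *pSA_Q8 (0..255), and
// smooths the per-band energy-to-noise ratios to produce the pQuality_Q15 outputs.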
 21280  
 21281  /**************************/
 21282  /* Noise level estimation */
 21283  /**************************/
 21284  func SKP_Silk_VAD_GetNoiseLevels(tls *libc.TLS, pX uintptr, psSilk_VAD uintptr) { /* SKP_Silk_VAD.c:262:6: */
 21285  	var k int32
 21286  	var nl int32
 21287  	var nrg int32
 21288  	var inv_nrg int32
 21289  	var coef int32
 21290  	var min_coef int32
 21291  
 21292  	/* Initially faster smoothing */
 21293  	if (*SKP_Silk_VAD_state)(unsafe.Pointer(psSilk_VAD)).Fcounter < 1000 { /* 1000 = 20 sec */
 21294  		min_coef = ((0x7FFF) / ((((*SKP_Silk_VAD_state)(unsafe.Pointer(psSilk_VAD)).Fcounter) >> (4)) + 1))
 21295  	} else {
 21296  		min_coef = 0
 21297  	}
 21298  
 21299  	for k = 0; k < 4; k++ {
 21300  		/* Get old noise level estimate for current band */
 21301  		nl = *(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(k)*4))
 21302  
 21303  		/* Add bias */
 21304  		nrg = func() int32 {
 21305  			if ((uint32((*(*int32)(unsafe.Pointer(pX + uintptr(k)*4))) + (*(*int32)(unsafe.Pointer((psSilk_VAD + 92 /* &.NoiseLevelBias */) + uintptr(k)*4))))) & 0x80000000) != 0 {
 21306  				return 0x7FFFFFFF
 21307  			}
 21308  			return ((*(*int32)(unsafe.Pointer(pX + uintptr(k)*4))) + (*(*int32)(unsafe.Pointer((psSilk_VAD + 92 /* &.NoiseLevelBias */) + uintptr(k)*4))))
 21309  		}()
 21310  
 21311  		/* Invert energies */
 21312  		inv_nrg = ((0x7FFFFFFF) / (nrg))
 21313  
 21314  		/* Less update when subband energy is high */
 21315  		if nrg > ((nl) << (3)) {
 21316  			coef = (int32(1024) >> 3)
 21317  		} else if nrg < nl {
 21318  			coef = 1024
 21319  		} else {
 21320  			coef = ((((((((inv_nrg) >> 16) * (int32(int16(nl)))) + ((((inv_nrg) & 0x0000FFFF) * (int32(int16(nl)))) >> 16)) + ((inv_nrg) * (func() int32 {
 21321  				if (16) == 1 {
 21322  					return (((nl) >> 1) + ((nl) & 1))
 21323  				}
 21324  				return ((((nl) >> ((16) - 1)) + 1) >> 1)
 21325  			}()))) >> 16) * (int32((int16(int32(1024) << 1))))) + ((((((((inv_nrg) >> 16) * (int32(int16(nl)))) + ((((inv_nrg) & 0x0000FFFF) * (int32(int16(nl)))) >> 16)) + ((inv_nrg) * (func() int32 {
 21326  				if (16) == 1 {
 21327  					return (((nl) >> 1) + ((nl) & 1))
 21328  				}
 21329  				return ((((nl) >> ((16) - 1)) + 1) >> 1)
 21330  			}()))) & 0x0000FFFF) * (int32((int16(int32(1024) << 1))))) >> 16))
 21331  		}
 21332  
 21333  		/* Initially faster smoothing */
 21334  		coef = SKP_max_int(tls, coef, min_coef)
 21335  
 21336  		/* Smooth inverse energies */
 21337  		*(*int32)(unsafe.Pointer((psSilk_VAD + 76 /* &.inv_NL */) + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer((psSilk_VAD + 76 /* &.inv_NL */) + uintptr(k)*4))) + ((((inv_nrg - *(*int32)(unsafe.Pointer((psSilk_VAD + 76 /* &.inv_NL */) + uintptr(k)*4))) >> 16) * (int32(int16(coef)))) + ((((inv_nrg - *(*int32)(unsafe.Pointer((psSilk_VAD + 76 /* &.inv_NL */) + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(coef)))) >> 16)))
 21338  
 21339  		/* Compute noise level by inverting again */
 21340  		nl = ((0x7FFFFFFF) / (*(*int32)(unsafe.Pointer((psSilk_VAD + 76 /* &.inv_NL */) + uintptr(k)*4))))
 21341  
 21342  		/* Limit noise levels (guarantee 7 bits of head room) */
 21343  		nl = func() int32 {
 21344  			if (nl) < (0x00FFFFFF) {
 21345  				return nl
 21346  			}
 21347  			return 0x00FFFFFF
 21348  		}()
 21349  
 21350  		/* Store as part of state */
 21351  		*(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(k)*4)) = nl
 21352  	}
 21353  
 21354  	/* Increment frame counter */
 21355  	(*SKP_Silk_VAD_state)(unsafe.Pointer(psSilk_VAD)).Fcounter++
 21356  }
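
// Editor's sketch (not part of the ccgo-generated code): the noise tracker above redone
// in floating point, with the Q-format scaling, saturation and the persistent inv_NL
// state simplified away.  The constants mirror the fixed-point ones above (1024/2^16,
// 128/2^16, the 2^24 cap); the function name, float types and in/out layout are
// illustrative assumptions, not part of the SILK API.  The key idea is "slow up / fast
// down" smoothing performed on the inverse energies, so noise-only frames pull the
// estimate down quickly while speech frames barely move it.
func noiseLevelSketch(noiseLvl *[4]float64, bandNrg, bias [4]float64, frameCount int) {
	// Faster adaptation during roughly the first 20 seconds (cf. min_coef above).
	minCoef := 0.0
	if frameCount < 1000 {
		minCoef = 0.5 / float64(frameCount/16+1)
	}
	for k := 0; k < 4; k++ {
		nl := noiseLvl[k] // assumed > 0, as the bias guarantees in the real code
		nrg := bandNrg[k] + bias[k]
		// Step size: small when the band energy is far above the noise estimate
		// (probably speech), larger when it is at or below it (probably noise).
		var coef float64
		switch {
		case nrg > 8*nl:
			coef = 128.0 / 65536.0
		case nrg < nl:
			coef = 1024.0 / 65536.0
		default:
			coef = (1024.0 / 65536.0) * (nl / nrg)
		}
		if coef < minCoef {
			coef = minCoef
		}
		// Smooth in the inverse-energy domain, invert back and cap, as above.
		invNL := 1/nl + coef*(1/nrg-1/nl)
		nl = 1 / invNL
		if nl > float64(1<<24-1) {
			nl = float64(1<<24 - 1)
		}
		noiseLvl[k] = nl
	}
}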
 21357  
 21358  /* Entropy constrained MATRIX-weighted VQ, hard-coded to 5-element vectors, for a single input data vector */
 21359  func SKP_Silk_VQ_WMat_EC_FIX(tls *libc.TLS, ind uintptr, rate_dist_Q14 uintptr, in_Q14 uintptr, W_Q18 uintptr, cb_Q14 uintptr, cl_Q6 uintptr, mu_Q8 int32, L int32) { /* SKP_Silk_VQ_nearest_neighbor_FIX.c:31:6: */
 21360  	bp := tls.Alloc(10)
 21361  	defer tls.Free(10)
 21362  
 21363  	var k int32
 21364  	var cb_row_Q14 uintptr
 21365  	// var diff_Q14 [5]int16 at bp, 10
 21366  
 21367  	var sum1_Q14 int32
 21368  	var sum2_Q16 int32
 21369  
 21370  	/* Loop over codebook */
 21371  	*(*int32)(unsafe.Pointer(rate_dist_Q14)) = 0x7FFFFFFF
 21372  	cb_row_Q14 = cb_Q14
 21373  	for k = 0; k < L; k++ {
 21374  		*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */)) = (int16(int32(*(*int16)(unsafe.Pointer(in_Q14))) - int32(*(*int16)(unsafe.Pointer(cb_row_Q14)))))
 21375  		*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 1*2)) = (int16(int32(*(*int16)(unsafe.Pointer(in_Q14 + 1*2))) - int32(*(*int16)(unsafe.Pointer(cb_row_Q14 + 1*2)))))
 21376  		*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2)) = (int16(int32(*(*int16)(unsafe.Pointer(in_Q14 + 2*2))) - int32(*(*int16)(unsafe.Pointer(cb_row_Q14 + 2*2)))))
 21377  		*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2)) = (int16(int32(*(*int16)(unsafe.Pointer(in_Q14 + 3*2))) - int32(*(*int16)(unsafe.Pointer(cb_row_Q14 + 3*2)))))
 21378  		*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2)) = (int16(int32(*(*int16)(unsafe.Pointer(in_Q14 + 4*2))) - int32(*(*int16)(unsafe.Pointer(cb_row_Q14 + 4*2)))))
 21379  
 21380  		/* Weighted rate */
 21381  		sum1_Q14 = ((int32(int16(mu_Q8))) * (int32(*(*int16)(unsafe.Pointer(cl_Q6 + uintptr(k)*2)))))
 21382  
 21383  		/* first row of W_Q18 */
 21384  		sum2_Q16 = ((((*(*int32)(unsafe.Pointer(W_Q18 + 1*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 1*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 1*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 1*2))))) >> 16))
 21385  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 2*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 2*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) >> 16)))
 21386  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 3*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 3*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) >> 16)))
 21387  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 4*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 4*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) >> 16)))
 21388  		sum2_Q16 = ((sum2_Q16) << (1))
 21389  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */))))) + ((((*(*int32)(unsafe.Pointer(W_Q18))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */))))) >> 16)))
 21390  		sum1_Q14 = ((sum1_Q14) + ((((sum2_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */))))) + ((((sum2_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */))))) >> 16)))
 21391  
 21392  		/* second row of W_Q18 */
 21393  		sum2_Q16 = ((((*(*int32)(unsafe.Pointer(W_Q18 + 7*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 7*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) >> 16))
 21394  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 8*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 8*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) >> 16)))
 21395  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 9*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 9*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) >> 16)))
 21396  		sum2_Q16 = ((sum2_Q16) << (1))
 21397  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 6*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 1*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 6*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 1*2))))) >> 16)))
 21398  		sum1_Q14 = ((sum1_Q14) + ((((sum2_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 1*2))))) + ((((sum2_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 1*2))))) >> 16)))
 21399  
 21400  		/* third row of W_Q18 */
 21401  		sum2_Q16 = ((((*(*int32)(unsafe.Pointer(W_Q18 + 13*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 13*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) >> 16))
 21402  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 14*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 14*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) >> 16)))
 21403  		sum2_Q16 = ((sum2_Q16) << (1))
 21404  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 12*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 12*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) >> 16)))
 21405  		sum1_Q14 = ((sum1_Q14) + ((((sum2_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) + ((((sum2_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) >> 16)))
 21406  
 21407  		/* fourth row of W_Q18 */
 21408  		sum2_Q16 = ((((*(*int32)(unsafe.Pointer(W_Q18 + 19*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 19*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) >> 16))
 21409  		sum2_Q16 = ((sum2_Q16) << (1))
 21410  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 18*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 18*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) >> 16)))
 21411  		sum1_Q14 = ((sum1_Q14) + ((((sum2_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) + ((((sum2_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) >> 16)))
 21412  
 21413  		/* last row of W_Q18 */
 21414  		sum2_Q16 = ((((*(*int32)(unsafe.Pointer(W_Q18 + 24*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 24*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) >> 16))
 21415  		sum1_Q14 = ((sum1_Q14) + ((((sum2_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) + ((((sum2_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) >> 16)))
 21416  
 21417  		/* find best */
 21418  		if sum1_Q14 < *(*int32)(unsafe.Pointer(rate_dist_Q14)) {
 21419  			*(*int32)(unsafe.Pointer(rate_dist_Q14)) = sum1_Q14
 21420  			*(*int32)(unsafe.Pointer(ind)) = k
 21421  		}
 21422  
 21423  		/* Go to next codebook vector */
 21424  		cb_row_Q14 += 2 * (uintptr(5))
 21425  	}
 21426  }
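
// Editor's sketch (not part of the ccgo-generated code): the codebook search above in
// floating point.  For each 5-dimensional codebook row k it scores
//	score = mu*rate[k] + e'*W*e,  with e = in - cb[k],
// and keeps the minimizer; the fixed-point code above exploits the symmetry of W by
// accumulating each off-diagonal product once and doubling it.  The name, float types
// and slice layout are illustrative assumptions.
func vqWMatSketch(in [5]float64, W [5][5]float64, cb [][5]float64, rate []float64, mu float64) (best int, bestScore float64) {
	bestScore = 1e300
	for k := range cb {
		// Quantization error for this codebook entry.
		var e [5]float64
		for i := 0; i < 5; i++ {
			e[i] = in[i] - cb[k][i]
		}
		// Weighted rate term plus weighted distortion e'*W*e (W symmetric).
		score := mu * rate[k]
		for i := 0; i < 5; i++ {
			score += W[i][i] * e[i] * e[i]
			for j := i + 1; j < 5; j++ {
				score += 2 * W[i][j] * e[i] * e[j]
			}
		}
		if score < bestScore {
			bestScore = score
			best = k
		}
	}
	return best, bestScore
}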
 21427  
 21428  /* Autocorrelations for a warped frequency axis */
 21429  func SKP_Silk_warped_autocorrelation_FIX(tls *libc.TLS, corr uintptr, scale uintptr, input uintptr, warping_Q16 int16, length int32, order int32) { /* SKP_Silk_warped_autocorrelation_FIX.c:35:6: */
 21430  	bp := tls.Alloc(208)
 21431  	defer tls.Free(208)
 21432  
 21433  	var n int32
 21434  	var i int32
 21435  	var lsh int32
 21436  	var tmp1_QS int32
 21437  	var tmp2_QS int32
 21438  	*(*[17]int32)(unsafe.Pointer(bp /* state_QS */)) = [17]int32{0: 0}
 21439  	*(*[17]int64_t)(unsafe.Pointer(bp + 72 /* corr_QC */)) = [17]int64_t{0: int64(0)}
 21440  
 21441  	/* Order must be even */
 21442  
 21443  	/* Loop over samples */
 21444  	for n = 0; n < length; n++ {
 21445  		tmp1_QS = ((int32(*(*int16)(unsafe.Pointer(input + uintptr(n)*2)))) << (14))
 21446  		/* Loop over allpass sections */
 21447  		for i = 0; i < order; i = i + (2) {
 21448  			/* Output of allpass section */
 21449  			tmp2_QS = ((*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr(i)*4))) + ((((*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr((i+1))*4)) - tmp1_QS) >> 16) * (int32(warping_Q16))) + ((((*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr((i+1))*4)) - tmp1_QS) & 0x0000FFFF) * (int32(warping_Q16))) >> 16)))
 21450  			*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr(i)*4)) = tmp1_QS
 21451  			*(*int64_t)(unsafe.Pointer(bp + 72 /* &corr_QC */ + uintptr(i)*8)) += (((int64_t(tmp1_QS)) * (int64_t(*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */))))) >> ((2 * 14) - 10))
 21452  			/* Output of allpass section */
 21453  			tmp1_QS = ((*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr((i+1))*4))) + ((((*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr((i+2))*4)) - tmp2_QS) >> 16) * (int32(warping_Q16))) + ((((*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr((i+2))*4)) - tmp2_QS) & 0x0000FFFF) * (int32(warping_Q16))) >> 16)))
 21454  			*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr((i+1))*4)) = tmp2_QS
 21455  			*(*int64_t)(unsafe.Pointer(bp + 72 /* &corr_QC */ + uintptr((i+1))*8)) += (((int64_t(tmp2_QS)) * (int64_t(*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */))))) >> ((2 * 14) - 10))
 21456  		}
 21457  		*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr(order)*4)) = tmp1_QS
 21458  		*(*int64_t)(unsafe.Pointer(bp + 72 /* &corr_QC */ + uintptr(order)*8)) += (((int64_t(tmp1_QS)) * (int64_t(*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */))))) >> ((2 * 14) - 10))
 21459  	}
 21460  
 21461  	lsh = (SKP_Silk_CLZ64(tls, *(*int64_t)(unsafe.Pointer(bp + 72 /* &corr_QC[0] */))) - 35)
 21462  	lsh = func() int32 {
 21463  		if (-12 - 10) > (30 - 10) {
 21464  			return func() int32 {
 21465  				if (lsh) > (-12 - 10) {
 21466  					return (-12 - 10)
 21467  				}
 21468  				return func() int32 {
 21469  					if (lsh) < (30 - 10) {
 21470  						return (30 - 10)
 21471  					}
 21472  					return lsh
 21473  				}()
 21474  			}()
 21475  		}
 21476  		return func() int32 {
 21477  			if (lsh) > (30 - 10) {
 21478  				return (30 - 10)
 21479  			}
 21480  			return func() int32 {
 21481  				if (lsh) < (-12 - 10) {
 21482  					return (-12 - 10)
 21483  				}
 21484  				return lsh
 21485  			}()
 21486  		}()
 21487  	}()
 21488  	*(*int32)(unsafe.Pointer(scale)) = -(10 + lsh)
 21489  
 21490  	if lsh >= 0 {
 21491  		for i = 0; i < (order + 1); i++ {
 21492  			*(*int32)(unsafe.Pointer(corr + uintptr(i)*4)) = (int32((*(*int64_t)(unsafe.Pointer(bp + 72 /* &corr_QC[0] */ + uintptr(i)*8))) << (lsh)))
 21493  		}
 21494  	} else {
 21495  		for i = 0; i < (order + 1); i++ {
 21496  			*(*int32)(unsafe.Pointer(corr + uintptr(i)*4)) = (int32((*(*int64_t)(unsafe.Pointer(bp + 72 /* &corr_QC[0] */ + uintptr(i)*8))) >> (-lsh)))
 21497  		}
 21498  	}
 21499  	// If the correlation accumulation overflows here, decrease the QC fixed-point scaling
 21500  }
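
// Editor's sketch (not part of the ccgo-generated code): the warped autocorrelation
// above in floating point, ignoring the Q14/QC scaling and the final lsh normalization.
// Each pair of statements in the inner loop is one first-order allpass section
// (out = state[i] + warping*(state[i+1] - in)); chaining `order` of them warps the
// frequency axis, and corr[i] accumulates the i-th section state times the current
// input sample (state[0]).  Order is assumed even, as in the original; the function
// name and float types are illustrative assumptions.
func warpedAutocorrSketch(input []float64, warping float64, order int) []float64 {
	state := make([]float64, order+1)
	corr := make([]float64, order+1)
	for _, x := range input {
		tmp1 := x
		for i := 0; i < order; i += 2 {
			// Output of the first allpass section of the pair.
			tmp2 := state[i] + warping*(state[i+1]-tmp1)
			state[i] = tmp1
			corr[i] += tmp1 * state[0] // state[0] is the current input sample
			// Output of the second allpass section of the pair.
			tmp1 = state[i+1] + warping*(state[i+2]-tmp2)
			state[i+1] = tmp2
			corr[i+1] += tmp2 * state[0]
		}
		state[order] = tmp1
		corr[order] += tmp1 * state[0]
	}
	return corr
}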
 21501  
 21502  func init() {
 21503  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_BITS_Q6_ptrs)) + 0)) = uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_BITS_Q6_0))                                     // SKP_Silk_tables_LTP.c:89:5:
 21504  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_BITS_Q6_ptrs)) + 8)) = uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_BITS_Q6_1))                                     // SKP_Silk_tables_LTP.c:90:5:
 21505  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_BITS_Q6_ptrs)) + 16)) = uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_BITS_Q6_2))                                    // SKP_Silk_tables_LTP.c:91:5:
 21509  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_CDF_ptrs)) + 0)) = uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_CDF_0))                                             // SKP_Silk_tables_LTP.c:83:5:
 21510  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_CDF_ptrs)) + 8)) = uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_CDF_1))                                             // SKP_Silk_tables_LTP.c:84:5:
 21511  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_CDF_ptrs)) + 16)) = uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_CDF_2))                                            // SKP_Silk_tables_LTP.c:85:5:
 21515  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_vq_ptrs_Q14)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_vq_0_Q14)))                                          // SKP_Silk_tables_LTP.c:317:5:
 21516  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_vq_ptrs_Q14)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_vq_1_Q14)))                                          // SKP_Silk_tables_LTP.c:318:5:
 21517  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_vq_ptrs_Q14)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_vq_2_Q14)))                                         // SKP_Silk_tables_LTP.c:319:5:
 21521  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10)) + 8 /* .CBStages */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info))                           // SKP_Silk_tables_NLSF_CB0_10.c:884:9:
 21522  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10)) + 16 /* .NDeltaMin_Q15 */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_ndelta_min_Q15))            // SKP_Silk_tables_NLSF_CB0_10.c:885:9:
 21523  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10)) + 24 /* .CDF */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF))                                 // SKP_Silk_tables_NLSF_CB0_10.c:886:9:
 21524  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10)) + 32 /* .StartPtr */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr))                  // SKP_Silk_tables_NLSF_CB0_10.c:887:9:
 21525  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10)) + 40 /* .MiddleIx */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_middle_idx))                 // SKP_Silk_tables_NLSF_CB0_10.c:888:9:
 21531  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 8 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_Q15)))             // SKP_Silk_tables_NLSF_CB0_10.c:873:16:
 21532  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 16 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5)))          // SKP_Silk_tables_NLSF_CB0_10.c:873:60:
 21533  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 32 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_Q15)) + 640*2)    // SKP_Silk_tables_NLSF_CB0_10.c:874:16:
 21534  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 40 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5)) + 64*2)   // SKP_Silk_tables_NLSF_CB0_10.c:874:60:
 21535  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 56 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_Q15)) + 800*2)    // SKP_Silk_tables_NLSF_CB0_10.c:875:16:
 21536  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 64 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5)) + 80*2)   // SKP_Silk_tables_NLSF_CB0_10.c:875:60:
 21537  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 80 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_Q15)) + 880*2)    // SKP_Silk_tables_NLSF_CB0_10.c:876:16:
 21538  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 88 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5)) + 88*2)   // SKP_Silk_tables_NLSF_CB0_10.c:876:60:
 21539  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 104 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_Q15)) + 960*2)   // SKP_Silk_tables_NLSF_CB0_10.c:877:16:
 21540  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 112 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5)) + 96*2)  // SKP_Silk_tables_NLSF_CB0_10.c:877:60:
 21541  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 128 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_Q15)) + 1040*2)  // SKP_Silk_tables_NLSF_CB0_10.c:878:16:
 21542  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 136 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5)) + 104*2) // SKP_Silk_tables_NLSF_CB0_10.c:878:60:
 21543  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16)) + 8 /* .CBStages */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info))                           // SKP_Silk_tables_NLSF_CB0_16.c:1314:9:
 21544  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16)) + 16 /* .NDeltaMin_Q15 */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_ndelta_min_Q15))            // SKP_Silk_tables_NLSF_CB0_16.c:1315:9:
 21545  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16)) + 24 /* .CDF */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF))                                 // SKP_Silk_tables_NLSF_CB0_16.c:1316:9:
 21546  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16)) + 32 /* .StartPtr */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr))                  // SKP_Silk_tables_NLSF_CB0_16.c:1317:9:
 21547  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16)) + 40 /* .MiddleIx */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_middle_idx))                 // SKP_Silk_tables_NLSF_CB0_16.c:1318:9:
 21553  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 8 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)))             // SKP_Silk_tables_NLSF_CB0_16.c:1299:16:
 21554  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 16 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)))          // SKP_Silk_tables_NLSF_CB0_16.c:1299:60:
 21555  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 32 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 2048*2)   // SKP_Silk_tables_NLSF_CB0_16.c:1300:16:
 21556  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 40 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 128*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1300:60:
 21557  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 56 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 2304*2)   // SKP_Silk_tables_NLSF_CB0_16.c:1301:16:
 21558  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 64 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 144*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1301:60:
 21559  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 80 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 2432*2)   // SKP_Silk_tables_NLSF_CB0_16.c:1302:16:
 21560  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 88 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 152*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1302:60:
 21561  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 104 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 2560*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1303:16:
 21562  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 112 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 160*2) // SKP_Silk_tables_NLSF_CB0_16.c:1303:60:
 21563  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 128 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 2688*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1304:16:
 21564  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 136 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 168*2) // SKP_Silk_tables_NLSF_CB0_16.c:1304:60:
 21565  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 152 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 2816*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1305:16:
 21566  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 160 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 176*2) // SKP_Silk_tables_NLSF_CB0_16.c:1305:60:
 21567  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 176 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 2944*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1306:16:
 21568  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 184 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 184*2) // SKP_Silk_tables_NLSF_CB0_16.c:1306:60:
 21569  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 200 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 3072*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1307:16:
 21570  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 208 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 192*2) // SKP_Silk_tables_NLSF_CB0_16.c:1307:60:
 21571  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 224 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 3200*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1308:16:
 21572  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 232 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 200*2) // SKP_Silk_tables_NLSF_CB0_16.c:1308:60:
 21573  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 8 /* .CBStages */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info))                           // SKP_Silk_tables_NLSF_CB1_10.c:572:9:
 21574  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 16 /* .NDeltaMin_Q15 */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_ndelta_min_Q15))            // SKP_Silk_tables_NLSF_CB1_10.c:573:9:
 21575  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 24 /* .CDF */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF))                                 // SKP_Silk_tables_NLSF_CB1_10.c:574:9:
 21576  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 32 /* .StartPtr */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr))                  // SKP_Silk_tables_NLSF_CB1_10.c:575:9:
 21577  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 40 /* .MiddleIx */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_middle_idx))                 // SKP_Silk_tables_NLSF_CB1_10.c:576:9:
 21578  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 8 /* .CBStages */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info))                           // SKP_Silk_tables_NLSF_CB1_10.c:572:9:
 21579  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 16 /* .NDeltaMin_Q15 */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_ndelta_min_Q15))            // SKP_Silk_tables_NLSF_CB1_10.c:573:9:
 21580  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 24 /* .CDF */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF))                                 // SKP_Silk_tables_NLSF_CB1_10.c:574:9:
 21581  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 32 /* .StartPtr */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr))                  // SKP_Silk_tables_NLSF_CB1_10.c:575:9:
 21582  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 40 /* .MiddleIx */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_middle_idx))                 // SKP_Silk_tables_NLSF_CB1_10.c:576:9:
 21583  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 8 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_Q15)))             // SKP_Silk_tables_NLSF_CB1_10.c:561:16:
 21584  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 16 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_rates_Q5)))          // SKP_Silk_tables_NLSF_CB1_10.c:561:60:
 21585  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 32 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_Q15)) + 320*2)    // SKP_Silk_tables_NLSF_CB1_10.c:562:16:
 21586  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 40 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_rates_Q5)) + 32*2)   // SKP_Silk_tables_NLSF_CB1_10.c:562:60:
 21587  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 56 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_Q15)) + 400*2)    // SKP_Silk_tables_NLSF_CB1_10.c:563:16:
 21588  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 64 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_rates_Q5)) + 40*2)   // SKP_Silk_tables_NLSF_CB1_10.c:563:60:
 21589  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 80 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_Q15)) + 480*2)    // SKP_Silk_tables_NLSF_CB1_10.c:564:16:
 21590  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 88 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_rates_Q5)) + 48*2)   // SKP_Silk_tables_NLSF_CB1_10.c:564:60:
 21591  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 104 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_Q15)) + 560*2)   // SKP_Silk_tables_NLSF_CB1_10.c:565:16:
 21592  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 112 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_rates_Q5)) + 56*2)  // SKP_Silk_tables_NLSF_CB1_10.c:565:60:
 21593  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 128 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_Q15)) + 640*2)   // SKP_Silk_tables_NLSF_CB1_10.c:566:16:
 21594  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 136 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_rates_Q5)) + 64*2)  // SKP_Silk_tables_NLSF_CB1_10.c:566:60:
 21595  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 8 /* .CBStages */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info))                           // SKP_Silk_tables_NLSF_CB1_16.c:698:9:
 21596  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 16 /* .NDeltaMin_Q15 */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_ndelta_min_Q15))            // SKP_Silk_tables_NLSF_CB1_16.c:699:9:
 21597  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 24 /* .CDF */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF))                                 // SKP_Silk_tables_NLSF_CB1_16.c:700:9:
 21598  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 32 /* .StartPtr */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr))                  // SKP_Silk_tables_NLSF_CB1_16.c:701:9:
 21599  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 40 /* .MiddleIx */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_middle_idx))                 // SKP_Silk_tables_NLSF_CB1_16.c:702:9:
 21600  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 8 /* .CBStages */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info))                           // SKP_Silk_tables_NLSF_CB1_16.c:698:9:
 21601  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 16 /* .NDeltaMin_Q15 */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_ndelta_min_Q15))            // SKP_Silk_tables_NLSF_CB1_16.c:699:9:
 21602  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 24 /* .CDF */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF))                                 // SKP_Silk_tables_NLSF_CB1_16.c:700:9:
 21603  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 32 /* .StartPtr */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr))                  // SKP_Silk_tables_NLSF_CB1_16.c:701:9:
 21604  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 40 /* .MiddleIx */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_middle_idx))                 // SKP_Silk_tables_NLSF_CB1_16.c:702:9:
 21605  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 8 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)))             // SKP_Silk_tables_NLSF_CB1_16.c:683:16:
 21606  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 16 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)))          // SKP_Silk_tables_NLSF_CB1_16.c:683:60:
 21607  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 32 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 512*2)    // SKP_Silk_tables_NLSF_CB1_16.c:684:16:
 21608  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 40 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 32*2)   // SKP_Silk_tables_NLSF_CB1_16.c:684:60:
 21609  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 56 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 640*2)    // SKP_Silk_tables_NLSF_CB1_16.c:685:16:
 21610  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 64 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 40*2)   // SKP_Silk_tables_NLSF_CB1_16.c:685:60:
 21611  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 80 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 768*2)    // SKP_Silk_tables_NLSF_CB1_16.c:686:16:
 21612  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 88 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 48*2)   // SKP_Silk_tables_NLSF_CB1_16.c:686:60:
 21613  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 104 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 896*2)   // SKP_Silk_tables_NLSF_CB1_16.c:687:16:
 21614  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 112 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 56*2)  // SKP_Silk_tables_NLSF_CB1_16.c:687:60:
 21615  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 128 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 1024*2)  // SKP_Silk_tables_NLSF_CB1_16.c:688:16:
 21616  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 136 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 64*2)  // SKP_Silk_tables_NLSF_CB1_16.c:688:60:
 21617  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 152 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 1152*2)  // SKP_Silk_tables_NLSF_CB1_16.c:689:16:
 21618  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 160 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 72*2)  // SKP_Silk_tables_NLSF_CB1_16.c:689:60:
 21619  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 176 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 1280*2)  // SKP_Silk_tables_NLSF_CB1_16.c:690:16:
 21620  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 184 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 80*2)  // SKP_Silk_tables_NLSF_CB1_16.c:690:60:
 21621  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 200 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 1408*2)  // SKP_Silk_tables_NLSF_CB1_16.c:691:16:
 21622  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 208 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 88*2)  // SKP_Silk_tables_NLSF_CB1_16.c:691:60:
 21623  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 224 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 1536*2)  // SKP_Silk_tables_NLSF_CB1_16.c:692:16:
 21624  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 232 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 96*2)  // SKP_Silk_tables_NLSF_CB1_16.c:692:60:
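	// The remaining assignments fill the *_CDF_start_ptr pointer tables: each
	// slot points at the first cumulative-frequency entry of one codebook stage
	// inside the corresponding *_CDF array (again scaled by 2 bytes per 16-bit entry).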
 21625  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)))                        // SKP_Silk_tables_NLSF_CB0_10.c:170:6:
 21626  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 65*2)                 // SKP_Silk_tables_NLSF_CB0_10.c:171:6:
 21627  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 82*2)                // SKP_Silk_tables_NLSF_CB0_10.c:172:6:
 21628  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 24)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 91*2)                // SKP_Silk_tables_NLSF_CB0_10.c:173:6:
 21629  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 32)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 100*2)               // SKP_Silk_tables_NLSF_CB0_10.c:174:6:
 21630  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 40)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 109*2)               // SKP_Silk_tables_NLSF_CB0_10.c:175:6:
 21631  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)))                        // SKP_Silk_tables_NLSF_CB0_10.c:170:6:
 21632  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 65*2)                 // SKP_Silk_tables_NLSF_CB0_10.c:171:6:
 21633  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 82*2)                // SKP_Silk_tables_NLSF_CB0_10.c:172:6:
 21634  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 24)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 91*2)                // SKP_Silk_tables_NLSF_CB0_10.c:173:6:
 21635  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 32)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 100*2)               // SKP_Silk_tables_NLSF_CB0_10.c:174:6:
 21636  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 40)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 109*2)               // SKP_Silk_tables_NLSF_CB0_10.c:175:6:
 21637  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)))                        // SKP_Silk_tables_NLSF_CB0_16.c:270:6:
 21638  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 129*2)                // SKP_Silk_tables_NLSF_CB0_16.c:271:6:
 21639  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 146*2)               // SKP_Silk_tables_NLSF_CB0_16.c:272:6:
 21640  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 24)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 155*2)               // SKP_Silk_tables_NLSF_CB0_16.c:273:6:
 21641  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 32)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 164*2)               // SKP_Silk_tables_NLSF_CB0_16.c:274:6:
 21642  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 40)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 173*2)               // SKP_Silk_tables_NLSF_CB0_16.c:275:6:
 21643  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 48)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 182*2)               // SKP_Silk_tables_NLSF_CB0_16.c:276:6:
 21644  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 56)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 191*2)               // SKP_Silk_tables_NLSF_CB0_16.c:277:6:
 21645  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 64)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 200*2)               // SKP_Silk_tables_NLSF_CB0_16.c:278:6:
 21646  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 72)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 209*2)               // SKP_Silk_tables_NLSF_CB0_16.c:279:6:
 21647  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)))                        // SKP_Silk_tables_NLSF_CB0_16.c:270:6:
 21648  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 129*2)                // SKP_Silk_tables_NLSF_CB0_16.c:271:6:
 21649  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 146*2)               // SKP_Silk_tables_NLSF_CB0_16.c:272:6:
 21650  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 24)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 155*2)               // SKP_Silk_tables_NLSF_CB0_16.c:273:6:
 21651  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 32)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 164*2)               // SKP_Silk_tables_NLSF_CB0_16.c:274:6:
 21652  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 40)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 173*2)               // SKP_Silk_tables_NLSF_CB0_16.c:275:6:
 21653  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 48)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 182*2)               // SKP_Silk_tables_NLSF_CB0_16.c:276:6:
 21654  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 56)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 191*2)               // SKP_Silk_tables_NLSF_CB0_16.c:277:6:
 21655  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 64)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 200*2)               // SKP_Silk_tables_NLSF_CB0_16.c:278:6:
 21656  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 72)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 209*2)               // SKP_Silk_tables_NLSF_CB0_16.c:279:6:
 21657  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)))                        // SKP_Silk_tables_NLSF_CB1_10.c:122:6:
 21658  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 33*2)                 // SKP_Silk_tables_NLSF_CB1_10.c:123:6:
 21659  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 42*2)                // SKP_Silk_tables_NLSF_CB1_10.c:124:6:
 21660  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 24)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 51*2)                // SKP_Silk_tables_NLSF_CB1_10.c:125:6:
 21661  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 32)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 60*2)                // SKP_Silk_tables_NLSF_CB1_10.c:126:6:
 21662  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 40)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 69*2)                // SKP_Silk_tables_NLSF_CB1_10.c:127:6:
 21663  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)))                        // SKP_Silk_tables_NLSF_CB1_10.c:122:6:
 21664  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 33*2)                 // SKP_Silk_tables_NLSF_CB1_10.c:123:6:
 21665  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 42*2)                // SKP_Silk_tables_NLSF_CB1_10.c:124:6:
 21666  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 24)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 51*2)                // SKP_Silk_tables_NLSF_CB1_10.c:125:6:
 21667  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 32)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 60*2)                // SKP_Silk_tables_NLSF_CB1_10.c:126:6:
 21668  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 40)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 69*2)                // SKP_Silk_tables_NLSF_CB1_10.c:127:6:
 21669  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)))                        // SKP_Silk_tables_NLSF_CB1_16.c:158:6:
 21670  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 33*2)                 // SKP_Silk_tables_NLSF_CB1_16.c:159:6:
 21671  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 42*2)                // SKP_Silk_tables_NLSF_CB1_16.c:160:6:
 21672  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 24)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 51*2)                // SKP_Silk_tables_NLSF_CB1_16.c:161:6:
 21673  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 32)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 60*2)                // SKP_Silk_tables_NLSF_CB1_16.c:162:6:
 21674  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 40)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 69*2)                // SKP_Silk_tables_NLSF_CB1_16.c:163:6:
 21675  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 48)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 78*2)                // SKP_Silk_tables_NLSF_CB1_16.c:164:6:
 21676  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 56)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 87*2)                // SKP_Silk_tables_NLSF_CB1_16.c:165:6:
 21677  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 64)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 96*2)                // SKP_Silk_tables_NLSF_CB1_16.c:166:6:
 21678  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 72)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 105*2)               // SKP_Silk_tables_NLSF_CB1_16.c:167:6:
 21679  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)))                        // SKP_Silk_tables_NLSF_CB1_16.c:158:6:
 21680  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 33*2)                 // SKP_Silk_tables_NLSF_CB1_16.c:159:6:
 21681  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 42*2)                // SKP_Silk_tables_NLSF_CB1_16.c:160:6:
 21682  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 24)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 51*2)                // SKP_Silk_tables_NLSF_CB1_16.c:161:6:
 21683  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 32)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 60*2)                // SKP_Silk_tables_NLSF_CB1_16.c:162:6:
 21684  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 40)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 69*2)                // SKP_Silk_tables_NLSF_CB1_16.c:163:6:
 21685  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 48)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 78*2)                // SKP_Silk_tables_NLSF_CB1_16.c:164:6:
 21686  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 56)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 87*2)                // SKP_Silk_tables_NLSF_CB1_16.c:165:6:
 21687  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 64)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 96*2)                // SKP_Silk_tables_NLSF_CB1_16.c:166:6:
 21688  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 72)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 105*2)               // SKP_Silk_tables_NLSF_CB1_16.c:167:6:
 21689  }
 21690  
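// ts1 is the string-literal table for this translation unit; it holds the
// NUL-terminated "1.0.9" string (presumably the SDK version string), and ts
// exposes its base address as a uintptr for the translated C code.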
 21691  var ts1 = "1.0.9\x00"
 21692  var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data