github.com/wdvxdr1123/go-silk@v0.0.0-20210316130616-d47b553def60/sdk/skype_silk_sdk_32.go (about)

     1  // Code generated by 'ccgo -host-config-cmd i686-w64-mingw32-cpp -export-fields F -pkgname silk -replace-fd-zero -compiledb -trace-translation-units "" -o silk_windows_386.go SKP_Silk_typedef.h SKP_Silk_A2NLSF.c SKP_Silk_ana_filt_bank_1.c SKP_Silk_apply_sine_window.c SKP_Silk_array_maxabs.c SKP_Silk_autocorr.c SKP_Silk_biquad.c SKP_Silk_biquad_alt.c SKP_Silk_burg_modified.c SKP_Silk_bwexpander.c SKP_Silk_bwexpander_32.c SKP_Silk_CNG.c SKP_Silk_code_signs.c SKP_Silk_common_pitch_est_defines.h SKP_Silk_control.h SKP_Silk_control_audio_bandwidth.c SKP_Silk_control_codec_FIX.c SKP_Silk_corrMatrix_FIX.c SKP_Silk_create_init_destroy.c SKP_Silk_decoder_set_fs.c SKP_Silk_decode_core.c SKP_Silk_decode_frame.c SKP_Silk_decode_parameters.c SKP_Silk_decode_pitch.c SKP_Silk_decode_pulses.c SKP_Silk_dec_API.c SKP_Silk_define.h SKP_Silk_detect_SWB_input.c SKP_Silk_encode_frame_FIX.c SKP_Silk_encode_parameters.c SKP_Silk_encode_pulses.c SKP_Silk_enc_API.c SKP_Silk_errors.h SKP_Silk_find_LPC_FIX.c SKP_Silk_find_LTP_FIX.c SKP_Silk_find_pitch_lags_FIX.c SKP_Silk_find_pred_coefs_FIX.c SKP_Silk_gain_quant.c SKP_Silk_HP_variable_cutoff_FIX.c SKP_Silk_init_encoder_FIX.c SKP_Silk_Inlines.h SKP_Silk_inner_prod_aligned.c SKP_Silk_interpolate.c SKP_Silk_k2a.c SKP_Silk_k2a_Q16.c SKP_Silk_LBRR_reset.c SKP_Silk_lin2log.c SKP_Silk_log2lin.c SKP_Silk_LPC_inv_pred_gain.c SKP_Silk_LPC_synthesis_filter.c SKP_Silk_LPC_synthesis_order16.c SKP_Silk_LP_variable_cutoff.c SKP_Silk_LSF_cos_table.c SKP_Silk_LTP_analysis_filter_FIX.c SKP_Silk_LTP_scale_ctrl_FIX.c SKP_Silk_MA.c SKP_Silk_macros.h SKP_Silk_main.h SKP_Silk_main_FIX.h SKP_Silk_NLSF2A.c SKP_Silk_NLSF2A_stable.c SKP_Silk_NLSF_MSVQ_decode.c SKP_Silk_NLSF_MSVQ_encode_FIX.c SKP_Silk_NLSF_stabilize.c SKP_Silk_NLSF_VQ_rate_distortion_FIX.c SKP_Silk_NLSF_VQ_sum_error_FIX.c SKP_Silk_NLSF_VQ_weights_laroia.c SKP_Silk_noise_shape_analysis_FIX.c SKP_Silk_NSQ.c SKP_Silk_NSQ_del_dec.c SKP_Silk_pitch_analysis_core.c SKP_Silk_pitch_est_defines.h SKP_Silk_pitch_est_tables.c SKP_Silk_PLC.c SKP_Silk_PLC.h SKP_Silk_prefilter_FIX.c SKP_Silk_process_gains_FIX.c SKP_Silk_process_NLSFs_FIX.c SKP_Silk_quant_LTP_gains_FIX.c SKP_Silk_range_coder.c SKP_Silk_regularize_correlations_FIX.c SKP_Silk_resampler.c SKP_Silk_resampler_down2.c SKP_Silk_resampler_down2_3.c SKP_Silk_resampler_down3.c SKP_Silk_resampler_private.h SKP_Silk_resampler_private_AR2.c SKP_Silk_resampler_private_ARMA4.c SKP_Silk_resampler_private_copy.c SKP_Silk_resampler_private_down4.c SKP_Silk_resampler_private_down_FIR.c SKP_Silk_resampler_private_IIR_FIR.c SKP_Silk_resampler_private_up2_HQ.c SKP_Silk_resampler_private_up4.c SKP_Silk_resampler_rom.c SKP_Silk_resampler_rom.h SKP_Silk_resampler_structs.h SKP_Silk_resampler_up2.c SKP_Silk_residual_energy16_FIX.c SKP_Silk_residual_energy_FIX.c SKP_Silk_scale_copy_vector16.c SKP_Silk_scale_vector.c SKP_Silk_schur.c SKP_Silk_schur64.c SKP_Silk_SDK_API.h SKP_Silk_setup_complexity.h SKP_Silk_shell_coder.c SKP_Silk_sigm_Q15.c SKP_Silk_SigProc_FIX.h SKP_Silk_solve_LS_FIX.c SKP_Silk_sort.c SKP_Silk_structs.h SKP_Silk_structs_FIX.h SKP_Silk_sum_sqr_shift.c SKP_Silk_tables.h SKP_Silk_tables_gain.c SKP_Silk_tables_LTP.c SKP_Silk_tables_NLSF_CB0_10.c SKP_Silk_tables_NLSF_CB0_10.h SKP_Silk_tables_NLSF_CB0_16.c SKP_Silk_tables_NLSF_CB0_16.h SKP_Silk_tables_NLSF_CB1_10.c SKP_Silk_tables_NLSF_CB1_10.h SKP_Silk_tables_NLSF_CB1_16.c SKP_Silk_tables_NLSF_CB1_16.h SKP_Silk_tables_other.c SKP_Silk_tables_pitch_lag.c SKP_Silk_tables_pulses_per_block.c SKP_Silk_tables_sign.c 
SKP_Silk_tables_type_offset.c SKP_Silk_tuning_parameters.h SKP_Silk_VAD.c SKP_Silk_VQ_nearest_neighbor_FIX.c SKP_Silk_warped_autocorrelation_FIX.c "" -DNDEBUG', DO NOT EDIT.
     2  //+build 386 arm
     3  
     4  package sdk
     5  
     6  import (
     7  	"math"
     8  	"reflect"
     9  	"sync/atomic"
    10  	"unsafe"
    11  
    12  	"modernc.org/libc"
    13  	"modernc.org/libc/sys/types"
    14  )
    15  
    16  var _ = math.Pi
    17  var _ reflect.Kind
    18  var _ atomic.Value
    19  var _ unsafe.Pointer
    20  var _ types.Size_t
    21  
    22  type ptrdiff_t = int32 /* <builtin>:3:26 */
    23  
    24  type size_t = uint32 /* <builtin>:9:23 */
    25  
    26  type wchar_t = uint16 /* <builtin>:15:24 */
    27  
    28  type va_list = uintptr /* <builtin>:50:27 */
    29  
    30  type ssize_t = int32 /* crtdefs.h:47:13 */
    31  
    32  type rsize_t = size_t /* crtdefs.h:52:16 */
    33  
    34  type intptr_t = int32 /* crtdefs.h:64:13 */
    35  
    36  type uintptr_t = uint32 /* crtdefs.h:77:22 */
    37  
    38  type wint_t = uint16   /* crtdefs.h:106:24 */
    39  type wctype_t = uint16 /* crtdefs.h:107:24 */
    40  
    41  type errno_t = int32 /* crtdefs.h:113:13 */
    42  
    43  type time_t = int32 /* crtdefs.h:136:20 */
    44  
    45  type threadlocaleinfostruct = struct {
    46  	Frefcount      int32
    47  	Flc_codepage   uint32
    48  	Flc_collate_cp uint32
    49  	Flc_handle     [6]uint32
    50  	Flc_id         [6]LC_ID
    51  	Flc_category   [6]struct {
    52  		Flocale    uintptr
    53  		Fwlocale   uintptr
    54  		Frefcount  uintptr
    55  		Fwrefcount uintptr
    56  	}
    57  	Flc_clike            int32
    58  	Fmb_cur_max          int32
    59  	Flconv_intl_refcount uintptr
    60  	Flconv_num_refcount  uintptr
    61  	Flconv_mon_refcount  uintptr
    62  	Flconv               uintptr
    63  	Fctype1_refcount     uintptr
    64  	Fctype1              uintptr
    65  	Fpctype              uintptr
    66  	Fpclmap              uintptr
    67  	Fpcumap              uintptr
    68  	Flc_time_curr        uintptr
    69  } /* crtdefs.h:422:1 */
    70  
    71  type pthreadlocinfo = uintptr /* crtdefs.h:424:39 */
    72  type pthreadmbcinfo = uintptr /* crtdefs.h:425:36 */
    73  
    74  type localeinfo_struct = struct {
    75  	Flocinfo pthreadlocinfo
    76  	Fmbcinfo pthreadmbcinfo
    77  } /* crtdefs.h:428:9 */
    78  
    79  type _locale_tstruct = localeinfo_struct /* crtdefs.h:431:3 */
    80  type _locale_t = uintptr                 /* crtdefs.h:431:19 */
    81  
    82  type tagLC_ID = struct {
    83  	FwLanguage uint16
    84  	FwCountry  uint16
    85  	FwCodePage uint16
    86  } /* crtdefs.h:422:1 */
    87  
    88  type LC_ID = tagLC_ID  /* crtdefs.h:439:3 */
    89  type LPLC_ID = uintptr /* crtdefs.h:439:9 */
    90  
    91  type threadlocinfo = threadlocaleinfostruct /* crtdefs.h:468:3 */
    92  
    93  // ISO C Standard:  7.17  Common definitions  <stddef.h>
    94  
    95  // Any one of these symbols __need_* means that GNU libc
    96  //    wants us just to define one data type.  So don't define
    97  //    the symbols that indicate this file's entire job has been done.
    98  
    99  // In 4.3bsd-net2, machine/ansi.h defines these symbols, which are
   100  //    defined if the corresponding type is *not* defined.
   101  //    FreeBSD-2.1 defines _MACHINE_ANSI_H_ instead of _ANSI_H_
   102  
   103  // Sequent's header files use _PTRDIFF_T_ in some conflicting way.
   104  //    Just ignore it.
   105  
   106  // On VxWorks, <type/vxTypesBase.h> may have defined macros like
   107  //    _TYPE_size_t which will typedef size_t.  fixincludes patched the
   108  //    vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is
   109  //    not defined, and so that defining this macro defines _GCC_SIZE_T.
   110  //    If we find that the macros are still defined at this point, we must
   111  //    invoke them so that the type is defined as expected.
   112  
   113  // In case nobody has defined these types, but we aren't running under
   114  //    GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and
   115  //    __WCHAR_TYPE__ have reasonable values.  This can happen if the
    116  //    parts of GCC that actually include gstddef.h, such as collect2,
    117  //    are compiled by an older compiler.
   118  
   119  // Signed type of difference of two pointers.
   120  
   121  // Define this type if we are doing the whole job,
   122  //    or if we want this type in particular.
   123  
   124  // Unsigned type of `sizeof' something.
   125  
   126  // Define this type if we are doing the whole job,
   127  //    or if we want this type in particular.
   128  
   129  // Wide character type.
   130  //    Locale-writers should change this as necessary to
   131  //    be big enough to hold unique values not between 0 and 127,
   132  //    and not (wchar_t) -1, for each defined multibyte character.
   133  
   134  // Define this type if we are doing the whole job,
   135  //    or if we want this type in particular.
   136  
   137  //  In 4.3bsd-net2, leave these undefined to indicate that size_t, etc.
   138  //     are already defined.
   139  //  BSD/OS 3.1 and FreeBSD [23].x require the MACHINE_ANSI_H check here.
   140  
   141  // A null pointer constant.
   142  
   143  // Copyright (C) 1989-2018 Free Software Foundation, Inc.
   144  //
   145  // This file is part of GCC.
   146  //
   147  // GCC is free software; you can redistribute it and/or modify
   148  // it under the terms of the GNU General Public License as published by
   149  // the Free Software Foundation; either version 3, or (at your option)
   150  // any later version.
   151  //
   152  // GCC is distributed in the hope that it will be useful,
   153  // but WITHOUT ANY WARRANTY; without even the implied warranty of
   154  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   155  // GNU General Public License for more details.
   156  //
   157  // Under Section 7 of GPL version 3, you are granted additional
   158  // permissions described in the GCC Runtime Library Exception, version
   159  // 3.1, as published by the Free Software Foundation.
   160  //
   161  // You should have received a copy of the GNU General Public License and
   162  // a copy of the GCC Runtime Library Exception along with this program;
   163  // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   164  // <http://www.gnu.org/licenses/>.
   165  
   166  // ISO C Standard:  7.17  Common definitions  <stddef.h>
   167  
   168  // Any one of these symbols __need_* means that GNU libc
   169  //    wants us just to define one data type.  So don't define
   170  //    the symbols that indicate this file's entire job has been done.
   171  // snaroff@next.com says the NeXT needs this.
   172  
   173  // This avoids lossage on SunOS but only if stdtypes.h comes first.
   174  //    There's no way to win with the other order!  Sun lossage.
   175  
   176  // On 4.3bsd-net2, make sure ansi.h is included, so we have
   177  //    one less case to deal with in the following.
   178  // On FreeBSD 5, machine/ansi.h does not exist anymore...
   179  
   180  // In 4.3bsd-net2, machine/ansi.h defines these symbols, which are
   181  //    defined if the corresponding type is *not* defined.
   182  //    FreeBSD-2.1 defines _MACHINE_ANSI_H_ instead of _ANSI_H_.
   183  //    NetBSD defines _I386_ANSI_H_ and _X86_64_ANSI_H_ instead of _ANSI_H_
   184  
   185  // Sequent's header files use _PTRDIFF_T_ in some conflicting way.
   186  //    Just ignore it.
   187  
   188  // On VxWorks, <type/vxTypesBase.h> may have defined macros like
   189  //    _TYPE_size_t which will typedef size_t.  fixincludes patched the
   190  //    vxTypesBase.h so that this macro is only defined if _GCC_SIZE_T is
   191  //    not defined, and so that defining this macro defines _GCC_SIZE_T.
   192  //    If we find that the macros are still defined at this point, we must
   193  //    invoke them so that the type is defined as expected.
   194  
   195  // In case nobody has defined these types, but we aren't running under
   196  //    GCC 2.00, make sure that __PTRDIFF_TYPE__, __SIZE_TYPE__, and
   197  //    __WCHAR_TYPE__ have reasonable values.  This can happen if the
    198  //    parts of GCC that actually include gstddef.h, such as collect2,
    199  //    are compiled by an older compiler.
   200  
   201  // Signed type of difference of two pointers.
   202  
   203  // Define this type if we are doing the whole job,
   204  //    or if we want this type in particular.
   205  
   206  // If this symbol has done its job, get rid of it.
   207  
   208  // Unsigned type of `sizeof' something.
   209  
   210  // Define this type if we are doing the whole job,
   211  //    or if we want this type in particular.
   212  
   213  // Wide character type.
   214  //    Locale-writers should change this as necessary to
   215  //    be big enough to hold unique values not between 0 and 127,
   216  //    and not (wchar_t) -1, for each defined multibyte character.
   217  
   218  // Define this type if we are doing the whole job,
   219  //    or if we want this type in particular.
   220  
   221  //  In 4.3bsd-net2, leave these undefined to indicate that size_t, etc.
   222  //     are already defined.
   223  //  BSD/OS 3.1 and FreeBSD [23].x require the MACHINE_ANSI_H check here.
   224  //  NetBSD 5 requires the I386_ANSI_H and X86_64_ANSI_H checks here.
   225  
   226  // A null pointer constant.
   227  
   228  // Offset of member MEMBER in a struct of type TYPE.
   229  
   230  // Type whose alignment is supported in every context and is at least
   231  //    as great as that of any standard type not using alignment
   232  //    specifiers.
   233  type max_align_t = struct {
   234  	F__max_align_ll   int64
   235  	F__max_align_ld   float64
   236  	F__max_align_f128 float64
   237  } /* stddef.h:438:3 */
   238  
   239  // 7.18.1.1  Exact-width integer types
   240  type int8_t = int8     /* stdint.h:35:21 */
   241  type uint8_t = uint8   /* stdint.h:36:25 */
   242  type int16_t = int16   /* stdint.h:37:16 */
   243  type uint16_t = uint16 /* stdint.h:38:25 */
   244  type int32_t = int32   /* stdint.h:39:14 */
   245  type uint32_t = uint32 /* stdint.h:40:20 */
   246  type int64_t = int64   /* stdint.h:41:38 */
   247  type uint64_t = uint64 /* stdint.h:42:48 */
   248  
   249  // 7.18.1.2  Minimum-width integer types
   250  type int_least8_t = int8     /* stdint.h:45:21 */
   251  type uint_least8_t = uint8   /* stdint.h:46:25 */
   252  type int_least16_t = int16   /* stdint.h:47:16 */
   253  type uint_least16_t = uint16 /* stdint.h:48:25 */
   254  type int_least32_t = int32   /* stdint.h:49:14 */
   255  type uint_least32_t = uint32 /* stdint.h:50:20 */
   256  type int_least64_t = int64   /* stdint.h:51:38 */
   257  type uint_least64_t = uint64 /* stdint.h:52:48 */
   258  
   259  // 7.18.1.3  Fastest minimum-width integer types
   260  //  Not actually guaranteed to be fastest for all purposes
   261  //  Here we use the exact-width types for 8 and 16-bit ints.
   262  type int_fast8_t = int8     /* stdint.h:58:21 */
   263  type uint_fast8_t = uint8   /* stdint.h:59:23 */
   264  type int_fast16_t = int16   /* stdint.h:60:16 */
   265  type uint_fast16_t = uint16 /* stdint.h:61:25 */
   266  type int_fast32_t = int32   /* stdint.h:62:14 */
   267  type uint_fast32_t = uint32 /* stdint.h:63:24 */
   268  type int_fast64_t = int64   /* stdint.h:64:38 */
   269  type uint_fast64_t = uint64 /* stdint.h:65:48 */
   270  
   271  // 7.18.1.5  Greatest-width integer types
   272  type intmax_t = int64   /* stdint.h:68:38 */
   273  type uintmax_t = uint64 /* stdint.h:69:48 */
   274  
   275  // 7.18.2  Limits of specified-width integer types
   276  
   277  // 7.18.2.1  Limits of exact-width integer types
   278  
   279  // 7.18.2.2  Limits of minimum-width integer types
   280  
   281  // 7.18.2.3  Limits of fastest minimum-width integer types
   282  
   283  // 7.18.2.4  Limits of integer types capable of holding
   284  //     object pointers
   285  
   286  // 7.18.2.5  Limits of greatest-width integer types
   287  
   288  // 7.18.3  Limits of other integer types
   289  
   290  // wint_t is unsigned short for compatibility with MS runtime
   291  
   292  // 7.18.4  Macros for integer constants
   293  
   294  // 7.18.4.1  Macros for minimum-width integer constants
   295  //
    296  //     According to Douglas Gwyn <gwyn@arl.mil>:
   297  // 	"This spec was changed in ISO/IEC 9899:1999 TC1; in ISO/IEC
   298  // 	9899:1999 as initially published, the expansion was required
   299  // 	to be an integer constant of precisely matching type, which
   300  // 	is impossible to accomplish for the shorter types on most
   301  // 	platforms, because C99 provides no standard way to designate
   302  // 	an integer constant with width less than that of type int.
   303  // 	TC1 changed this to require just an integer constant
   304  // 	*expression* with *promoted* type."
   305  //
   306  // 	The trick used here is from Clive D W Feather.
   307  
   308  //  The 'trick' doesn't work in C89 for long long because, without
   309  //     suffix, (val) will be evaluated as int, not intmax_t
   310  
   311  // 7.18.4.2  Macros for greatest-width integer constants
   312  
   313  /* assertions */
   314  
   315  // *
   316  // This file has no copyright assigned and is placed in the Public Domain.
   317  // This file is part of the mingw-w64 runtime package.
   318  // No warranty is given; refer to the file DISCLAIMER.PD within this package.
   319  
   320  // *
   321  // This file has no copyright assigned and is placed in the Public Domain.
   322  // This file is part of the mingw-w64 runtime package.
   323  // No warranty is given; refer to the file DISCLAIMER.PD within this package.
   324  
   325  // Copyright (C) 1992-2018 Free Software Foundation, Inc.
   326  //
   327  // This file is part of GCC.
   328  //
   329  // GCC is free software; you can redistribute it and/or modify it under
   330  // the terms of the GNU General Public License as published by the Free
   331  // Software Foundation; either version 3, or (at your option) any later
   332  // version.
   333  //
   334  // GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   335  // WARRANTY; without even the implied warranty of MERCHANTABILITY or
   336  // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   337  // for more details.
   338  //
   339  // Under Section 7 of GPL version 3, you are granted additional
   340  // permissions described in the GCC Runtime Library Exception, version
   341  // 3.1, as published by the Free Software Foundation.
   342  //
   343  // You should have received a copy of the GNU General Public License and
   344  // a copy of the GCC Runtime Library Exception along with this program;
   345  // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   346  // <http://www.gnu.org/licenses/>.
   347  
   348  // This administrivia gets added to the beginning of limits.h
   349  //    if the system has its own version of limits.h.
   350  
   351  // We use _GCC_LIMITS_H_ because we want this not to match
   352  //    any macros that the system's limits.h uses for its own purposes.
   353  
   354  // Use "..." so that we find syslimits.h only in this same directory.
   355  // syslimits.h stands for the system's own limits.h file.
   356  //    If we can use it ok unmodified, then we install this text.
   357  //    If fixincludes fixes it, then the fixed version is installed
   358  //    instead of this text.
   359  
   360  // *
   361  // This file has no copyright assigned and is placed in the Public Domain.
   362  // This file is part of the mingw-w64 runtime package.
   363  // No warranty is given; refer to the file DISCLAIMER.PD within this package.
   364  // *
   365  // This file has no copyright assigned and is placed in the Public Domain.
   366  // This file is part of the mingw-w64 runtime package.
   367  // No warranty is given; refer to the file DISCLAIMER.PD within this package.
   368  
   369  // File system limits
   370  //
   371  // NOTE: Apparently the actual size of PATH_MAX is 260, but a space is
   372  //       required for the NUL. TODO: Test?
   373  // NOTE: PATH_MAX is the POSIX equivalent for Microsoft's MAX_PATH; the two
   374  //       are semantically identical, with a limit of 259 characters for the
   375  //       path name, plus one for a terminating NUL, for a total of 260.
   376  
   377  // Copyright (C) 1991-2018 Free Software Foundation, Inc.
   378  //
   379  // This file is part of GCC.
   380  //
   381  // GCC is free software; you can redistribute it and/or modify it under
   382  // the terms of the GNU General Public License as published by the Free
   383  // Software Foundation; either version 3, or (at your option) any later
   384  // version.
   385  //
   386  // GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   387  // WARRANTY; without even the implied warranty of MERCHANTABILITY or
   388  // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   389  // for more details.
   390  //
   391  // Under Section 7 of GPL version 3, you are granted additional
   392  // permissions described in the GCC Runtime Library Exception, version
   393  // 3.1, as published by the Free Software Foundation.
   394  //
   395  // You should have received a copy of the GNU General Public License and
   396  // a copy of the GCC Runtime Library Exception along with this program;
   397  // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   398  // <http://www.gnu.org/licenses/>.
   399  
   400  // Number of bits in a `char'.
   401  
   402  // Maximum length of a multibyte character.
   403  
   404  // Minimum and maximum values a `signed char' can hold.
   405  
   406  // Maximum value an `unsigned char' can hold.  (Minimum is 0).
   407  
   408  // Minimum and maximum values a `char' can hold.
   409  
   410  // Minimum and maximum values a `signed short int' can hold.
   411  
   412  // Maximum value an `unsigned short int' can hold.  (Minimum is 0).
   413  
   414  // Minimum and maximum values a `signed int' can hold.
   415  
   416  // Maximum value an `unsigned int' can hold.  (Minimum is 0).
   417  
   418  // Minimum and maximum values a `signed long int' can hold.
   419  //    (Same as `int').
   420  
   421  // Maximum value an `unsigned long int' can hold.  (Minimum is 0).
   422  
   423  // Minimum and maximum values a `signed long long int' can hold.
   424  
   425  // Maximum value an `unsigned long long int' can hold.  (Minimum is 0).
   426  
   427  // Minimum and maximum values a `signed long long int' can hold.
   428  
   429  // Maximum value an `unsigned long long int' can hold.  (Minimum is 0).
   430  
   431  // This administrivia gets added to the end of limits.h
   432  //    if the system has its own version of limits.h.
   433  
   434  type _onexit_t = uintptr /* stdlib.h:49:15 */
   435  
   436  type _div_t = struct {
   437  	Fquot int32
   438  	Frem  int32
   439  } /* stdlib.h:59:11 */
   440  
   441  type div_t = _div_t /* stdlib.h:62:5 */
   442  
   443  type _ldiv_t = struct {
   444  	Fquot int32
   445  	Frem  int32
   446  } /* stdlib.h:64:11 */
   447  
   448  type ldiv_t = _ldiv_t /* stdlib.h:67:5 */
   449  
   450  type _LDOUBLE = struct{ Fld [10]uint8 } /* stdlib.h:76:5 */
   451  
   452  type _CRT_DOUBLE = struct{ Fx float64 } /* stdlib.h:83:5 */
   453  
   454  type _CRT_FLOAT = struct{ Ff float32 } /* stdlib.h:87:5 */
   455  
   456  type _LONGDOUBLE = struct{ Fx float64 } /* stdlib.h:94:5 */
   457  
   458  type _LDBL12 = struct{ Fld12 [12]uint8 } /* stdlib.h:101:5 */
   459  
   460  type _purecall_handler = uintptr /* stdlib.h:142:16 */
   461  
   462  type _invalid_parameter_handler = uintptr /* stdlib.h:147:16 */
   463  
   464  type lldiv_t = struct {
   465  	Fquot int64
   466  	Frem  int64
   467  } /* stdlib.h:699:61 */
   468  
   469  // *
   470  // This file has no copyright assigned and is placed in the Public Domain.
   471  // This file is part of the mingw-w64 runtime package.
   472  // No warranty is given; refer to the file DISCLAIMER.PD within this package.
   473  
   474  // *
   475  // This file has no copyright assigned and is placed in the Public Domain.
   476  // This file is part of the mingw-w64 runtime package.
   477  // No warranty is given; refer to the file DISCLAIMER.PD within this package.
   478  
   479  // Return codes for _heapwalk()
   480  
   481  // Values for _heapinfo.useflag
   482  
   483  // The structure used to walk through the heap with _heapwalk.
   484  type _heapinfo = struct {
   485  	F_pentry  uintptr
   486  	F_size    size_t
   487  	F_useflag int32
   488  } /* malloc.h:46:11 */
   489  
   490  // *
   491  // This file has no copyright assigned and is placed in the Public Domain.
   492  // This file is part of the mingw-w64 runtime package.
   493  // No warranty is given; refer to the file DISCLAIMER.PD within this package.
   494  
   495  // *
   496  // This file has no copyright assigned and is placed in the Public Domain.
   497  // This file is part of the mingw-w64 runtime package.
   498  // No warranty is given; refer to the file DISCLAIMER.PD within this package.
   499  
   500  // Return codes for _heapwalk()
   501  
   502  // Values for _heapinfo.useflag
   503  
   504  // The structure used to walk through the heap with _heapwalk.
   505  type _HEAPINFO = _heapinfo /* malloc.h:50:5 */
   506  
   507  /***********************************************************************
   508  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
   509  Redistribution and use in source and binary forms, with or without
   510  modification, (subject to the limitations in the disclaimer below)
   511  are permitted provided that the following conditions are met:
   512  - Redistributions of source code must retain the above copyright notice,
   513  this list of conditions and the following disclaimer.
   514  - Redistributions in binary form must reproduce the above copyright
   515  notice, this list of conditions and the following disclaimer in the
   516  documentation and/or other materials provided with the distribution.
   517  - Neither the name of Skype Limited, nor the names of specific
   518  contributors, may be used to endorse or promote products derived from
   519  this software without specific prior written permission.
   520  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
   521  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
   522  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
   523  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
   524  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   525  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   526  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   527  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   528  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   529  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   530  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   531  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   532  ***********************************************************************/
   533  
   534  /*																		*
   535   * File Name:	SKP_Silk_resampler_structs.h							*
   536   *																		*
   537   * Description: Structs for IIR/FIR resamplers							*
   538   *                                                                      *
   539   * Copyright 2010 (c), Skype Limited                                    *
   540   * All rights reserved.													*
   541   *																		*
   542   *                                                                      */
   543  
   544  /***********************************************************************
   545  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
   546  Redistribution and use in source and binary forms, with or without
   547  modification, (subject to the limitations in the disclaimer below)
   548  are permitted provided that the following conditions are met:
   549  - Redistributions of source code must retain the above copyright notice,
   550  this list of conditions and the following disclaimer.
   551  - Redistributions in binary form must reproduce the above copyright
   552  notice, this list of conditions and the following disclaimer in the
   553  documentation and/or other materials provided with the distribution.
   554  - Neither the name of Skype Limited, nor the names of specific
   555  contributors, may be used to endorse or promote products derived from
   556  this software without specific prior written permission.
   557  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
   558  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
   559  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
   560  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
   561  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   562  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   563  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   564  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   565  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   566  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   567  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   568  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   569  ***********************************************************************/
   570  
   571  /* Flag to enable support for input/output sampling rates above 48 kHz. Turn off for embedded devices */
   572  
   573  type _SKP_Silk_resampler_state_struct = struct {
   574  	FsIIR               [6]int32
   575  	FsFIR               [16]int32
   576  	FsDown2             [2]int32
   577  	Fresampler_function uintptr
   578  	Fup2_function       uintptr
   579  	FbatchSize          int32
   580  	FinvRatio_Q16       int32
   581  	FFIR_Fracs          int32
   582  	Finput2x            int32
   583  	FCoefs              uintptr
   584  	FsDownPre           [2]int32
   585  	FsUpPost            [2]int32
   586  	Fdown_pre_function  uintptr
   587  	Fup_post_function   uintptr
   588  	FbatchSizePrePost   int32
   589  	Fratio_Q16          int32
   590  	FnPreDownsamplers   int32
   591  	FnPostUpsamplers    int32
   592  	Fmagic_number       int32
   593  } /* SKP_Silk_resampler_structs.h:53:9 */
   594  
   595  /***********************************************************************
   596  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
   597  Redistribution and use in source and binary forms, with or without
   598  modification, (subject to the limitations in the disclaimer below)
   599  are permitted provided that the following conditions are met:
   600  - Redistributions of source code must retain the above copyright notice,
   601  this list of conditions and the following disclaimer.
   602  - Redistributions in binary form must reproduce the above copyright
   603  notice, this list of conditions and the following disclaimer in the
   604  documentation and/or other materials provided with the distribution.
   605  - Neither the name of Skype Limited, nor the names of specific
   606  contributors, may be used to endorse or promote products derived from
   607  this software without specific prior written permission.
   608  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
   609  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
   610  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
   611  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
   612  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   613  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   614  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   615  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   616  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   617  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   618  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   619  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   620  ***********************************************************************/
   621  
   622  /*																		*
   623   * File Name:	SKP_Silk_resampler_structs.h							*
   624   *																		*
   625   * Description: Structs for IIR/FIR resamplers							*
   626   *                                                                      *
   627   * Copyright 2010 (c), Skype Limited                                    *
   628   * All rights reserved.													*
   629   *																		*
   630   *                                                                      */
   631  
   632  /***********************************************************************
   633  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
   634  Redistribution and use in source and binary forms, with or without
   635  modification, (subject to the limitations in the disclaimer below)
   636  are permitted provided that the following conditions are met:
   637  - Redistributions of source code must retain the above copyright notice,
   638  this list of conditions and the following disclaimer.
   639  - Redistributions in binary form must reproduce the above copyright
   640  notice, this list of conditions and the following disclaimer in the
   641  documentation and/or other materials provided with the distribution.
   642  - Neither the name of Skype Limited, nor the names of specific
   643  contributors, may be used to endorse or promote products derived from
   644  this software without specific prior written permission.
   645  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
   646  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
   647  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
   648  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
   649  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   650  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   651  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   652  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   653  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   654  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   655  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   656  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   657  ***********************************************************************/
   658  
   659  /* Flag to enable support for input/output sampling rates above 48 kHz. Turn off for embedded devices */
   660  
   661  type SKP_Silk_resampler_state_struct = _SKP_Silk_resampler_state_struct /* SKP_Silk_resampler_structs.h:75:3 */
   662  
   663  /***********************************************************************
   664  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
   665  Redistribution and use in source and binary forms, with or without
   666  modification, (subject to the limitations in the disclaimer below)
   667  are permitted provided that the following conditions are met:
   668  - Redistributions of source code must retain the above copyright notice,
   669  this list of conditions and the following disclaimer.
   670  - Redistributions in binary form must reproduce the above copyright
   671  notice, this list of conditions and the following disclaimer in the
   672  documentation and/or other materials provided with the distribution.
   673  - Neither the name of Skype Limited, nor the names of specific
   674  contributors, may be used to endorse or promote products derived from
   675  this software without specific prior written permission.
   676  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
   677  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
   678  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
   679  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
   680  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   681  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   682  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   683  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   684  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   685  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   686  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   687  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   688  ***********************************************************************/
   689  
   690  /***********************************************************************
   691  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
   692  Redistribution and use in source and binary forms, with or without
   693  modification, (subject to the limitations in the disclaimer below)
   694  are permitted provided that the following conditions are met:
   695  - Redistributions of source code must retain the above copyright notice,
   696  this list of conditions and the following disclaimer.
   697  - Redistributions in binary form must reproduce the above copyright
   698  notice, this list of conditions and the following disclaimer in the
   699  documentation and/or other materials provided with the distribution.
   700  - Neither the name of Skype Limited, nor the names of specific
   701  contributors, may be used to endorse or promote products derived from
   702  this software without specific prior written permission.
   703  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
   704  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
   705  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
   706  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
   707  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   708  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   709  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   710  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   711  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   712  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   713  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   714  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   715  ***********************************************************************/
   716  
   717  // This is an inline header file for general platform.
   718  
    719  // (a32 * (SKP_int32)((SKP_int16)(b32))) >> 16; output has to be a 32-bit int
   720  
    721  // a32 + (b32 * (SKP_int32)((SKP_int16)(c32))) >> 16; output has to be a 32-bit int
   722  
   723  // (a32 * (b32 >> 16)) >> 16
   724  
   725  // a32 + (b32 * (c32 >> 16)) >> 16
   726  
    727  // (SKP_int32)((SKP_int16)(a32)) * (SKP_int32)((SKP_int16)(b32)); output has to be a 32-bit int
   728  
    729  // a32 + (SKP_int32)((SKP_int16)(b32)) * (SKP_int32)((SKP_int16)(c32)); output has to be a 32-bit int
   730  
   731  // (SKP_int32)((SKP_int16)(a32)) * (b32 >> 16)
   732  
   733  // a32 + (SKP_int32)((SKP_int16)(b32)) * (c32 >> 16)
   734  
   735  // a64 + (b32 * c32)
   736  
   737  // (a32 * b32) >> 16
   738  
   739  // a32 + ((b32 * c32) >> 16)
   740  
   741  // (SKP_int32)(((SKP_int64)a32 * b32) >> 32)
   742  
   743  /* add/subtract with output saturated */
   744  
   745  func SKP_Silk_CLZ16(tls *libc.TLS, in16 int16) int32 { /* SKP_Silk_macros.h:79:22: */
   746  	var out32 int32 = 0
   747  	if int32(in16) == 0 {
   748  		return 16
   749  	}
   750  	/* test nibbles */
   751  	if (int32(in16) & 0xFF00) != 0 {
   752  		if (int32(in16) & 0xF000) != 0 {
   753  			in16 >>= 12
   754  		} else {
   755  			out32 = out32 + (4)
   756  			in16 >>= 8
   757  		}
   758  	} else {
   759  		if (int32(in16) & 0xFFF0) != 0 {
   760  			out32 = out32 + (8)
   761  			in16 >>= 4
   762  		} else {
   763  			out32 = out32 + (12)
   764  		}
   765  	}
   766  	/* test bits and return */
   767  	if (int32(in16) & 0xC) != 0 {
   768  		if (int32(in16) & 0x8) != 0 {
   769  			return (out32 + 0)
   770  		} else {
   771  			return (out32 + 1)
   772  		}
   773  	} else {
   774  		if (int32(in16) & 0xE) != 0 {
   775  			return (out32 + 2)
   776  		} else {
   777  			return (out32 + 3)
   778  		}
   779  	}
   780  	return int32(0)
   781  }
   782  
   783  func SKP_Silk_CLZ32(tls *libc.TLS, in32 int32) int32 { /* SKP_Silk_macros.h:115:22: */
   784  	/* test highest 16 bits and convert to SKP_int16 */
   785  	if (uint32(in32) & 0xFFFF0000) != 0 {
   786  		return SKP_Silk_CLZ16(tls, (int16(in32 >> 16)))
   787  	} else {
   788  		return (SKP_Silk_CLZ16(tls, int16(in32)) + 16)
   789  	}
   790  	return int32(0)
   791  }
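
// Illustrative sketch, not part of the generated translation unit (the
// exampleCLZ32 name is added here purely for illustration): a few spot
// checks of SKP_Silk_CLZ32. The TLS context is not actually touched by the
// function body, so any *libc.TLS will do.
func exampleCLZ32(tls *libc.TLS) bool {
	ok := SKP_Silk_CLZ32(tls, 1) == 31       // 0x00000001 has 31 leading zero bits
	ok = ok && SKP_Silk_CLZ32(tls, 0) == 32  // an all-zero word yields 32
	ok = ok && SKP_Silk_CLZ32(tls, -1) == 0  // all bits set: no leading zeros
	return ok
}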
   792  
   793  /********************************************************************/
   794  /*                                MACROS                            */
   795  /********************************************************************/
   796  
   797  /* Rotate a32 right by 'rot' bits. Negative rot values result in rotating
   798     left. Output is 32bit int.
   799     Note: contemporary compilers recognize the C expressions below and
   800     compile them into 'ror' instructions if available. No need for inline ASM! */
   801  /* PPC must use this generic implementation. */
   802  func SKP_ROR32(tls *libc.TLS, a32 int32, rot int32) int32 { /* SKP_Silk_SigProc_FIX.h:456:22: */
   803  	var x uint32 = uint32(a32)
   804  	var r uint32 = uint32(rot)
   805  	var m uint32 = uint32(-rot)
   806  	if rot <= 0 {
   807  		return (int32((x << m) | (x >> (uint32(32) - m))))
   808  	} else {
   809  		return (int32((x << (uint32(32) - r)) | (x >> r)))
   810  	}
   811  	return int32(0)
   812  }
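
// Illustrative sketch, not part of the generated translation unit (the
// exampleROR32 name is ours): rotating right by one bit moves the low bit
// of 0x00000001 into the sign position, and a negative rot rotates left.
func exampleROR32(tls *libc.TLS) bool {
	ok := SKP_ROR32(tls, 1, 1) == math.MinInt32 // 0x00000001 -> 0x80000000
	ok = ok && SKP_ROR32(tls, 0xF, -4) == 0xF0  // rotate left by 4 bits
	return ok
}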
   813  
    814  /* Allocate SKP_int16 aligned to a 4-byte memory address */
   815  
   816  /* Useful Macros that can be adjusted to other platforms */
   817  /* fixed point macros */
   818  
    819  // (a32 * b32); output has to be a 32-bit int
   820  
    821  // (a32 * b32); output has to be a 32-bit uint
   822  
    823  // a32 + (b32 * c32); output has to be a 32-bit int
   824  
    825  /* ((a32 >> 16) * (b32 >> 16)); output has to be a 32-bit int */
   826  
    827  /* a32 + ((a32 >> 16) * (b32 >> 16)); output has to be a 32-bit int */
   828  
   829  // (a32 * b32)
   830  
   831  /* Adds two signed 32-bit values in a way that can overflow, while not relying on undefined behaviour
   832     (just standard two's complement implementation-specific behaviour) */
    833  /* Subtracts two signed 32-bit values in a way that can overflow, while not relying on undefined behaviour
   834     (just standard two's complement implementation-specific behaviour) */
   835  
   836  /* Multiply-accumulate macros that allow overflow in the addition (ie, no asserts in debug mode) */
   837  
   838  /* Add with saturation for positive input values */
   839  
   840  /* saturates before shifting */
   841  
   842  /* Requires that shift > 0 */
   843  
    844  /* Number of right shifts required to fit the multiplication */
   845  
   846  /* Macro to convert floating-point constants to fixed-point */
   847  func SKP_FIX_CONST(tls *libc.TLS, C float64, Q int32) int32 { /* SKP_Silk_SigProc_FIX.h:568:5: */
   848  	return (int32(((C) * (float64(int64(int64(1)) << (Q)))) + 0.5))
   849  }
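
// Illustrative sketch, not part of the generated translation unit (the
// exampleFixConst name is ours): the conversion rounds C * 2^Q to the
// nearest integer, so 0.75 in Q16 is 49152 and 1.0 in Q24 is 1 << 24.
func exampleFixConst(tls *libc.TLS) bool {
	return SKP_FIX_CONST(tls, 0.75, 16) == 49152 &&
		SKP_FIX_CONST(tls, 1.0, 24) == 1<<24
}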
   850  
   851  /* SKP_min() versions with typecast in the function call */
   852  func SKP_min_int(tls *libc.TLS, a int32, b int32) int32 { /* SKP_Silk_SigProc_FIX.h:573:20: */
   853  	return func() int32 {
   854  		if (a) < (b) {
   855  			return a
   856  		}
   857  		return b
   858  	}()
   859  }
   860  
   861  func SKP_min_32(tls *libc.TLS, a int32, b int32) int32 { /* SKP_Silk_SigProc_FIX.h:578:22: */
   862  	return func() int32 {
   863  		if (a) < (b) {
   864  			return a
   865  		}
   866  		return b
   867  	}()
   868  }
   869  
   870  /* SKP_min() versions with typecast in the function call */
   871  func SKP_max_int(tls *libc.TLS, a int32, b int32) int32 { /* SKP_Silk_SigProc_FIX.h:584:20: */
   872  	return func() int32 {
   873  		if (a) > (b) {
   874  			return a
   875  		}
   876  		return b
   877  	}()
   878  }
   879  
   880  func SKP_max_16(tls *libc.TLS, a int16, b int16) int16 { /* SKP_Silk_SigProc_FIX.h:588:22: */
   881  	return func() int16 {
   882  		if (int32(a)) > (int32(b)) {
   883  			return a
   884  		}
   885  		return b
   886  	}()
   887  }
   888  
   889  func SKP_max_32(tls *libc.TLS, a int32, b int32) int32 { /* SKP_Silk_SigProc_FIX.h:592:22: */
   890  	return func() int32 {
   891  		if (a) > (b) {
   892  			return a
   893  		}
   894  		return b
   895  	}()
   896  }
   897  
   898  // Static assertion.  Requires support in the compiler.
   899  
   900  /***********************************************************************
   901  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
   902  Redistribution and use in source and binary forms, with or without
   903  modification, (subject to the limitations in the disclaimer below)
   904  are permitted provided that the following conditions are met:
   905  - Redistributions of source code must retain the above copyright notice,
   906  this list of conditions and the following disclaimer.
   907  - Redistributions in binary form must reproduce the above copyright
   908  notice, this list of conditions and the following disclaimer in the
   909  documentation and/or other materials provided with the distribution.
   910  - Neither the name of Skype Limited, nor the names of specific
   911  contributors, may be used to endorse or promote products derived from
   912  this software without specific prior written permission.
   913  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
   914  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
   915  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
   916  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
   917  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
   918  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   919  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   920  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
   921  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   922  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   923  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   924  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   925  ***********************************************************************/
   926  
   927  /* count leading zeros of SKP_int64 */
   928  func SKP_Silk_CLZ64(tls *libc.TLS, in int64_t) int32 { /* SKP_Silk_Inlines.h:44:22: */
   929  	var in_upper int32
   930  
   931  	in_upper = (int32((in) >> (32)))
   932  	if in_upper == 0 {
   933  		/* Search in the lower 32 bits */
   934  		return (32 + SKP_Silk_CLZ32(tls, int32(in)))
   935  	} else {
   936  		/* Search in the upper 32 bits */
   937  		return SKP_Silk_CLZ32(tls, in_upper)
   938  	}
   939  	return int32(0)
   940  }
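
// Illustrative sketch, not part of the generated translation unit (the
// exampleCLZ64 name is ours): the 64-bit count dispatches to SKP_Silk_CLZ32
// on the upper or lower half, so 1 has 63 leading zeros and 0 has 64.
func exampleCLZ64(tls *libc.TLS) bool {
	return SKP_Silk_CLZ64(tls, 1) == 63 && SKP_Silk_CLZ64(tls, 0) == 64
}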
   941  
    942  /* get number of leading zeros and fractional part (the bits right after the leading one) */
   943  func SKP_Silk_CLZ_FRAC(tls *libc.TLS, in int32, lz uintptr, frac_Q7 uintptr) { /* SKP_Silk_Inlines.h:59:17: */
   944  	var lzeros int32 = SKP_Silk_CLZ32(tls, in)
   945  
   946  	*(*int32)(unsafe.Pointer(lz)) = lzeros
   947  	*(*int32)(unsafe.Pointer(frac_Q7)) = (SKP_ROR32(tls, in, (24-lzeros)) & 0x7f)
   948  }
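
// Illustrative sketch, not part of the generated translation unit (the
// exampleCLZFrac name is ours): the two results are written through
// pointers, so scratch space is taken from the TLS allocator exactly as the
// surrounding code does. For 0x01800000 (binary 1.1 * 2^24) this yields
// lz = 7 and frac_Q7 = 64, i.e. 0.5 in Q7.
func exampleCLZFrac(tls *libc.TLS) (lz int32, fracQ7 int32) {
	bp := tls.Alloc(8)
	defer tls.Free(8)
	SKP_Silk_CLZ_FRAC(tls, 0x01800000, bp, bp+4)
	return *(*int32)(unsafe.Pointer(bp)), *(*int32)(unsafe.Pointer(bp + 4))
}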
   949  
   950  /* Approximation of square root                                          */
   951  /* Accuracy: < +/- 10%  for output values > 15                           */
   952  /*           < +/- 2.5% for output values > 120                          */
   953  func SKP_Silk_SQRT_APPROX(tls *libc.TLS, x int32) int32 { /* SKP_Silk_Inlines.h:72:22: */
   954  	bp := tls.Alloc(8)
   955  	defer tls.Free(8)
   956  
   957  	var y int32
   958  	// var lz int32 at bp, 4
   959  
   960  	// var frac_Q7 int32 at bp+4, 4
   961  
   962  	if x <= 0 {
   963  		return 0
   964  	}
   965  
   966  	SKP_Silk_CLZ_FRAC(tls, x, bp /* &lz */, bp+4 /* &frac_Q7 */)
   967  
   968  	if (*(*int32)(unsafe.Pointer(bp /* lz */)) & 1) != 0 {
   969  		y = 32768
   970  	} else {
   971  		y = 46214 /* 46214 = sqrt(2) * 32768 */
   972  	}
   973  
   974  	/* get scaling right */
   975  	y >>= ((*(*int32)(unsafe.Pointer(bp /* lz */))) >> (1))
   976  
   977  	/* increment using fractional part of input */
   978  	y = ((y) + ((((y) >> 16) * (int32((int16((int32(int16(213))) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 4 /* frac_Q7 */)))))))))) + ((((y) & 0x0000FFFF) * (int32((int16((int32(int16(213))) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 4 /* frac_Q7 */)))))))))) >> 16)))
   979  
   980  	return y
   981  }
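
// Illustrative sketch, not part of the generated translation unit (the
// exampleSqrtApprox name is ours): per the accuracy note above, outputs
// above 120 stay within roughly +/- 2.5% of the true square root, so
// sqrt(1000000) = 1000 should be approximated to within about 25 counts;
// non-positive inputs return 0.
func exampleSqrtApprox(tls *libc.TLS) bool {
	y := SKP_Silk_SQRT_APPROX(tls, 1000000)
	return y > 975 && y < 1025 && SKP_Silk_SQRT_APPROX(tls, -5) == 0
}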
   982  
   983  /* Divide two int32 values and return result as int32 in a given Q-domain */
   984  func SKP_DIV32_varQ(tls *libc.TLS, a32 int32, b32 int32, Qres int32) int32 { /* SKP_Silk_Inlines.h:125:22: */
   985  	var a_headrm int32
   986  	var b_headrm int32
   987  	var lshift int32
   988  	var b32_inv int32
   989  	var a32_nrm int32
   990  	var b32_nrm int32
   991  	var result int32
   992  
   993  	/* Compute number of bits head room and normalize inputs */
   994  	a_headrm = (SKP_Silk_CLZ32(tls, func() int32 {
   995  		if (a32) > 0 {
   996  			return a32
   997  		}
   998  		return -a32
   999  	}()) - 1)
  1000  	a32_nrm = ((a32) << (a_headrm)) /* Q: a_headrm                    */
  1001  	b_headrm = (SKP_Silk_CLZ32(tls, func() int32 {
  1002  		if (b32) > 0 {
  1003  			return b32
  1004  		}
  1005  		return -b32
  1006  	}()) - 1)
  1007  	b32_nrm = ((b32) << (b_headrm)) /* Q: b_headrm                    */
  1008  
  1009  	/* Inverse of b32, with 14 bits of precision */
  1010  	b32_inv = ((int32(0x7FFFFFFF) >> 2) / ((b32_nrm) >> (16))) /* Q: 29 + 16 - b_headrm        */
  1011  
  1012  	/* First approximation */
  1013  	result = ((((a32_nrm) >> 16) * (int32(int16(b32_inv)))) + ((((a32_nrm) & 0x0000FFFF) * (int32(int16(b32_inv)))) >> 16)) /* Q: 29 + a_headrm - b_headrm    */
  1014  
  1015  	/* Compute residual by subtracting product of denominator and first approximation */
  1016  	a32_nrm = a32_nrm - ((int32(((int64_t(b32_nrm)) * (int64_t(result))) >> (32))) << (3)) /* Q: a_headrm                    */
  1017  
  1018  	/* Refinement */
  1019  	result = ((result) + ((((a32_nrm) >> 16) * (int32(int16(b32_inv)))) + ((((a32_nrm) & 0x0000FFFF) * (int32(int16(b32_inv)))) >> 16))) /* Q: 29 + a_headrm - b_headrm    */
  1020  
  1021  	/* Convert to Qres domain */
  1022  	lshift = (((29 + a_headrm) - b_headrm) - Qres)
  1023  	if lshift <= 0 {
  1024  		return ((func() int32 {
  1025  			if (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift)) > (int32((0x7FFFFFFF)) >> (-lshift)) {
  1026  				return func() int32 {
  1027  					if (result) > (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift)) {
  1028  						return (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift))
  1029  					}
  1030  					return func() int32 {
  1031  						if (result) < (int32((0x7FFFFFFF)) >> (-lshift)) {
  1032  							return (int32((0x7FFFFFFF)) >> (-lshift))
  1033  						}
  1034  						return result
  1035  					}()
  1036  				}()
  1037  			}
  1038  			return func() int32 {
  1039  				if (result) > (int32((0x7FFFFFFF)) >> (-lshift)) {
  1040  					return (int32((0x7FFFFFFF)) >> (-lshift))
  1041  				}
  1042  				return func() int32 {
  1043  					if (result) < (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift)) {
  1044  						return (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift))
  1045  					}
  1046  					return result
  1047  				}()
  1048  			}()
  1049  		}()) << (-lshift))
  1050  	} else {
  1051  		if lshift < 32 {
  1052  			return ((result) >> (lshift))
  1053  		} else {
  1054  			/* Avoid undefined result */
  1055  			return 0
  1056  		}
  1057  	}
  1058  	return int32(0)
  1059  }
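
// Illustrative sketch, not part of the generated translation unit (the
// exampleDiv32VarQ name is ours): the quotient is returned in the requested
// Q-domain, so 3/4 in Q16 lands at or very near 0.75 * 65536 = 49152; the
// 14-bit inverse plus a single refinement step can leave the result a few
// counts low, hence the small tolerance.
func exampleDiv32VarQ(tls *libc.TLS) bool {
	q := SKP_DIV32_varQ(tls, 3, 4, 16)
	return q >= 49148 && q <= 49153
}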
  1060  
  1061  /* Invert int32 value and return result as int32 in a given Q-domain */
  1062  func SKP_INVERSE32_varQ(tls *libc.TLS, b32 int32, Qres int32) int32 { /* SKP_Silk_Inlines.h:170:22: */
  1063  	var b_headrm int32
  1064  	var lshift int32
  1065  	var b32_inv int32
  1066  	var b32_nrm int32
  1067  	var err_Q32 int32
  1068  	var result int32
  1069  
  1070  	/* SKP_int32_MIN is not handled by SKP_abs */
  1071  
  1072  	/* Compute number of bits head room and normalize input */
  1073  	b_headrm = (SKP_Silk_CLZ32(tls, func() int32 {
  1074  		if (b32) > 0 {
  1075  			return b32
  1076  		}
  1077  		return -b32
  1078  	}()) - 1)
  1079  	b32_nrm = ((b32) << (b_headrm)) /* Q: b_headrm                */
  1080  
  1081  	/* Inverse of b32, with 14 bits of precision */
  1082  	b32_inv = ((int32(0x7FFFFFFF) >> 2) / ((b32_nrm) >> (16))) /* Q: 29 + 16 - b_headrm    */
  1083  
  1084  	/* First approximation */
  1085  	result = ((b32_inv) << (16)) /* Q: 61 - b_headrm            */
  1086  
  1087  	/* Compute residual by subtracting product of denominator and first approximation from one */
  1088  	err_Q32 = ((-((((b32_nrm) >> 16) * (int32(int16(b32_inv)))) + ((((b32_nrm) & 0x0000FFFF) * (int32(int16(b32_inv)))) >> 16))) << (3)) /* Q32                        */
  1089  
  1090  	/* Refinement */
  1091  	result = (((result) + ((((err_Q32) >> 16) * (int32(int16(b32_inv)))) + ((((err_Q32) & 0x0000FFFF) * (int32(int16(b32_inv)))) >> 16))) + ((err_Q32) * (func() int32 {
  1092  		if (16) == 1 {
  1093  			return (((b32_inv) >> 1) + ((b32_inv) & 1))
  1094  		}
  1095  		return ((((b32_inv) >> ((16) - 1)) + 1) >> 1)
  1096  	}()))) /* Q: 61 - b_headrm            */
  1097  
  1098  	/* Convert to Qres domain */
  1099  	lshift = ((61 - b_headrm) - Qres)
  1100  	if lshift <= 0 {
  1101  		return ((func() int32 {
  1102  			if (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift)) > (int32((0x7FFFFFFF)) >> (-lshift)) {
  1103  				return func() int32 {
  1104  					if (result) > (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift)) {
  1105  						return (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift))
  1106  					}
  1107  					return func() int32 {
  1108  						if (result) < (int32((0x7FFFFFFF)) >> (-lshift)) {
  1109  							return (int32((0x7FFFFFFF)) >> (-lshift))
  1110  						}
  1111  						return result
  1112  					}()
  1113  				}()
  1114  			}
  1115  			return func() int32 {
  1116  				if (result) > (int32((0x7FFFFFFF)) >> (-lshift)) {
  1117  					return (int32((0x7FFFFFFF)) >> (-lshift))
  1118  				}
  1119  				return func() int32 {
  1120  					if (result) < (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift)) {
  1121  						return (int32((libc.Int32FromUint32(0x80000000))) >> (-lshift))
  1122  					}
  1123  					return result
  1124  				}()
  1125  			}()
  1126  		}()) << (-lshift))
  1127  	} else {
  1128  		if lshift < 32 {
  1129  			return ((result) >> (lshift))
  1130  		} else {
  1131  			/* Avoid undefined result */
  1132  			return 0
  1133  		}
  1134  	}
  1135  	return int32(0)
  1136  }
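
// Illustrative sketch, not part of the generated translation unit (the
// exampleInverse32VarQ name is ours): inverting 4 in Q18 should give
// roughly 0.25 * 2^18 = 65536; the truncated 14-bit inverse typically
// leaves the refined result a count or so below that.
func exampleInverse32VarQ(tls *libc.TLS) bool {
	q := SKP_INVERSE32_varQ(tls, 4, 18)
	return q >= 65530 && q <= 65536
}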
  1137  
  1138  /* Sine approximation; an input of 65536 corresponds to 2 * pi */
  1139  /* Uses polynomial expansion of the input to the power 0, 2, 4 and 6 */
  1140  /* The relative error is below 1e-5 */
  1141  func SKP_Silk_SIN_APPROX_Q24(tls *libc.TLS, x int32) int32 { /* SKP_Silk_Inlines.h:220:22: */
  1142  	var y_Q30 int32
  1143  
  1144  	/* Keep only bottom 16 bits (the function repeats itself with period 65536) */
  1145  	x = x & (65535)
  1146  
  1147  	/* Split range in four quadrants */
  1148  	if x <= 32768 {
  1149  		if x < 16384 {
  1150  			/* Return cos(pi/2 - x) */
  1151  			x = (16384 - x)
  1152  		} else {
  1153  			/* Return cos(x - pi/2) */
  1154  			x = x - (16384)
  1155  		}
  1156  		if x < 1100 {
  1157  			/* Special case: high accuracy */
  1158  			return ((int32(1) << 24) + (((((x) * (x)) >> 16) * (int32(int16(-5053)))) + (((((x) * (x)) & 0x0000FFFF) * (int32(int16(-5053)))) >> 16)))
  1159  		}
  1160  		x = (((((x) << (8)) >> 16) * (int32(int16(x)))) + (((((x) << (8)) & 0x0000FFFF) * (int32(int16(x)))) >> 16)) /* contains x^2 in Q20 */
  1161  		y_Q30 = ((1059577) + ((((x) >> 16) * (int32(int16(-5013)))) + ((((x) & 0x0000FFFF) * (int32(int16(-5013)))) >> 16)))
  1162  		y_Q30 = (((-82778932) + ((((x) >> 16) * (int32(int16(y_Q30)))) + ((((x) & 0x0000FFFF) * (int32(int16(y_Q30)))) >> 16))) + ((x) * (func() int32 {
  1163  			if (16) == 1 {
  1164  				return (((y_Q30) >> 1) + ((y_Q30) & 1))
  1165  			}
  1166  			return ((((y_Q30) >> ((16) - 1)) + 1) >> 1)
  1167  		}())))
  1168  		y_Q30 = ((((1073735400) + 66) + ((((x) >> 16) * (int32(int16(y_Q30)))) + ((((x) & 0x0000FFFF) * (int32(int16(y_Q30)))) >> 16))) + ((x) * (func() int32 {
  1169  			if (16) == 1 {
  1170  				return (((y_Q30) >> 1) + ((y_Q30) & 1))
  1171  			}
  1172  			return ((((y_Q30) >> ((16) - 1)) + 1) >> 1)
  1173  		}())))
  1174  	} else {
  1175  		if x < 49152 {
  1176  			/* Return -cos(3*pi/2 - x) */
  1177  			x = (49152 - x)
  1178  		} else {
  1179  			/* Return -cos(x - 3*pi/2) */
  1180  			x = x - (49152)
  1181  		}
  1182  		if x < 1100 {
  1183  			/* Special case: high accuracy */
  1184  			return ((int32(-1) << 24) + (((((x) * (x)) >> 16) * (int32(int16(5053)))) + (((((x) * (x)) & 0x0000FFFF) * (int32(int16(5053)))) >> 16)))
  1185  		}
  1186  		x = (((((x) << (8)) >> 16) * (int32(int16(x)))) + (((((x) << (8)) & 0x0000FFFF) * (int32(int16(x)))) >> 16)) /* contains x^2 in Q20 */
  1187  		y_Q30 = ((-1059577) + ((((x) >> 16) * (int32(int16(- -5013)))) + ((((x) & 0x0000FFFF) * (int32(int16(- -5013)))) >> 16)))
  1188  		y_Q30 = (((- -82778932) + ((((x) >> 16) * (int32(int16(y_Q30)))) + ((((x) & 0x0000FFFF) * (int32(int16(y_Q30)))) >> 16))) + ((x) * (func() int32 {
  1189  			if (16) == 1 {
  1190  				return (((y_Q30) >> 1) + ((y_Q30) & 1))
  1191  			}
  1192  			return ((((y_Q30) >> ((16) - 1)) + 1) >> 1)
  1193  		}())))
  1194  		y_Q30 = (((-1073735400) + ((((x) >> 16) * (int32(int16(y_Q30)))) + ((((x) & 0x0000FFFF) * (int32(int16(y_Q30)))) >> 16))) + ((x) * (func() int32 {
  1195  			if (16) == 1 {
  1196  				return (((y_Q30) >> 1) + ((y_Q30) & 1))
  1197  			}
  1198  			return ((((y_Q30) >> ((16) - 1)) + 1) >> 1)
  1199  		}())))
  1200  	}
  1201  	return func() int32 {
  1202  		if (6) == 1 {
  1203  			return (((y_Q30) >> 1) + ((y_Q30) & 1))
  1204  		}
  1205  		return ((((y_Q30) >> ((6) - 1)) + 1) >> 1)
  1206  	}()
  1207  }
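
        // Illustrative usage, not part of the generated ccgo output: the argument of
        // SKP_Silk_SIN_APPROX_Q24 is a phase in Q16 "turns" (65536 corresponds to 2*pi) and the return
        // value is the sine in Q24 (1<<24 corresponds to 1.0). A hypothetical helper evaluating the
        // approximation at a quarter turn, where the result should be close to 1<<24:
        func sinApproxQuarterTurnExample(tls *libc.TLS) int32 {
        	return SKP_Silk_SIN_APPROX_Q24(tls, 65536/4) // sin(pi/2) ~= 1.0 in Q24
        }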
  1208  
  1209  /* Number of binary divisions */
  1210  
  1211  /* Flag for using 2x as many cosine sampling points, reduces the risk of missing a root */
  1212  
  1213  /* Helper function for A2NLSF(..)                    */
  1214  /* Transforms polynomials from cos(n*f) to cos(f)^n  */
  1215  func SKP_Silk_A2NLSF_trans_poly(tls *libc.TLS, p uintptr, dd int32) { /* SKP_Silk_A2NLSF.c:46:17: */
  1216  	var k int32
  1217  	var n int32
  1218  
  1219  	for k = 2; k <= dd; k++ {
  1220  		for n = dd; n > k; n-- {
  1221  			*(*int32)(unsafe.Pointer(p + uintptr((n-2))*4)) -= (*(*int32)(unsafe.Pointer(p + uintptr(n)*4)))
  1222  		}
  1223  		*(*int32)(unsafe.Pointer(p + uintptr((k-2))*4)) -= ((*(*int32)(unsafe.Pointer(p + uintptr(k)*4))) << (1))
  1224  	}
  1225  }
  1226  
  1227  /* Helper function for A2NLSF(..)                    */
  1228  /* Polynomial evaluation                             */
  1229  func SKP_Silk_A2NLSF_eval_poly(tls *libc.TLS, p uintptr, x int32, dd int32) int32 { /* SKP_Silk_A2NLSF.c:62:22: */
  1230  	var n int32
  1231  	var x_Q16 int32
  1232  	var y32 int32
  1233  
  1234  	y32 = *(*int32)(unsafe.Pointer(p + uintptr(dd)*4)) /* QPoly */
  1235  	x_Q16 = ((x) << (4))
  1236  	for n = (dd - 1); n >= 0; n-- {
  1237  		y32 = (((*(*int32)(unsafe.Pointer(p + uintptr(n)*4))) + ((((y32) >> 16) * (int32(int16(x_Q16)))) + ((((y32) & 0x0000FFFF) * (int32(int16(x_Q16)))) >> 16))) + ((y32) * (func() int32 {
  1238  			if (16) == 1 {
  1239  				return (((x_Q16) >> 1) + ((x_Q16) & 1))
  1240  			}
  1241  			return ((((x_Q16) >> ((16) - 1)) + 1) >> 1)
  1242  		}()))) /* QPoly */
  1243  	}
  1244  	return y32
  1245  }
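
        // Illustrative reference, not part of the generated ccgo output: the evaluation above is
        // Horner's rule, with each multiplication carried out as a fixed-point product whose split
        // 16-bit multiplies approximate (y * x_Q16) >> 16. A 64-bit integer sketch of the same
        // recurrence, using a hypothetical name and a Go slice in place of the raw pointer:
        func a2nlsfEvalPolyRef(p []int32, x int32, dd int32) int32 {
        	xQ16 := int64(x) << 4 // table values are Q12; promote to Q16
        	y := int64(p[dd])
        	for n := dd - 1; n >= 0; n-- {
        		y = int64(p[n]) + ((y * xQ16) >> 16)
        	}
        	return int32(y)
        }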
  1246  
  1247  func SKP_Silk_A2NLSF_init(tls *libc.TLS, a_Q16 uintptr, P uintptr, Q uintptr, dd int32) { /* SKP_Silk_A2NLSF.c:79:17: */
  1248  	var k int32
  1249  
  1250  	/* Convert filter coefs to even and odd polynomials */
  1251  	*(*int32)(unsafe.Pointer(P + uintptr(dd)*4)) = (int32((1)) << (16))
  1252  	*(*int32)(unsafe.Pointer(Q + uintptr(dd)*4)) = (int32((1)) << (16))
  1253  	for k = 0; k < dd; k++ {
  1254  		*(*int32)(unsafe.Pointer(P + uintptr(k)*4)) = (-*(*int32)(unsafe.Pointer(a_Q16 + uintptr(((dd-k)-1))*4)) - *(*int32)(unsafe.Pointer(a_Q16 + uintptr((dd+k))*4))) // QPoly
  1255  		*(*int32)(unsafe.Pointer(Q + uintptr(k)*4)) = (-*(*int32)(unsafe.Pointer(a_Q16 + uintptr(((dd-k)-1))*4)) + *(*int32)(unsafe.Pointer(a_Q16 + uintptr((dd+k))*4))) // QPoly
  1256  	}
  1257  
  1258  	/* Divide out zeros as we have that for even filter orders, */
  1259  	/* z =  1 is always a root in Q, and                        */
  1260  	/* z = -1 is always a root in P                             */
  1261  	for k = dd; k > 0; k-- {
  1262  		*(*int32)(unsafe.Pointer(P + uintptr((k-1))*4)) -= (*(*int32)(unsafe.Pointer(P + uintptr(k)*4)))
  1263  		*(*int32)(unsafe.Pointer(Q + uintptr((k-1))*4)) += (*(*int32)(unsafe.Pointer(Q + uintptr(k)*4)))
  1264  	}
  1265  
  1266  	/* Transform polynomials from cos(n*f) to cos(f)^n */
  1267  	SKP_Silk_A2NLSF_trans_poly(tls, P, dd)
  1268  	SKP_Silk_A2NLSF_trans_poly(tls, Q, dd)
  1269  }
  1270  
  1271  /* Compute Normalized Line Spectral Frequencies (NLSFs) from whitening filter coefficients        */
  1272  /* If not all roots are found, the a_Q16 coefficients are bandwidth expanded until convergence.    */
  1273  func SKP_Silk_A2NLSF(tls *libc.TLS, NLSF uintptr, a_Q16 uintptr, d int32) { /* SKP_Silk_A2NLSF.c:119:6: */
  1274  	bp := tls.Alloc(80)
  1275  	defer tls.Free(80)
  1276  
  1277  	var i int32
  1278  	var k int32
  1279  	var m int32
  1280  	var dd int32
  1281  	var root_ix int32
  1282  	var ffrac int32
  1283  	var xlo int32
  1284  	var xhi int32
  1285  	var xmid int32
  1286  	var ylo int32
  1287  	var yhi int32
  1288  	var ymid int32
  1289  	var nom int32
  1290  	var den int32
  1291  	// var P [9]int32 at bp+8, 36
  1292  
  1293  	// var Q [9]int32 at bp+44, 36
  1294  
  1295  	// var PQ [2]uintptr at bp, 8
  1296  
  1297  	var p uintptr
  1298  
  1299  	/* Store pointers to array */
  1300  	*(*uintptr)(unsafe.Pointer(bp /* &PQ[0] */)) = bp + 8        /* &P[0] */
  1301  	*(*uintptr)(unsafe.Pointer(bp /* &PQ[0] */ + 1*4)) = bp + 44 /* &Q[0] */
  1302  
  1303  	dd = ((d) >> (1))
  1304  
  1305  	SKP_Silk_A2NLSF_init(tls, a_Q16, bp+8 /* &P[0] */, bp+44 /* &Q[0] */, dd)
  1306  
  1307  	/* Find roots, alternating between P and Q */
  1308  	p = bp + 8 /* &P[0] */ /* Pointer to polynomial */
  1309  
  1310  	xlo = SKP_Silk_LSFCosTab_FIX_Q12[0] // Q12
  1311  	ylo = SKP_Silk_A2NLSF_eval_poly(tls, p, xlo, dd)
  1312  
  1313  	if ylo < 0 {
  1314  		/* Set the first NLSF to zero and move on to the next */
  1315  		*(*int32)(unsafe.Pointer(NLSF)) = 0
  1316  		p = bp + 44 /* &Q[0] */ /* Pointer to polynomial */
  1317  		ylo = SKP_Silk_A2NLSF_eval_poly(tls, p, xlo, dd)
  1318  		root_ix = 1 /* Index of current root */
  1319  	} else {
  1320  		root_ix = 0 /* Index of current root */
  1321  	}
  1322  	k = 1 /* Loop counter */
  1323  	i = 0 /* Counter for bandwidth expansions applied */
  1324  	for 1 != 0 {
  1325  		/* Evaluate polynomial */
  1326  		xhi = SKP_Silk_LSFCosTab_FIX_Q12[k] /* Q12 */
  1327  		yhi = SKP_Silk_A2NLSF_eval_poly(tls, p, xhi, dd)
  1328  
  1329  		/* Detect zero crossing */
  1330  		if ((ylo <= 0) && (yhi >= 0)) || ((ylo >= 0) && (yhi <= 0)) {
  1331  			/* Binary division */
  1332  			ffrac = -256
  1333  			for m = 0; m < 3; m++ {
  1334  				/* Evaluate polynomial */
  1335  				xmid = func() int32 {
  1336  					if (1) == 1 {
  1337  						return (((xlo + xhi) >> 1) + ((xlo + xhi) & 1))
  1338  					}
  1339  					return ((((xlo + xhi) >> ((1) - 1)) + 1) >> 1)
  1340  				}()
  1341  				ymid = SKP_Silk_A2NLSF_eval_poly(tls, p, xmid, dd)
  1342  
  1343  				/* Detect zero crossing */
  1344  				if ((ylo <= 0) && (ymid >= 0)) || ((ylo >= 0) && (ymid <= 0)) {
  1345  					/* Reduce frequency */
  1346  					xhi = xmid
  1347  					yhi = ymid
  1348  				} else {
  1349  					/* Increase frequency */
  1350  					xlo = xmid
  1351  					ylo = ymid
  1352  					ffrac = ((ffrac) + (int32((128)) >> (m)))
  1353  				}
  1354  			}
  1355  
  1356  			/* Interpolate */
  1357  			if (func() int32 {
  1358  				if (ylo) > 0 {
  1359  					return ylo
  1360  				}
  1361  				return -ylo
  1362  			}()) < 65536 {
  1363  				/* Avoid dividing by zero */
  1364  				den = (ylo - yhi)
  1365  				nom = (((ylo) << (8 - 3)) + ((den) >> (1)))
  1366  				if den != 0 {
  1367  					ffrac = ffrac + ((nom) / (den))
  1368  				}
  1369  			} else {
  1370  				/* No risk of dividing by zero because abs(ylo - yhi) >= abs(ylo) >= 65536 */
  1371  				ffrac = ffrac + ((ylo) / ((ylo - yhi) >> (8 - 3)))
  1372  			}
  1373  			*(*int32)(unsafe.Pointer(NLSF + uintptr(root_ix)*4)) = SKP_min_32(tls, (((k) << (8)) + ffrac), 0x7FFF)
  1374  
  1375  			root_ix++ /* Next root */
  1376  			if root_ix >= d {
  1377  				/* Found all roots */
  1378  				break
  1379  			}
  1380  			/* Alternate pointer to polynomial */
  1381  			p = *(*uintptr)(unsafe.Pointer(bp /* &PQ[0] */ + uintptr((root_ix&1))*4))
  1382  
  1383  			/* Evaluate polynomial */
  1384  			xlo = SKP_Silk_LSFCosTab_FIX_Q12[(k - 1)] // Q12
  1385  			ylo = ((1 - (root_ix & 2)) << (12))
  1386  		} else {
  1387  			/* Increment loop counter */
  1388  			k++
  1389  			xlo = xhi
  1390  			ylo = yhi
  1391  
  1392  			if k > 128 {
  1393  				i++
  1394  				if i > 30 {
  1395  					/* Set NLSFs to white spectrum and exit */
  1396  					*(*int32)(unsafe.Pointer(NLSF)) = ((int32(1) << 15) / (d + 1))
  1397  					for k = 1; k < d; k++ {
  1398  						*(*int32)(unsafe.Pointer(NLSF + uintptr(k)*4)) = ((int32((int16(k + 1)))) * (int32(int16(*(*int32)(unsafe.Pointer(NLSF))))))
  1399  					}
  1400  					return
  1401  				}
  1402  
  1403  				/* Error: Apply progressively more bandwidth expansion and run again */
  1404  				SKP_Silk_bwexpander_32(tls, a_Q16, d, (65536 - ((int32((int16(10 + i)))) * (int32(int16(i)))))) // 10_Q16 = 0.00015
  1405  
  1406  				SKP_Silk_A2NLSF_init(tls, a_Q16, bp+8 /* &P[0] */, bp+44 /* &Q[0] */, dd)
  1407  				p = bp + 8                          /* &P[0] */ /* Pointer to polynomial */
  1408  				xlo = SKP_Silk_LSFCosTab_FIX_Q12[0] // Q12
  1409  				ylo = SKP_Silk_A2NLSF_eval_poly(tls, p, xlo, dd)
  1410  				if ylo < 0 {
  1411  					/* Set the first NLSF to zero and move on to the next */
  1412  					*(*int32)(unsafe.Pointer(NLSF)) = 0
  1413  					p = bp + 44 /* &Q[0] */ /* Pointer to polynomial */
  1414  					ylo = SKP_Silk_A2NLSF_eval_poly(tls, p, xlo, dd)
  1415  					root_ix = 1 /* Index of current root */
  1416  				} else {
  1417  					root_ix = 0 /* Index of current root */
  1418  				}
  1419  				k = 1 /* Reset loop counter */
  1420  			}
  1421  		}
  1422  	}
  1423  }
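
        // Illustrative usage, not part of the generated ccgo output: converting an order-10 set of LPC
        // coefficients in Q16 into NLSFs. Scratch memory is taken from the ccgo TLS stack, mirroring
        // the allocation pattern of the generated functions; the names and the all-zero predictor are
        // purely for the sketch.
        func a2nlsfUsageExample(tls *libc.TLS) {
        	const d = 10
        	bp := tls.Alloc(2 * d * 4) // int32 a_Q16[d] followed by int32 NLSF[d]
        	defer tls.Free(2 * d * 4)
        	aQ16 := bp
        	nlsf := bp + uintptr(d)*4
        	for i := 0; i < d; i++ {
        		*(*int32)(unsafe.Pointer(aQ16 + uintptr(i)*4)) = 0 // trivial predictor
        	}
        	SKP_Silk_A2NLSF(tls, nlsf, aQ16, int32(d))
        	// nlsf[0..d-1] now holds monotonically increasing NLSFs in the range 0..32767
        }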
  1424  
  1425  /* Coefficients for 2-band filter bank based on first-order allpass filters */
  1426  // old
  1427  var A_fb1_20 = [1]int16{(int16(int32(5394) << 1))}                /* SKP_Silk_ana_filt_bank_1.c:40:18 */
  1428  var A_fb1_21 = [1]int16{(libc.Int16FromInt32(int32(20623) << 1))} /* SKP_Silk_ana_filt_bank_1.c:41:18 */
  1429  
  1430  /* wrap-around to negative number is intentional */
  1431  
  1432  /* Split signal into two decimated bands using first-order allpass filters */
  1433  func SKP_Silk_ana_filt_bank_1(tls *libc.TLS, in uintptr, S uintptr, outL uintptr, outH uintptr, scratch uintptr, N int32) { /* SKP_Silk_ana_filt_bank_1.c:44:6: */
  1434  	var k int32
  1435  	var N2 int32 = ((N) >> (1))
  1436  	var in32 int32
  1437  	var X int32
  1438  	var Y int32
  1439  	var out_1 int32
  1440  	var out_2 int32
  1441  
  1442  	/* Internal variables and state are in Q10 format */
  1443  	for k = 0; k < N2; k++ {
  1444  		/* Convert to Q10 */
  1445  		in32 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr((2*k))*2)))) << (10))
  1446  
  1447  		/* All-pass section for even input sample */
  1448  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S))))
  1449  		X = ((Y) + ((((Y) >> 16) * (int32(A_fb1_21[0]))) + ((((Y) & 0x0000FFFF) * (int32(A_fb1_21[0]))) >> 16)))
  1450  		out_1 = ((*(*int32)(unsafe.Pointer(S))) + (X))
  1451  		*(*int32)(unsafe.Pointer(S)) = ((in32) + (X))
  1452  
  1453  		/* Convert to Q10 */
  1454  		in32 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr(((2*k)+1))*2)))) << (10))
  1455  
  1456  		/* All-pass section for odd input sample */
  1457  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S + 1*4))))
  1458  		X = ((((Y) >> 16) * (int32(A_fb1_20[0]))) + ((((Y) & 0x0000FFFF) * (int32(A_fb1_20[0]))) >> 16))
  1459  		out_2 = ((*(*int32)(unsafe.Pointer(S + 1*4))) + (X))
  1460  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((in32) + (X))
  1461  
  1462  		/* Add/subtract, convert back to int16 and store to output */
  1463  		*(*int16)(unsafe.Pointer(outL + uintptr(k)*2)) = func() int16 {
  1464  			if (func() int32 {
  1465  				if (11) == 1 {
  1466  					return ((((out_2) + (out_1)) >> 1) + (((out_2) + (out_1)) & 1))
  1467  				}
  1468  				return (((((out_2) + (out_1)) >> ((11) - 1)) + 1) >> 1)
  1469  			}()) > 0x7FFF {
  1470  				return int16(0x7FFF)
  1471  			}
  1472  			return func() int16 {
  1473  				if (func() int32 {
  1474  					if (11) == 1 {
  1475  						return ((((out_2) + (out_1)) >> 1) + (((out_2) + (out_1)) & 1))
  1476  					}
  1477  					return (((((out_2) + (out_1)) >> ((11) - 1)) + 1) >> 1)
  1478  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
  1479  					return libc.Int16FromInt32(0x8000)
  1480  				}
  1481  				return func() int16 {
  1482  					if (11) == 1 {
  1483  						return (int16((((out_2) + (out_1)) >> 1) + (((out_2) + (out_1)) & 1)))
  1484  					}
  1485  					return (int16(((((out_2) + (out_1)) >> ((11) - 1)) + 1) >> 1))
  1486  				}()
  1487  			}()
  1488  		}()
  1489  		*(*int16)(unsafe.Pointer(outH + uintptr(k)*2)) = func() int16 {
  1490  			if (func() int32 {
  1491  				if (11) == 1 {
  1492  					return ((((out_2) - (out_1)) >> 1) + (((out_2) - (out_1)) & 1))
  1493  				}
  1494  				return (((((out_2) - (out_1)) >> ((11) - 1)) + 1) >> 1)
  1495  			}()) > 0x7FFF {
  1496  				return int16(0x7FFF)
  1497  			}
  1498  			return func() int16 {
  1499  				if (func() int32 {
  1500  					if (11) == 1 {
  1501  						return ((((out_2) - (out_1)) >> 1) + (((out_2) - (out_1)) & 1))
  1502  					}
  1503  					return (((((out_2) - (out_1)) >> ((11) - 1)) + 1) >> 1)
  1504  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
  1505  					return libc.Int16FromInt32(0x8000)
  1506  				}
  1507  				return func() int16 {
  1508  					if (11) == 1 {
  1509  						return (int16((((out_2) - (out_1)) >> 1) + (((out_2) - (out_1)) & 1)))
  1510  					}
  1511  					return (int16(((((out_2) - (out_1)) >> ((11) - 1)) + 1) >> 1))
  1512  				}()
  1513  			}()
  1514  		}()
  1515  	}
  1516  }
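
        // Illustrative usage, not part of the generated ccgo output: splitting N int16 samples at `in`
        // into a low band and a high band of N/2 samples each. The two int32 state words must be zero
        // before the first call and carried between calls; here they are zeroed locally and discarded,
        // and the scratch argument (unused by the routine) is passed as 0. Names are hypothetical.
        func anaFiltBank1UsageExample(tls *libc.TLS, in uintptr, n int32) {
        	bp := tls.Alloc(8 + 2*int(n)) // int32 S[2], then int16 outL[n/2] and int16 outH[n/2]
        	defer tls.Free(8 + 2*int(n))
        	S := bp
        	outL := bp + 8
        	outH := bp + 8 + uintptr(n) // n/2 samples of 2 bytes each
        	libc.Xmemset(tls, S, 0, 8)
        	SKP_Silk_ana_filt_bank_1(tls, in, S, outL, outH, 0, n)
        }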
  1517  
  1518  /* Apply sine window to signal vector.                                      */
  1519  /* Window types:                                                            */
  1520  /*    1 -> sine window from 0 to pi/2                                       */
  1521  /*    2 -> sine window from pi/2 to pi                                      */
  1522  /* Every other sample is linearly interpolated, for speed.                  */
  1523  /* Window length must be between 16 and 120 (incl) and a multiple of 4.     */
  1524  
  1525  /* Matlab code for table:
  1526     for k=16:9*4:16+2*9*4, fprintf(' %7.d,', -round(65536*pi ./ (k:4:k+8*4))); fprintf('\n'); end
  1527  */
  1528  var freq_table_Q16 = [27]int16{
  1529  	int16(12111), int16(9804), int16(8235), int16(7100), int16(6239), int16(5565), int16(5022), int16(4575), int16(4202),
  1530  	int16(3885), int16(3612), int16(3375), int16(3167), int16(2984), int16(2820), int16(2674), int16(2542), int16(2422),
  1531  	int16(2313), int16(2214), int16(2123), int16(2038), int16(1961), int16(1889), int16(1822), int16(1760), int16(1702),
  1532  } /* SKP_Silk_apply_sine_window.c:40:18 */
  1533  
  1534  func SKP_Silk_apply_sine_window(tls *libc.TLS, px_win uintptr, px uintptr, win_type int32, length int32) { /* SKP_Silk_apply_sine_window.c:47:6: */
  1535  	var k int32
  1536  	var f_Q16 int32
  1537  	var c_Q16 int32
  1538  	var S0_Q16 int32
  1539  	var S1_Q16 int32
  1540  
  1541  	/* Length must be in a range from 16 to 120 and a multiple of 4 */
  1542  
  1543  	/* Input pointer must be 4-byte aligned */
  1544  
  1545  	/* Frequency */
  1546  	k = ((length >> 2) - 4)
  1547  
  1548  	f_Q16 = int32(freq_table_Q16[k])
  1549  
  1550  	/* Factor used for cosine approximation */
  1551  	c_Q16 = ((((f_Q16) >> 16) * (int32(int16(-f_Q16)))) + ((((f_Q16) & 0x0000FFFF) * (int32(int16(-f_Q16)))) >> 16))
  1552  
  1553  	/* initialize state */
  1554  	if win_type == 1 {
  1555  		/* start from 0 */
  1556  		S0_Q16 = 0
  1557  		/* approximation of sin(f) */
  1558  		S1_Q16 = (f_Q16 + ((length) >> (3)))
  1559  	} else {
  1560  		/* start from 1 */
  1561  		S0_Q16 = (int32(1) << 16)
  1562  		/* approximation of cos(f) */
  1563  		S1_Q16 = (((int32(1) << 16) + ((c_Q16) >> (1))) + ((length) >> (4)))
  1564  	}
  1565  
  1566  	/* Uses the recursive equation:   sin(n*f) = 2 * cos(f) * sin((n-1)*f) - sin((n-2)*f)    */
  1567  	/* 4 samples at a time */
  1568  	for k = 0; k < length; k = k + (4) {
  1569  		*(*int16)(unsafe.Pointer(px_win + uintptr(k)*2)) = (int16(((((S0_Q16 + S1_Q16) >> (1)) >> 16) * (int32(*(*int16)(unsafe.Pointer(px + uintptr(k)*2))))) + (((((S0_Q16 + S1_Q16) >> (1)) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(px + uintptr(k)*2))))) >> 16)))
  1570  		*(*int16)(unsafe.Pointer(px_win + uintptr((k+1))*2)) = (int16((((S1_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(px + uintptr((k+1))*2))))) + ((((S1_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(px + uintptr((k+1))*2))))) >> 16)))
  1571  		S0_Q16 = (((((((S1_Q16) >> 16) * (int32(int16(c_Q16)))) + ((((S1_Q16) & 0x0000FFFF) * (int32(int16(c_Q16)))) >> 16)) + ((S1_Q16) << (1))) - S0_Q16) + 1)
  1572  		S0_Q16 = func() int32 {
  1573  			if (S0_Q16) < (int32(1) << 16) {
  1574  				return S0_Q16
  1575  			}
  1576  			return (int32(1) << 16)
  1577  		}()
  1578  
  1579  		*(*int16)(unsafe.Pointer(px_win + uintptr((k+2))*2)) = (int16(((((S0_Q16 + S1_Q16) >> (1)) >> 16) * (int32(*(*int16)(unsafe.Pointer(px + uintptr((k+2))*2))))) + (((((S0_Q16 + S1_Q16) >> (1)) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(px + uintptr((k+2))*2))))) >> 16)))
  1580  		*(*int16)(unsafe.Pointer(px_win + uintptr((k+3))*2)) = (int16((((S0_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(px + uintptr((k+3))*2))))) + ((((S0_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(px + uintptr((k+3))*2))))) >> 16)))
  1581  		S1_Q16 = ((((((S0_Q16) >> 16) * (int32(int16(c_Q16)))) + ((((S0_Q16) & 0x0000FFFF) * (int32(int16(c_Q16)))) >> 16)) + ((S0_Q16) << (1))) - S1_Q16)
  1582  		S1_Q16 = func() int32 {
  1583  			if (S1_Q16) < (int32(1) << 16) {
  1584  				return S1_Q16
  1585  			}
  1586  			return (int32(1) << 16)
  1587  		}()
  1588  	}
  1589  }
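
        // Illustrative reference, not part of the generated ccgo output: win_type 1 produces a sine
        // rising from 0 to 1 (phase 0 to pi/2) across the frame and win_type 2 the mirrored, falling
        // half. A sketch of the same window shape built on SKP_Silk_SIN_APPROX_Q24 above, ignoring the
        // pairwise linear interpolation and rounding details of the fixed-point routine; the name and
        // exact phase step are assumptions for illustration only.
        func sineWindowShapeRef(tls *libc.TLS, winType int32, length int32, k int32) int32 {
        	phase := (int32(16384) * (k + 1)) / length // a quarter turn (pi/2 in Q16) over the frame
        	if winType == 2 {
        		phase += 16384 // start at pi/2 instead of 0
        	}
        	return SKP_Silk_SIN_APPROX_Q24(tls, phase) // window gain for sample k, in Q24
        }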
  1590  
  1591  /* Function that returns the maximum absolute value of the input vector */
  1592  func SKP_Silk_int16_array_maxabs(tls *libc.TLS, vec uintptr, len int32) int16 { /* SKP_Silk_array_maxabs.c:40:11: */
  1593  	var max int32 = 0
  1594  	var i int32
  1595  	var lvl int32 = 0
  1596  	var ind int32
  1597  	if len == 0 {
  1598  		return int16(0)
  1599  	}
  1600  
  1601  	ind = (len - 1)
  1602  	max = ((int32(*(*int16)(unsafe.Pointer(vec + uintptr(ind)*2)))) * (int32(*(*int16)(unsafe.Pointer(vec + uintptr(ind)*2)))))
  1603  	for i = (len - 2); i >= 0; i-- {
  1604  		lvl = ((int32(*(*int16)(unsafe.Pointer(vec + uintptr(i)*2)))) * (int32(*(*int16)(unsafe.Pointer(vec + uintptr(i)*2)))))
  1605  		if lvl > max {
  1606  			max = lvl
  1607  			ind = i
  1608  		}
  1609  	}
  1610  
  1611  	/* Do not return 32768, as it will not fit in an int16 and may lead to problems later on */
  1612  	if max >= 1073676289 { // (2^15-1)^2 = 1073676289
  1613  		return int16(0x7FFF)
  1614  	} else {
  1615  		if int32(*(*int16)(unsafe.Pointer(vec + uintptr(ind)*2))) < 0 {
  1616  			return int16(-int32(*(*int16)(unsafe.Pointer(vec + uintptr(ind)*2))))
  1617  		} else {
  1618  			return *(*int16)(unsafe.Pointer(vec + uintptr(ind)*2))
  1619  		}
  1620  	}
  1621  	return int16(0)
  1622  }
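
        // Illustrative reference, not part of the generated ccgo output: a plain-Go equivalent of the
        // saturating max-abs search above, clamping the -32768 case to 32767 so the result always fits
        // in an int16. The name is hypothetical.
        func int16MaxAbsRef(vec []int16) int16 {
        	var maxAbs int32
        	for _, v := range vec {
        		a := int32(v)
        		if a < 0 {
        			a = -a
        		}
        		if a > maxAbs {
        			maxAbs = a
        		}
        	}
        	if maxAbs > 0x7FFF {
        		maxAbs = 0x7FFF
        	}
        	return int16(maxAbs)
        }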
  1623  
  1624  /* Compute autocorrelation */
  1625  func SKP_Silk_autocorr(tls *libc.TLS, results uintptr, scale uintptr, inputData uintptr, inputDataSize int32, correlationCount int32) { /* SKP_Silk_autocorr.c:40:6: */
  1626  	var i int32
  1627  	var lz int32
  1628  	var nRightShifts int32
  1629  	var corrCount int32
  1630  	var corr64 int64_t
  1631  
  1632  	corrCount = SKP_min_int(tls, inputDataSize, correlationCount)
  1633  
  1634  	/* compute energy (zero-lag correlation) */
  1635  	corr64 = SKP_Silk_inner_prod16_aligned_64(tls, inputData, inputData, inputDataSize)
  1636  
  1637  	/* deal with all-zero input data */
  1638  	corr64 = corr64 + (int64(1))
  1639  
  1640  	/* number of leading zeros */
  1641  	lz = SKP_Silk_CLZ64(tls, corr64)
  1642  
  1643  	/* scaling: number of right shifts applied to correlations */
  1644  	nRightShifts = (35 - lz)
  1645  	*(*int32)(unsafe.Pointer(scale)) = nRightShifts
  1646  
  1647  	if nRightShifts <= 0 {
  1648  		*(*int32)(unsafe.Pointer(results)) = ((int32(corr64)) << (-nRightShifts))
  1649  
  1650  		/* compute remaining correlations based on int32 inner product */
  1651  		for i = 1; i < corrCount; i++ {
  1652  			*(*int32)(unsafe.Pointer(results + uintptr(i)*4)) = ((SKP_Silk_inner_prod_aligned(tls, inputData, (inputData + uintptr(i)*2), (inputDataSize - i))) << (-nRightShifts))
  1653  		}
  1654  	} else {
  1655  		*(*int32)(unsafe.Pointer(results)) = (int32((corr64) >> (nRightShifts)))
  1656  
  1657  		/* compute remaining correlations based on int64 inner product */
  1658  		for i = 1; i < corrCount; i++ {
  1659  			*(*int32)(unsafe.Pointer(results + uintptr(i)*4)) = (int32((SKP_Silk_inner_prod16_aligned_64(tls, inputData, (inputData + uintptr(i)*2), (inputDataSize - i))) >> (nRightShifts)))
  1660  		}
  1661  	}
  1662  }
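
        // Illustrative reference, not part of the generated ccgo output: the routine above computes
        // results[i] ~= ( sum_k x[k]*x[k+i] ) >> scale, where scale is chosen from the leading zeros of
        // the zero-lag energy so that every correlation fits in an int32. A 64-bit sketch of the
        // unscaled correlations, with a hypothetical name and Go slices in place of raw pointers:
        func autocorrRef(x []int16, corrCount int) []int64 {
        	r := make([]int64, corrCount)
        	for i := 0; i < corrCount && i < len(x); i++ {
        		for k := 0; k+i < len(x); k++ {
        			r[i] += int64(x[k]) * int64(x[k+i])
        		}
        	}
        	return r
        }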
  1663  
  1664  /* Second order ARMA filter */
  1665  /* Can handle slowly varying filter coefficients */
  1666  func SKP_Silk_biquad(tls *libc.TLS, in uintptr, B uintptr, A uintptr, S uintptr, out uintptr, len int32) { /* SKP_Silk_biquad.c:41:6: */
  1667  	var k int32
  1668  	var in16 int32
  1669  	var A0_neg int32
  1670  	var A1_neg int32
  1671  	var S0 int32
  1672  	var S1 int32
  1673  	var out32 int32
  1674  	var tmp32 int32
  1675  
  1676  	S0 = *(*int32)(unsafe.Pointer(S))
  1677  	S1 = *(*int32)(unsafe.Pointer(S + 1*4))
  1678  	A0_neg = -int32(*(*int16)(unsafe.Pointer(A)))
  1679  	A1_neg = -int32(*(*int16)(unsafe.Pointer(A + 1*2)))
  1680  	for k = 0; k < len; k++ {
  1681  		/* S[ 0 ], S[ 1 ]: Q13 */
  1682  		in16 = int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))
  1683  		out32 = ((S0) + ((int32(int16(in16))) * (int32(*(*int16)(unsafe.Pointer(B))))))
  1684  
  1685  		S0 = ((S1) + ((int32(int16(in16))) * (int32(*(*int16)(unsafe.Pointer(B + 1*2))))))
  1686  		S0 = S0 + (((((out32) >> 16) * (int32(int16(A0_neg)))) + ((((out32) & 0x0000FFFF) * (int32(int16(A0_neg)))) >> 16)) << (3))
  1687  
  1688  		S1 = (((((out32) >> 16) * (int32(int16(A1_neg)))) + ((((out32) & 0x0000FFFF) * (int32(int16(A1_neg)))) >> 16)) << (3))
  1689  		S1 = ((S1) + ((int32(int16(in16))) * (int32(*(*int16)(unsafe.Pointer(B + 2*2))))))
  1690  		tmp32 = ((func() int32 {
  1691  			if (13) == 1 {
  1692  				return (((out32) >> 1) + ((out32) & 1))
  1693  			}
  1694  			return ((((out32) >> ((13) - 1)) + 1) >> 1)
  1695  		}()) + 1)
  1696  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
  1697  			if (tmp32) > 0x7FFF {
  1698  				return int16(0x7FFF)
  1699  			}
  1700  			return func() int16 {
  1701  				if (tmp32) < (int32(libc.Int16FromInt32(0x8000))) {
  1702  					return libc.Int16FromInt32(0x8000)
  1703  				}
  1704  				return int16(tmp32)
  1705  			}()
  1706  		}()
  1707  	}
  1708  	*(*int32)(unsafe.Pointer(S)) = S0
  1709  	*(*int32)(unsafe.Pointer(S + 1*4)) = S1
  1710  }
  1711  
  1712  /* Second order ARMA filter, alternative implementation */
  1713  func SKP_Silk_biquad_alt(tls *libc.TLS, in uintptr, B_Q28 uintptr, A_Q28 uintptr, S uintptr, out uintptr, len int32) { /* SKP_Silk_biquad_alt.c:38:6: */
  1714  	/* DIRECT FORM II TRANSPOSED (uses 2 element state vector) */
  1715  	var k int32
  1716  	var inval int32
  1717  	var A0_U_Q28 int32
  1718  	var A0_L_Q28 int32
  1719  	var A1_U_Q28 int32
  1720  	var A1_L_Q28 int32
  1721  	var out32_Q14 int32
  1722  
  1723  	/* Negate A_Q28 values and split in two parts */
  1724  	A0_L_Q28 = ((-*(*int32)(unsafe.Pointer(A_Q28))) & 0x00003FFF)       /* lower part */
  1725  	A0_U_Q28 = ((-*(*int32)(unsafe.Pointer(A_Q28))) >> (14))            /* upper part */
  1726  	A1_L_Q28 = ((-*(*int32)(unsafe.Pointer(A_Q28 + 1*4))) & 0x00003FFF) /* lower part */
  1727  	A1_U_Q28 = ((-*(*int32)(unsafe.Pointer(A_Q28 + 1*4))) >> (14))      /* upper part */
  1728  
  1729  	for k = 0; k < len; k++ {
  1730  		/* S[ 0 ], S[ 1 ]: Q12 */
  1731  		inval = int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))
  1732  		out32_Q14 = (((*(*int32)(unsafe.Pointer(S))) + ((((*(*int32)(unsafe.Pointer(B_Q28))) >> 16) * (int32(int16(inval)))) + ((((*(*int32)(unsafe.Pointer(B_Q28))) & 0x0000FFFF) * (int32(int16(inval)))) >> 16))) << (2))
  1733  
  1734  		*(*int32)(unsafe.Pointer(S)) = (*(*int32)(unsafe.Pointer(S + 1*4)) + (func() int32 {
  1735  			if (14) == 1 {
  1736  				return ((((((out32_Q14) >> 16) * (int32(int16(A0_L_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A0_L_Q28)))) >> 16)) >> 1) + (((((out32_Q14) >> 16) * (int32(int16(A0_L_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A0_L_Q28)))) >> 16)) & 1))
  1737  			}
  1738  			return (((((((out32_Q14) >> 16) * (int32(int16(A0_L_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A0_L_Q28)))) >> 16)) >> ((14) - 1)) + 1) >> 1)
  1739  		}()))
  1740  		*(*int32)(unsafe.Pointer(S)) = ((*(*int32)(unsafe.Pointer(S))) + ((((out32_Q14) >> 16) * (int32(int16(A0_U_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A0_U_Q28)))) >> 16)))
  1741  		*(*int32)(unsafe.Pointer(S)) = ((*(*int32)(unsafe.Pointer(S))) + ((((*(*int32)(unsafe.Pointer(B_Q28 + 1*4))) >> 16) * (int32(int16(inval)))) + ((((*(*int32)(unsafe.Pointer(B_Q28 + 1*4))) & 0x0000FFFF) * (int32(int16(inval)))) >> 16)))
  1742  
  1743  		*(*int32)(unsafe.Pointer(S + 1*4)) = func() int32 {
  1744  			if (14) == 1 {
  1745  				return ((((((out32_Q14) >> 16) * (int32(int16(A1_L_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A1_L_Q28)))) >> 16)) >> 1) + (((((out32_Q14) >> 16) * (int32(int16(A1_L_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A1_L_Q28)))) >> 16)) & 1))
  1746  			}
  1747  			return (((((((out32_Q14) >> 16) * (int32(int16(A1_L_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A1_L_Q28)))) >> 16)) >> ((14) - 1)) + 1) >> 1)
  1748  		}()
  1749  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((*(*int32)(unsafe.Pointer(S + 1*4))) + ((((out32_Q14) >> 16) * (int32(int16(A1_U_Q28)))) + ((((out32_Q14) & 0x0000FFFF) * (int32(int16(A1_U_Q28)))) >> 16)))
  1750  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((*(*int32)(unsafe.Pointer(S + 1*4))) + ((((*(*int32)(unsafe.Pointer(B_Q28 + 2*4))) >> 16) * (int32(int16(inval)))) + ((((*(*int32)(unsafe.Pointer(B_Q28 + 2*4))) & 0x0000FFFF) * (int32(int16(inval)))) >> 16)))
  1751  
  1752  		/* Scale back to Q0 and saturate */
  1753  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
  1754  			if (((out32_Q14 + (int32(1) << 14)) - 1) >> (14)) > 0x7FFF {
  1755  				return int16(0x7FFF)
  1756  			}
  1757  			return func() int16 {
  1758  				if (((out32_Q14 + (int32(1) << 14)) - 1) >> (14)) < (int32(libc.Int16FromInt32(0x8000))) {
  1759  					return libc.Int16FromInt32(0x8000)
  1760  				}
  1761  				return (int16(((out32_Q14 + (int32(1) << 14)) - 1) >> (14)))
  1762  			}()
  1763  		}()
  1764  	}
  1765  }
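
        // Illustrative reference, not part of the generated ccgo output: SKP_Silk_biquad_alt is a
        // direct-form-II-transposed biquad; per sample y = B0*x + s0, then s0 = s1 + B1*x - A1*y and
        // s1 = B2*x - A2*y, with the Q28 denominator coefficients split into 14-bit halves so the
        // products stay inside 32 bits. A floating-point sketch of the same recurrence (names are
        // hypothetical; a[0] and a[1] are the denominator coefficients A1, A2):
        func biquadAltRef(in, out []float64, b [3]float64, a [2]float64, s *[2]float64) {
        	for k := range in {
        		y := b[0]*in[k] + s[0]
        		s[0] = s[1] + b[1]*in[k] - a[0]*y
        		s[1] = b[2]*in[k] - a[1]*y
        		out[k] = y
        	}
        }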
  1766  
  1767  /* Compute reflection coefficients from input signal */
  1768  func SKP_Silk_burg_modified(tls *libc.TLS, res_nrg uintptr, res_nrg_Q uintptr, A_Q16 uintptr, x uintptr, subfr_length int32, nb_subfr int32, WhiteNoiseFrac_Q32 int32, D int32) { /* SKP_Silk_burg_modified.c:49:6: */
  1769  	bp := tls.Alloc(336)
  1770  	defer tls.Free(336)
  1771  
  1772  	var k int32
  1773  	var n int32
  1774  	var s int32
  1775  	var lz int32
  1776  	// var rshifts int32 at bp+4, 4
  1777  
  1778  	var rshifts_extra int32
  1779  	// var C0 int32 at bp, 4
  1780  
  1781  	var num int32
  1782  	var nrg int32
  1783  	var rc_Q31 int32
  1784  	var Atmp_QA int32
  1785  	var Atmp1 int32
  1786  	var tmp1 int32
  1787  	var tmp2 int32
  1788  	var x1 int32
  1789  	var x2 int32
  1790  	var x_ptr uintptr
  1791  	// var C_first_row [16]int32 at bp+8, 64
  1792  
  1793  	// var C_last_row [16]int32 at bp+72, 64
  1794  
  1795  	// var Af_QA [16]int32 at bp+272, 64
  1796  
  1797  	// var CAf [17]int32 at bp+204, 68
  1798  
  1799  	// var CAb [17]int32 at bp+136, 68
  1800  
  1801  	/* Compute autocorrelations, added over subframes */
  1802  	SKP_Silk_sum_sqr_shift(tls, bp /* &C0 */, bp+4 /* &rshifts */, x, (nb_subfr * subfr_length))
  1803  	if *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) > (32 - 25) {
  1804  		*(*int32)(unsafe.Pointer(bp /* C0 */)) = ((*(*int32)(unsafe.Pointer(bp /* C0 */))) << (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - (32 - 25)))
  1805  
  1806  		*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) = (32 - 25)
  1807  	} else {
  1808  		lz = (SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(bp /* C0 */))) - 1)
  1809  		rshifts_extra = (2 - lz)
  1810  		if rshifts_extra > 0 {
  1811  			rshifts_extra = func() int32 {
  1812  				if (rshifts_extra) < ((32 - 25) - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */))) {
  1813  					return rshifts_extra
  1814  				}
  1815  				return ((32 - 25) - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)))
  1816  			}()
  1817  			*(*int32)(unsafe.Pointer(bp /* C0 */)) = ((*(*int32)(unsafe.Pointer(bp /* C0 */))) >> (rshifts_extra))
  1818  		} else {
  1819  			rshifts_extra = func() int32 {
  1820  				if (rshifts_extra) > (-16 - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */))) {
  1821  					return rshifts_extra
  1822  				}
  1823  				return (-16 - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)))
  1824  			}()
  1825  			*(*int32)(unsafe.Pointer(bp /* C0 */)) = ((*(*int32)(unsafe.Pointer(bp /* C0 */))) << (-rshifts_extra))
  1826  		}
  1827  		*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) += rshifts_extra
  1828  	}
  1829  	libc.Xmemset(tls, bp+8 /* &C_first_row[0] */, 0, (uint32(16) * uint32(unsafe.Sizeof(int32(0)))))
  1830  	if *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) > 0 {
  1831  		for s = 0; s < nb_subfr; s++ {
  1832  			x_ptr = (x + uintptr((s*subfr_length))*2)
  1833  			for n = 1; n < (D + 1); n++ {
  1834  				*(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row */ + uintptr((n-1))*4)) += (int32((SKP_Silk_inner_prod16_aligned_64(tls, x_ptr, (x_ptr + uintptr(n)*2), (subfr_length - n))) >> (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)))))
  1835  			}
  1836  		}
  1837  	} else {
  1838  		for s = 0; s < nb_subfr; s++ {
  1839  			x_ptr = (x + uintptr((s*subfr_length))*2)
  1840  			for n = 1; n < (D + 1); n++ {
  1841  				*(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row */ + uintptr((n-1))*4)) += ((SKP_Silk_inner_prod_aligned(tls, x_ptr, (x_ptr + uintptr(n)*2), (subfr_length - n))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */))))
  1842  			}
  1843  		}
  1844  	}
  1845  	libc.Xmemcpy(tls, bp+72 /* &C_last_row[0] */, bp+8 /* &C_first_row[0] */, (uint32(16) * uint32(unsafe.Sizeof(int32(0)))))
  1846  
  1847  	/* Initialize */
  1848  	*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */)) = libc.AssignPtrInt32(bp+204 /* &CAf */, ((*(*int32)(unsafe.Pointer(bp /* C0 */)) + (int32(((int64_t(WhiteNoiseFrac_Q32)) * (int64_t(*(*int32)(unsafe.Pointer(bp /* C0 */))))) >> (32)))) + 1)) // Q(-rshifts)
  1849  
  1850  	for n = 0; n < D; n++ {
  1851  		/* Update first row of correlation matrix (without first element) */
  1852  		/* Update last row of correlation matrix (without last element, stored in reversed order) */
  1853  		/* Update C * Af */
  1854  		/* Update C * flipud(Af) (stored in reversed order) */
  1855  		if *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) > -2 {
  1856  			for s = 0; s < nb_subfr; s++ {
  1857  				x_ptr = (x + uintptr((s*subfr_length))*2)
  1858  				x1 = -((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(n)*2)))) << (16 - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */))))                    // Q(16-rshifts)
  1859  				x2 = -((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)-1))*2)))) << (16 - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)))) // Q(16-rshifts)
  1860  				tmp1 = ((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(n)*2)))) << (25 - 16))                                                                // Q(QA-16)
  1861  				tmp2 = ((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)-1))*2)))) << (25 - 16))                                             // Q(QA-16)
  1862  				for k = 0; k < n; k++ {
  1863  					*(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row[0] */ + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row[0] */ + uintptr(k)*4))) + ((((x1) >> 16) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((n-k)-1))*2))))) + ((((x1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((n-k)-1))*2))))) >> 16)))                       // Q( -rshifts )
  1864  					*(*int32)(unsafe.Pointer(bp + 72 /* &C_last_row[0] */ + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(bp + 72 /* &C_last_row[0] */ + uintptr(k)*4))) + ((((x2) >> 16) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)+k))*2))))) + ((((x2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)+k))*2))))) >> 16))) // Q( -rshifts )
  1865  					Atmp_QA = *(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))
  1866  					tmp1 = ((tmp1) + ((((Atmp_QA) >> 16) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((n-k)-1))*2))))) + ((((Atmp_QA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((n-k)-1))*2))))) >> 16)))                       // Q(QA-16)
  1867  					tmp2 = ((tmp2) + ((((Atmp_QA) >> 16) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)+k))*2))))) + ((((Atmp_QA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)+k))*2))))) >> 16))) // Q(QA-16)
  1868  				}
  1869  				tmp1 = ((-tmp1) << ((32 - 25) - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)))) // Q(16-rshifts)
  1870  				tmp2 = ((-tmp2) << ((32 - 25) - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)))) // Q(16-rshifts)
  1871  				for k = 0; k <= n; k++ {
  1872  					*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr(k)*4))) + ((((tmp1) >> 16) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((n-k))*2))))) + ((((tmp1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((n-k))*2))))) >> 16)))                                       // Q( -rshift )
  1873  					*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr(k)*4))) + ((((tmp2) >> 16) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((((subfr_length-n)+k)-1))*2))))) + ((((tmp2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((((subfr_length-n)+k)-1))*2))))) >> 16))) // Q( -rshift )
  1874  				}
  1875  			}
  1876  		} else {
  1877  			for s = 0; s < nb_subfr; s++ {
  1878  				x_ptr = (x + uintptr((s*subfr_length))*2)
  1879  				x1 = -((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(n)*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */))))                    // Q( -rshifts )
  1880  				x2 = -((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)-1))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)))) // Q( -rshifts )
  1881  				tmp1 = ((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(n)*2)))) << (17))                                                                 // Q17
  1882  				tmp2 = ((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)-1))*2)))) << (17))                                              // Q17
  1883  				for k = 0; k < n; k++ {
  1884  					*(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row[0] */ + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row[0] */ + uintptr(k)*4))) + ((x1) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((n-k)-1))*2))))))            // Q( -rshifts )
  1885  					*(*int32)(unsafe.Pointer(bp + 72 /* &C_last_row[0] */ + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(bp + 72 /* &C_last_row[0] */ + uintptr(k)*4))) + ((x2) * (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)+k))*2)))))) // Q( -rshifts )
  1886  					Atmp1 = func() int32 {
  1887  						if (25 - 17) == 1 {
  1888  							return (((*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))) & 1))
  1889  						}
  1890  						return ((((*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))) >> ((25 - 17) - 1)) + 1) >> 1)
  1891  					}() // Q17
  1892  					tmp1 = ((tmp1) + ((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((n-k)-1))*2)))) * (Atmp1)))            // Q17
  1893  					tmp2 = ((tmp2) + ((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(((subfr_length-n)+k))*2)))) * (Atmp1))) // Q17
  1894  				}
  1895  				tmp1 = -tmp1 // Q17
  1896  				tmp2 = -tmp2 // Q17
  1897  				for k = 0; k <= n; k++ {
  1898  					*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr(k)*4)) = (((*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr(k)*4))) + ((((tmp1) >> 16) * (int32((int16((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((n-k))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)))))) + ((((tmp1) & 0x0000FFFF) * (int32((int16((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((n-k))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)))))) >> 16))) + ((tmp1) * (func() int32 {
  1899  						if (16) == 1 {
  1900  							return ((((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((n-k))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)) >> 1) + (((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((n-k))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)) & 1))
  1901  						}
  1902  						return (((((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((n-k))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)) >> ((16) - 1)) + 1) >> 1)
  1903  					}()))) // Q( -rshift )
  1904  					*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr(k)*4)) = (((*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr(k)*4))) + ((((tmp2) >> 16) * (int32((int16((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((((subfr_length-n)+k)-1))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)))))) + ((((tmp2) & 0x0000FFFF) * (int32((int16((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((((subfr_length-n)+k)-1))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)))))) >> 16))) + ((tmp2) * (func() int32 {
  1905  						if (16) == 1 {
  1906  							return ((((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((((subfr_length-n)+k)-1))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)) >> 1) + (((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((((subfr_length-n)+k)-1))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)) & 1))
  1907  						}
  1908  						return (((((int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr((((subfr_length-n)+k)-1))*2)))) << (-*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */)) - 1)) >> ((16) - 1)) + 1) >> 1)
  1909  					}()))) // Q( -rshift )
  1910  				}
  1911  			}
  1912  		}
  1913  
  1914  		/* Calculate numerator and denominator for the next order reflection (parcor) coefficient */
  1915  		tmp1 = *(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row[0] */ + uintptr(n)*4))                                     // Q( -rshifts )
  1916  		tmp2 = *(*int32)(unsafe.Pointer(bp + 72 /* &C_last_row[0] */ + uintptr(n)*4))                                     // Q( -rshifts )
  1917  		num = 0                                                                                                           // Q( -rshifts )
  1918  		nrg = ((*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */))) + (*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */)))) // Q( 1-rshifts )
  1919  		for k = 0; k < n; k++ {
  1920  			Atmp_QA = *(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))
  1921  			lz = (SKP_Silk_CLZ32(tls, func() int32 {
  1922  				if (Atmp_QA) > 0 {
  1923  					return Atmp_QA
  1924  				}
  1925  				return -Atmp_QA
  1926  			}()) - 1)
  1927  			lz = func() int32 {
  1928  				if (32 - 25) < (lz) {
  1929  					return (32 - 25)
  1930  				}
  1931  				return lz
  1932  			}()
  1933  			Atmp1 = ((Atmp_QA) << (lz)) // Q( QA + lz )
  1934  
  1935  			tmp1 = ((tmp1) + ((int32(((int64_t(*(*int32)(unsafe.Pointer(bp + 72 /* &C_last_row[0] */ + uintptr(((n-k)-1))*4)))) * (int64_t(Atmp1))) >> (32))) << ((32 - 25) - lz)))                                                                // Q( -rshifts )
  1936  			tmp2 = ((tmp2) + ((int32(((int64_t(*(*int32)(unsafe.Pointer(bp + 8 /* &C_first_row[0] */ + uintptr(((n-k)-1))*4)))) * (int64_t(Atmp1))) >> (32))) << ((32 - 25) - lz)))                                                                // Q( -rshifts )
  1937  			num = ((num) + ((int32(((int64_t(*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr((n-k))*4)))) * (int64_t(Atmp1))) >> (32))) << ((32 - 25) - lz)))                                                                            // Q( -rshifts )
  1938  			nrg = ((nrg) + ((int32(((int64_t((*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr((k+1))*4))) + (*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr((k+1))*4))))) * (int64_t(Atmp1))) >> (32))) << ((32 - 25) - lz))) // Q( 1-rshifts )
  1939  		}
  1940  		*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr((n+1))*4)) = tmp1 // Q( -rshifts )
  1941  		*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr((n+1))*4)) = tmp2 // Q( -rshifts )
  1942  		num = ((num) + (tmp2))                                                      // Q( -rshifts )
  1943  		num = ((-num) << (1))                                                       // Q( 1-rshifts )
  1944  
  1945  		/* Calculate the next order reflection (parcor) coefficient */
  1946  		if (func() int32 {
  1947  			if (num) > 0 {
  1948  				return num
  1949  			}
  1950  			return -num
  1951  		}()) < nrg {
  1952  			rc_Q31 = SKP_DIV32_varQ(tls, num, nrg, 31)
  1953  		} else {
  1954  			/* Negative energy or ratio too high; set remaining coefficients to zero and exit loop */
  1955  			libc.Xmemset(tls, (bp + 272 /* &Af_QA */ + uintptr(n)*4), 0, ((uint32(D - n)) * uint32(unsafe.Sizeof(int32(0)))))
  1956  
  1957  			break
  1958  		}
  1959  
  1960  		/* Update the AR coefficients */
  1961  		for k = 0; k < ((n + 1) >> 1); k++ {
  1962  			tmp1 = *(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))                                                                               // QA
  1963  			tmp2 = *(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(((n-k)-1))*4))                                                                       // QA
  1964  			*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4)) = ((tmp1) + ((int32(((int64_t(tmp2)) * (int64_t(rc_Q31))) >> (32))) << (1)))         // QA
  1965  			*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(((n-k)-1))*4)) = ((tmp2) + ((int32(((int64_t(tmp1)) * (int64_t(rc_Q31))) >> (32))) << (1))) // QA
  1966  		}
  1967  		*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(n)*4)) = ((rc_Q31) >> (31 - 25)) // QA
  1968  
  1969  		/* Update C * Af and C * Ab */
  1970  		for k = 0; k <= (n + 1); k++ {
  1971  			tmp1 = *(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr(k)*4))                                                                               // Q( -rshifts )
  1972  			tmp2 = *(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr(((n-k)+1))*4))                                                                       // Q( -rshifts )
  1973  			*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr(k)*4)) = ((tmp1) + ((int32(((int64_t(tmp2)) * (int64_t(rc_Q31))) >> (32))) << (1)))         // Q( -rshifts )
  1974  			*(*int32)(unsafe.Pointer(bp + 136 /* &CAb[0] */ + uintptr(((n-k)+1))*4)) = ((tmp2) + ((int32(((int64_t(tmp1)) * (int64_t(rc_Q31))) >> (32))) << (1))) // Q( -rshifts )
  1975  		}
  1976  	}
  1977  
  1978  	/* Return residual energy */
  1979  	nrg = *(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */)) // Q( -rshifts )
  1980  	tmp1 = (int32(1) << 16)                                 // Q16
  1981  	for k = 0; k < D; k++ {
  1982  		Atmp1 = func() int32 {
  1983  			if (25 - 16) == 1 {
  1984  				return (((*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))) & 1))
  1985  			}
  1986  			return ((((*(*int32)(unsafe.Pointer(bp + 272 /* &Af_QA[0] */ + uintptr(k)*4))) >> ((25 - 16) - 1)) + 1) >> 1)
  1987  		}() // Q16
  1988  		nrg = (((nrg) + ((((*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr((k+1))*4))) >> 16) * (int32(int16(Atmp1)))) + ((((*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr((k+1))*4))) & 0x0000FFFF) * (int32(int16(Atmp1)))) >> 16))) + ((*(*int32)(unsafe.Pointer(bp + 204 /* &CAf[0] */ + uintptr((k+1))*4))) * (func() int32 {
  1989  			if (16) == 1 {
  1990  				return (((Atmp1) >> 1) + ((Atmp1) & 1))
  1991  			}
  1992  			return ((((Atmp1) >> ((16) - 1)) + 1) >> 1)
  1993  		}()))) // Q( -rshifts )
  1994  		tmp1 = (((tmp1) + ((((Atmp1) >> 16) * (int32(int16(Atmp1)))) + ((((Atmp1) & 0x0000FFFF) * (int32(int16(Atmp1)))) >> 16))) + ((Atmp1) * (func() int32 {
  1995  			if (16) == 1 {
  1996  				return (((Atmp1) >> 1) + ((Atmp1) & 1))
  1997  			}
  1998  			return ((((Atmp1) >> ((16) - 1)) + 1) >> 1)
  1999  		}()))) // Q16
  2000  		*(*int32)(unsafe.Pointer(A_Q16 + uintptr(k)*4)) = -Atmp1
  2001  	}
  2002  	*(*int32)(unsafe.Pointer(res_nrg)) = (((nrg) + ((((int32(((int64_t(WhiteNoiseFrac_Q32)) * (int64_t(*(*int32)(unsafe.Pointer(bp /* C0 */))))) >> (32))) >> 16) * (int32(int16(-tmp1)))) + ((((int32(((int64_t(WhiteNoiseFrac_Q32)) * (int64_t(*(*int32)(unsafe.Pointer(bp /* C0 */))))) >> (32))) & 0x0000FFFF) * (int32(int16(-tmp1)))) >> 16))) + ((int32(((int64_t(WhiteNoiseFrac_Q32)) * (int64_t(*(*int32)(unsafe.Pointer(bp /* C0 */))))) >> (32))) * (func() int32 {
  2003  		if (16) == 1 {
  2004  			return (((-tmp1) >> 1) + ((-tmp1) & 1))
  2005  		}
  2006  		return ((((-tmp1) >> ((16) - 1)) + 1) >> 1)
  2007  	}()))) // Q( -rshifts )
  2008  	*(*int32)(unsafe.Pointer(res_nrg_Q)) = -*(*int32)(unsafe.Pointer(bp + 4 /* rshifts */))
  2009  }
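
        // Illustrative note, not part of the generated ccgo output: SKP_Silk_burg_modified writes the D
        // prediction coefficients to A_Q16 in Q16 and returns the residual energy as the fixed-point
        // pair (*res_nrg, *res_nrg_Q), i.e. energy = *res_nrg / 2^(*res_nrg_Q), as inferred from the
        // Q( -rshifts ) comments above. A hypothetical helper converting such a pair to float64:
        func qValueToFloatExample(mantissa int32, q int32) float64 {
        	scale := 1.0
        	for i := int32(0); i < q; i++ {
        		scale *= 2
        	}
        	for i := q; i < 0; i++ {
        		scale /= 2
        	}
        	return float64(mantissa) / scale
        }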
  2010  
  2011  /* Chirp (bandwidth expand) LP AR filter */
  2012  func SKP_Silk_bwexpander(tls *libc.TLS, ar uintptr, d int32, chirp_Q16 int32) { /* SKP_Silk_bwexpander.c:31:6: */
  2013  	var i int32
  2014  	var chirp_minus_one_Q16 int32
  2015  
  2016  	chirp_minus_one_Q16 = (chirp_Q16 - 65536)
  2017  
  2018  	/* NB: Don't use SKP_SMULWB instead of SKP_RSHIFT_ROUND( SKP_MUL(), 16 ) below. */
  2019  	/* Bias in SKP_SMULWB can lead to unstable filters                               */
  2020  	for i = 0; i < (d - 1); i++ {
  2021  		*(*int16)(unsafe.Pointer(ar + uintptr(i)*2)) = func() int16 {
  2022  			if (16) == 1 {
  2023  				return (int16((((chirp_Q16) * (int32(*(*int16)(unsafe.Pointer(ar + uintptr(i)*2))))) >> 1) + (((chirp_Q16) * (int32(*(*int16)(unsafe.Pointer(ar + uintptr(i)*2))))) & 1)))
  2024  			}
  2025  			return (int16(((((chirp_Q16) * (int32(*(*int16)(unsafe.Pointer(ar + uintptr(i)*2))))) >> ((16) - 1)) + 1) >> 1))
  2026  		}()
  2027  		chirp_Q16 = chirp_Q16 + (func() int32 {
  2028  			if (16) == 1 {
  2029  				return ((((chirp_Q16) * (chirp_minus_one_Q16)) >> 1) + (((chirp_Q16) * (chirp_minus_one_Q16)) & 1))
  2030  			}
  2031  			return (((((chirp_Q16) * (chirp_minus_one_Q16)) >> ((16) - 1)) + 1) >> 1)
  2032  		}())
  2033  	}
  2034  	*(*int16)(unsafe.Pointer(ar + uintptr((d-1))*2)) = func() int16 {
  2035  		if (16) == 1 {
  2036  			return (int16((((chirp_Q16) * (int32(*(*int16)(unsafe.Pointer(ar + uintptr((d-1))*2))))) >> 1) + (((chirp_Q16) * (int32(*(*int16)(unsafe.Pointer(ar + uintptr((d-1))*2))))) & 1)))
  2037  		}
  2038  		return (int16(((((chirp_Q16) * (int32(*(*int16)(unsafe.Pointer(ar + uintptr((d-1))*2))))) >> ((16) - 1)) + 1) >> 1))
  2039  	}()
  2040  }
  2041  
  2042  /* Chirp (bandwidth expand) LP AR filter */
  2043  func SKP_Silk_bwexpander_32(tls *libc.TLS, ar uintptr, d int32, chirp_Q16 int32) { /* SKP_Silk_bwexpander_32.c:31:6: */
  2044  	var i int32
  2045  	var tmp_chirp_Q16 int32
  2046  
  2047  	tmp_chirp_Q16 = chirp_Q16
  2048  	for i = 0; i < (d - 1); i++ {
  2049  		*(*int32)(unsafe.Pointer(ar + uintptr(i)*4)) = (((((*(*int32)(unsafe.Pointer(ar + uintptr(i)*4))) >> 16) * (int32(int16(tmp_chirp_Q16)))) + ((((*(*int32)(unsafe.Pointer(ar + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(tmp_chirp_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(ar + uintptr(i)*4))) * (func() int32 {
  2050  			if (16) == 1 {
  2051  				return (((tmp_chirp_Q16) >> 1) + ((tmp_chirp_Q16) & 1))
  2052  			}
  2053  			return ((((tmp_chirp_Q16) >> ((16) - 1)) + 1) >> 1)
  2054  		}())))
  2055  		tmp_chirp_Q16 = (((((chirp_Q16) >> 16) * (int32(int16(tmp_chirp_Q16)))) + ((((chirp_Q16) & 0x0000FFFF) * (int32(int16(tmp_chirp_Q16)))) >> 16)) + ((chirp_Q16) * (func() int32 {
  2056  			if (16) == 1 {
  2057  				return (((tmp_chirp_Q16) >> 1) + ((tmp_chirp_Q16) & 1))
  2058  			}
  2059  			return ((((tmp_chirp_Q16) >> ((16) - 1)) + 1) >> 1)
  2060  		}())))
  2061  	}
  2062  	*(*int32)(unsafe.Pointer(ar + uintptr((d-1))*4)) = (((((*(*int32)(unsafe.Pointer(ar + uintptr((d-1))*4))) >> 16) * (int32(int16(tmp_chirp_Q16)))) + ((((*(*int32)(unsafe.Pointer(ar + uintptr((d-1))*4))) & 0x0000FFFF) * (int32(int16(tmp_chirp_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(ar + uintptr((d-1))*4))) * (func() int32 {
  2063  		if (16) == 1 {
  2064  			return (((tmp_chirp_Q16) >> 1) + ((tmp_chirp_Q16) & 1))
  2065  		}
  2066  		return ((((tmp_chirp_Q16) >> ((16) - 1)) + 1) >> 1)
  2067  	}())))
  2068  }
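
        // Illustrative reference, not part of the generated ccgo output: both bwexpander routines scale
        // the i-th AR coefficient by chirp^(i+1), with chirp = chirp_Q16 / 65536, which pulls the poles
        // of the corresponding LP filter towards the origin (bandwidth expansion). A floating-point
        // sketch with a hypothetical name:
        func bwexpanderRef(ar []float64, chirp float64) {
        	c := chirp
        	for i := range ar {
        		ar[i] *= c
        		c *= chirp
        	}
        }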
  2069  
  2070  /***********************************************************************
  2071  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  2072  Redistribution and use in source and binary forms, with or without
  2073  modification, (subject to the limitations in the disclaimer below)
  2074  are permitted provided that the following conditions are met:
  2075  - Redistributions of source code must retain the above copyright notice,
  2076  this list of conditions and the following disclaimer.
  2077  - Redistributions in binary form must reproduce the above copyright
  2078  notice, this list of conditions and the following disclaimer in the
  2079  documentation and/or other materials provided with the distribution.
  2080  - Neither the name of Skype Limited, nor the names of specific
  2081  contributors, may be used to endorse or promote products derived from
  2082  this software without specific prior written permission.
  2083  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  2084  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  2085  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  2086  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  2087  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  2088  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  2089  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  2090  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  2091  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  2092  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  2093  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  2094  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  2095  ***********************************************************************/
  2123  
  2124  /******************/
  2125  /* Error messages */
  2126  /******************/
  2127  
  2128  /**************************/
  2129  /* Encoder error messages */
  2130  /**************************/
  2131  
  2132  /* Input length is not a multiple of 10 ms, or length is longer than the packet length */
  2133  
  2134  /* Sampling frequency not 8000, 12000, 16000 or 24000 Hertz */
  2135  
  2136  /* Packet size not 20, 40, 60, 80 or 100 ms */
  2137  
  2138  /* Allocated payload buffer too short */
  2139  
  2140  /* Loss rate not between 0 and 100 percent */
  2141  
  2142  /* Complexity setting not valid, use 0, 1 or 2 */
  2143  
  2144  /* Inband FEC setting not valid, use 0 or 1 */
  2145  
  2146  /* DTX setting not valid, use 0 or 1 */
  2147  
  2148  /* Internal encoder error */
  2149  
  2150  /**************************/
  2151  /* Decoder error messages */
  2152  /**************************/
  2153  
  2154  /* Output sampling frequency lower than internal decoded sampling frequency */
  2155  
  2156  /* Payload size exceeded the maximum allowed 1024 bytes */
  2157  
  2158  /* Payload has bit errors */
  2159  
  2160  /***********************************************************************
  2161  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  2162  Redistribution and use in source and binary forms, with or without
  2163  modification, (subject to the limitations in the disclaimer below)
  2164  are permitted provided that the following conditions are met:
  2165  - Redistributions of source code must retain the above copyright notice,
  2166  this list of conditions and the following disclaimer.
  2167  - Redistributions in binary form must reproduce the above copyright
  2168  notice, this list of conditions and the following disclaimer in the
  2169  documentation and/or other materials provided with the distribution.
  2170  - Neither the name of Skype Limited, nor the names of specific
  2171  contributors, may be used to endorse or promote products derived from
  2172  this software without specific prior written permission.
  2173  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  2174  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  2175  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  2176  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  2177  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  2178  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  2179  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  2180  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  2181  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  2182  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  2183  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  2184  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  2185  ***********************************************************************/
  2186  
  2187  /* Limits on bitrate */
  2188  
  2189  /* Transition bitrates between modes */
  2190  
  2191  /* Integration/hysteresis threshold for lowering internal sample frequency */
  2192  /* 30000000 -> 6 sec if bitrate is 5000 bps below limit; 3 sec if bitrate is 10000 bps below limit */
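
// Hedged worked example (illustrative only; the names below are not from the
// generated sources): the encoder accumulates PacketSize_ms * (TargetRate_bps - threshold)
// per packet, so a target rate 5000 bps under the down-switch threshold reaches
// -30,000,000 after 6 s, matching the comment above.
func exampleBitrateHysteresis() int32 {
	var bitrateDiff int32
	const packetSize_ms = 20 // assumed 20 ms packets
	const deficit_bps = 5000 // target rate 5000 bps below the down-switch threshold
	for i := 0; i < 300; i++ { // 300 * 20 ms = 6 s
		bitrateDiff -= packetSize_ms * deficit_bps
	}
	return bitrateDiff // -30,000,000
}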
  2193  
  2194  /* DTX settings                                 */
  2195  
  2196  /* Number of consecutive packets without FEC before telling the JB */
  2197  
  2198  /* Maximum delay between real packet and LBRR packet */
  2199  
  2200  /* LBRR usage defines */
  2201  
  2202  /* Frame termination indicator defines */
  2203  
  2204  /* Number of second-order sections for the SWB detection HP filter */
  2205  
  2206  /* Low complexity setting */
  2207  
  2208  /* Activate bandwidth transition filtering for mode switching */
  2209  
  2210  /* Decoder Parameters */
  2211  
  2212  /* Maximum sampling frequency, should be 16 for some embedded platforms */
  2213  
  2214  /* Signal Types used by silk */
  2215  
  2216  /* VAD Types used by silk */
  2217  
  2218  /* Number of samples per frame */
  2219  
  2220  /* Milliseconds of lookahead for pitch analysis */
  2221  
  2222  /* Length of LPC window used in find pitch */
  2223  
  2224  /* Order of LPC used in find pitch */
  2225  
  2226  /* Milliseconds of lookahead for noise shape analysis */
  2227  
  2228  /* Max length of LPC window used in noise shape analysis */
  2229  
  2230  /* Max number of bytes in payload output buffer (may contain multiple frames) */
  2231  
  2232  /* dB level of lowest gain quantization level */
  2233  /* dB level of highest gain quantization level */
  2234  /* Number of gain quantization levels */
  2235  /* Max increase in gain quantization index */
  2236  /* Max decrease in gain quantization index */
  2237  
  2238  /* Quantization offsets (multiples of 4) */
  2239  
  2240  /* Maximum number of iterations used to stabilize an LPC vector */
  2241  
  2242  /* Find Pred Coef defines */
  2243  
  2244  /* LTP quantization settings */
  2245  
  2246  /* Number of subframes */
  2247  
  2248  /* Flag to use harmonic noise shaping */
  2249  
  2250  /* Max LPC order of noise shaping filters */
  2251  
  2252  /* Maximum number of delayed decision states */
  2253  
  2254  /* number of subframes for excitation entropy coding */
  2255  
  2256  /* number of rate levels, for entropy coding of excitation */
  2257  
  2258  /* maximum sum of pulses per shell coding frame */
  2259  
  2260  /***********************/
  2261  /* High pass filtering */
  2262  /***********************/
  2263  
  2264  /***************************/
  2265  /* Voice activity detector */
  2266  /***************************/
  2267  
  2268  /* Sigmoid settings */
  2269  
  2270  /* smoothing for SNR measurement */
  2271  
  2272  /******************/
  2273  /* NLSF quantizer */
  2274  /******************/
  2275  
  2276  /* Based on above defines, calculate how much memory is necessary to allocate */
  2277  
  2278  /* Transition filtering for mode switching */
  2279  
  2280  /* Row based */
  2281  
  2282  /* Column based */
  2283  
  2284  /* BWE factors to apply after packet loss */
  2285  
  2286  /* Defines for CN generation */
  2287  
  2288  /***********************************************************************
  2289  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  2290  Redistribution and use in source and binary forms, with or without
  2291  modification, (subject to the limitations in the disclaimer below)
  2292  are permitted provided that the following conditions are met:
  2293  - Redistributions of source code must retain the above copyright notice,
  2294  this list of conditions and the following disclaimer.
  2295  - Redistributions in binary form must reproduce the above copyright
  2296  notice, this list of conditions and the following disclaimer in the
  2297  documentation and/or other materials provided with the distribution.
  2298  - Neither the name of Skype Limited, nor the names of specific
  2299  contributors, may be used to endorse or promote products derived from
  2300  this software without specific prior written permission.
  2301  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  2302  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  2303  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  2304  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  2305  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  2306  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  2307  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  2308  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  2309  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  2310  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  2311  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  2312  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  2313  ***********************************************************************/
  2314  
  2396  /************************************/
  2397  /* Noise shaping quantization state */
  2398  /************************************/
  2399  type SKP_Silk_nsq_state = struct {
  2400  	Fxq                [960]int16
  2401  	FsLTP_shp_Q10      [960]int32
  2402  	FsLPC_Q14          [152]int32
  2403  	FsAR2_Q14          [16]int32
  2404  	FsLF_AR_shp_Q12    int32
  2405  	FlagPrev           int32
  2406  	FsLTP_buf_idx      int32
  2407  	FsLTP_shp_buf_idx  int32
  2408  	Frand_seed         int32
  2409  	Fprev_inv_gain_Q16 int32
  2410  	Frewhite_flag      int32
  2411  } /* SKP_Silk_structs.h:56:3 */ /* FIX*/
  2412  
  2413  /* Struct for Low BitRate Redundant (LBRR) information */
  2414  type SKP_SILK_LBRR_struct = struct {
  2415  	Fpayload [1024]uint8
  2416  	FnBytes  int32
  2417  	Fusage   int32
  2418  } /* SKP_Silk_structs.h:63:3 */
  2419  
  2420  /********************************/
  2421  /* VAD state                    */
  2422  /********************************/
  2423  type SKP_Silk_VAD_state = struct {
  2424  	FAnaState        [2]int32
  2425  	FAnaState1       [2]int32
  2426  	FAnaState2       [2]int32
  2427  	FXnrgSubfr       [4]int32
  2428  	FNrgRatioSmth_Q8 [4]int32
  2429  	FHPstate         int16
  2430  	_                [2]byte
  2431  	FNL              [4]int32
  2432  	Finv_NL          [4]int32
  2433  	FNoiseLevelBias  [4]int32
  2434  	Fcounter         int32
  2435  } /* SKP_Silk_structs.h:79:3 */
  2436  
  2437  /*******************************/
  2438  /* Range encoder/decoder state */
  2439  /*******************************/
  2440  type SKP_Silk_range_coder_state = struct {
  2441  	FbufferLength int32
  2442  	FbufferIx     int32
  2443  	Fbase_Q32     uint32
  2444  	Frange_Q16    uint32
  2445  	Ferror        int32
  2446  	Fbuffer       [1024]uint8
  2447  } /* SKP_Silk_structs.h:91:3 */
  2448  
  2449  /* Input frequency range detection struct */
  2450  type SKP_Silk_detect_SWB_state = struct {
  2451  	FS_HP_8_kHz            [3][2]int32
  2452  	FConsecSmplsAboveThres int32
  2453  	FActiveSpeech_ms       int32
  2454  	FSWB_detected          int32
  2455  	FWB_detected           int32
  2456  } /* SKP_Silk_structs.h:100:3 */
  2457  
  2458  /* Variable cut-off low-pass filter state */
  2459  type SKP_Silk_LP_state = struct {
  2460  	FIn_LP_State         [2]int32
  2461  	Ftransition_frame_no int32
  2462  	Fmode                int32
  2463  } /* SKP_Silk_structs.h:108:3 */
  2464  
  2465  /* Structure for one stage of MSVQ */
  2466  type SKP_Silk_NLSF_CBS = struct {
  2467  	FnVectors    int32
  2468  	FCB_NLSF_Q15 uintptr
  2469  	FRates_Q5    uintptr
  2470  } /* SKP_Silk_structs.h:116:3 */
  2471  
  2472  /* Structure containing NLSF MSVQ codebook */
  2473  type SKP_Silk_NLSF_CB_struct = struct {
  2474  	FnStages       int32
  2475  	FCBStages      uintptr
  2476  	FNDeltaMin_Q15 uintptr
  2477  	FCDF           uintptr
  2478  	FStartPtr      uintptr
  2479  	FMiddleIx      uintptr
  2480  } /* SKP_Silk_structs.h:130:3 */
  2481  
  2482  /********************************/
  2483  /* Encoder state                */
  2484  /********************************/
  2485  type SKP_Silk_encoder_state = struct {
  2486  	FsRC                           SKP_Silk_range_coder_state
  2487  	FsRC_LBRR                      SKP_Silk_range_coder_state
  2488  	FsNSQ                          SKP_Silk_nsq_state
  2489  	FsNSQ_LBRR                     SKP_Silk_nsq_state
  2490  	FIn_HP_State                   [2]int32
  2491  	FsLP                           SKP_Silk_LP_state
  2492  	FsVAD                          SKP_Silk_VAD_state
  2493  	FLBRRprevLastGainIndex         int32
  2494  	Fprev_sigtype                  int32
  2495  	FtypeOffsetPrev                int32
  2496  	FprevLag                       int32
  2497  	Fprev_lagIndex                 int32
  2498  	FAPI_fs_Hz                     int32
  2499  	Fprev_API_fs_Hz                int32
  2500  	FmaxInternal_fs_kHz            int32
  2501  	Ffs_kHz                        int32
  2502  	Ffs_kHz_changed                int32
  2503  	Fframe_length                  int32
  2504  	Fsubfr_length                  int32
  2505  	Fla_pitch                      int32
  2506  	Fla_shape                      int32
  2507  	FshapeWinLength                int32
  2508  	FTargetRate_bps                int32
  2509  	FPacketSize_ms                 int32
  2510  	FPacketLoss_perc               int32
  2511  	FframeCounter                  int32
  2512  	FComplexity                    int32
  2513  	FnStatesDelayedDecision        int32
  2514  	FuseInterpolatedNLSFs          int32
  2515  	FshapingLPCOrder               int32
  2516  	FpredictLPCOrder               int32
  2517  	FpitchEstimationComplexity     int32
  2518  	FpitchEstimationLPCOrder       int32
  2519  	FpitchEstimationThreshold_Q16  int32
  2520  	FLTPQuantLowComplexity         int32
  2521  	FNLSF_MSVQ_Survivors           int32
  2522  	Ffirst_frame_after_reset       int32
  2523  	Fcontrolled_since_last_payload int32
  2524  	Fwarping_Q16                   int32
  2525  	FinputBuf                      [480]int16
  2526  	FinputBufIx                    int32
  2527  	FnFramesInPayloadBuf           int32
  2528  	FnBytesInPayloadBuf            int32
  2529  	Fframes_since_onset            int32
  2530  	FpsNLSF_CB                     [2]uintptr
  2531  	FLBRR_buffer                   [2]SKP_SILK_LBRR_struct
  2532  	Foldest_LBRR_idx               int32
  2533  	FuseInBandFEC                  int32
  2534  	FLBRR_enabled                  int32
  2535  	FLBRR_GainIncreases            int32
  2536  	FbitrateDiff                   int32
  2537  	Fbitrate_threshold_up          int32
  2538  	Fbitrate_threshold_down        int32
  2539  	Fresampler_state               SKP_Silk_resampler_state_struct
  2540  	FnoSpeechCounter               int32
  2541  	FuseDTX                        int32
  2542  	FinDTX                         int32
  2543  	FvadFlag                       int32
  2544  	FsSWBdetect                    SKP_Silk_detect_SWB_state
  2545  	Fq                             [480]int8
  2546  	Fq_LBRR                        [480]int8
  2547  } /* SKP_Silk_structs.h:221:3 */
  2548  
  2549  /************************/
  2550  /* Encoder control      */
  2551  /************************/
  2552  type SKP_Silk_encoder_control = struct {
  2553  	FlagIndex          int32
  2554  	FcontourIndex      int32
  2555  	FPERIndex          int32
  2556  	FLTPIndex          [4]int32
  2557  	FNLSFIndices       [10]int32
  2558  	FNLSFInterpCoef_Q2 int32
  2559  	FGainsIndices      [4]int32
  2560  	FSeed              int32
  2561  	FLTP_scaleIndex    int32
  2562  	FRateLevelIndex    int32
  2563  	FQuantOffsetType   int32
  2564  	Fsigtype           int32
  2565  	FpitchL            [4]int32
  2566  	FLBRR_usage        int32
  2567  } /* SKP_Silk_structs.h:246:3 */
  2568  
  2569  /* Struct for Packet Loss Concealment */
  2570  type SKP_Silk_PLC_struct = struct {
  2571  	FpitchL_Q8         int32
  2572  	FLTPCoef_Q14       [5]int16
  2573  	FprevLPC_Q12       [16]int16
  2574  	_                  [2]byte
  2575  	Flast_frame_lost   int32
  2576  	Frand_seed         int32
  2577  	FrandScale_Q14     int16
  2578  	_                  [2]byte
  2579  	Fconc_energy       int32
  2580  	Fconc_energy_shift int32
  2581  	FprevLTP_scale_Q14 int16
  2582  	_                  [2]byte
  2583  	FprevGain_Q16      [4]int32
  2584  	Ffs_kHz            int32
  2585  } /* SKP_Silk_structs.h:261:3 */
  2586  
  2587  /* Struct for CNG */
  2588  type SKP_Silk_CNG_struct = struct {
  2589  	FCNG_exc_buf_Q10   [480]int32
  2590  	FCNG_smth_NLSF_Q15 [16]int32
  2591  	FCNG_synth_state   [16]int32
  2592  	FCNG_smth_Gain_Q16 int32
  2593  	Frand_seed         int32
  2594  	Ffs_kHz            int32
  2595  } /* SKP_Silk_structs.h:271:3 */
  2596  
  2597  /********************************/
  2598  /* Decoder state                */
  2599  /********************************/
  2600  type SKP_Silk_decoder_state = struct {
  2601  	FsRC                       SKP_Silk_range_coder_state
  2602  	Fprev_inv_gain_Q16         int32
  2603  	FsLTP_Q16                  [960]int32
  2604  	FsLPC_Q14                  [136]int32
  2605  	Fexc_Q10                   [480]int32
  2606  	Fres_Q10                   [480]int32
  2607  	FoutBuf                    [960]int16
  2608  	FlagPrev                   int32
  2609  	FLastGainIndex             int32
  2610  	FLastGainIndex_EnhLayer    int32
  2611  	FtypeOffsetPrev            int32
  2612  	FHPState                   [2]int32
  2613  	FHP_A                      uintptr
  2614  	FHP_B                      uintptr
  2615  	Ffs_kHz                    int32
  2616  	Fprev_API_sampleRate       int32
  2617  	Fframe_length              int32
  2618  	Fsubfr_length              int32
  2619  	FLPC_order                 int32
  2620  	FprevNLSF_Q15              [16]int32
  2621  	Ffirst_frame_after_reset   int32
  2622  	FnBytesLeft                int32
  2623  	FnFramesDecoded            int32
  2624  	FnFramesInPacket           int32
  2625  	FmoreInternalDecoderFrames int32
  2626  	FFrameTermination          int32
  2627  	Fresampler_state           SKP_Silk_resampler_state_struct
  2628  	FpsNLSF_CB                 [2]uintptr
  2629  	FvadFlag                   int32
  2630  	Fno_FEC_counter            int32
  2631  	Finband_FEC_offset         int32
  2632  	FsCNG                      SKP_Silk_CNG_struct
  2633  	FlossCnt                   int32
  2634  	Fprev_sigtype              int32
  2635  	FsPLC                      SKP_Silk_PLC_struct
  2636  } /* SKP_Silk_structs.h:326:3 */
  2637  
  2638  /************************/
  2639  /* Decoder control      */
  2640  /************************/
  2641  type SKP_Silk_decoder_control = struct {
  2642  	FpitchL            [4]int32
  2643  	FGains_Q16         [4]int32
  2644  	FSeed              int32
  2645  	FPredCoef_Q12      [2][16]int16
  2646  	FLTPCoef_Q14       [20]int16
  2647  	FLTP_scale_Q14     int32
  2648  	FPERIndex          int32
  2649  	FRateLevelIndex    int32
  2650  	FQuantOffsetType   int32
  2651  	Fsigtype           int32
  2652  	FNLSFInterpCoef_Q2 int32
  2653  } /* SKP_Silk_structs.h:347:3 */
  2654  
  2655  /* Generates excitation for CNG LPC synthesis */
  2656  func SKP_Silk_CNG_exc(tls *libc.TLS, residual uintptr, exc_buf_Q10 uintptr, Gain_Q16 int32, length int32, rand_seed uintptr) { /* SKP_Silk_CNG.c:31:17: */
  2657  	var seed int32
  2658  	var i int32
  2659  	var idx int32
  2660  	var exc_mask int32
  2661  
  2662  	exc_mask = 255
  2663  	for exc_mask > length {
  2664  		exc_mask = ((exc_mask) >> (1))
  2665  	}
  2666  
  2667  	seed = *(*int32)(unsafe.Pointer(rand_seed))
  2668  	for i = 0; i < length; i++ {
  2669  		seed = (int32((uint32(907633515)) + ((uint32(seed)) * (uint32(196314165)))))
  2670  		idx = (((seed) >> (24)) & exc_mask)
  2671  
  2672  		*(*int16)(unsafe.Pointer(residual + uintptr(i)*2)) = func() int16 {
  2673  			if (func() int32 {
  2674  				if (10) == 1 {
  2675  					return (((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2676  						if (16) == 1 {
  2677  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2678  						}
  2679  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2680  					}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2681  						if (16) == 1 {
  2682  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2683  						}
  2684  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2685  					}()))) & 1))
  2686  				}
  2687  				return ((((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2688  					if (16) == 1 {
  2689  						return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2690  					}
  2691  					return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2692  				}()))) >> ((10) - 1)) + 1) >> 1)
  2693  			}()) > 0x7FFF {
  2694  				return int16(0x7FFF)
  2695  			}
  2696  			return func() int16 {
  2697  				if (func() int32 {
  2698  					if (10) == 1 {
  2699  						return (((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2700  							if (16) == 1 {
  2701  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2702  							}
  2703  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2704  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2705  							if (16) == 1 {
  2706  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2707  							}
  2708  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2709  						}()))) & 1))
  2710  					}
  2711  					return ((((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2712  						if (16) == 1 {
  2713  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2714  						}
  2715  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2716  					}()))) >> ((10) - 1)) + 1) >> 1)
  2717  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
  2718  					return libc.Int16FromInt32(0x8000)
  2719  				}
  2720  				return func() int16 {
  2721  					if (10) == 1 {
  2722  						return (int16(((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2723  							if (16) == 1 {
  2724  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2725  							}
  2726  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2727  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2728  							if (16) == 1 {
  2729  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2730  							}
  2731  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2732  						}()))) & 1)))
  2733  					}
  2734  					return (int16((((((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(exc_buf_Q10 + uintptr(idx)*4))) * (func() int32 {
  2735  						if (16) == 1 {
  2736  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  2737  						}
  2738  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  2739  					}()))) >> ((10) - 1)) + 1) >> 1))
  2740  				}()
  2741  			}()
  2742  		}()
  2743  	}
  2744  	*(*int32)(unsafe.Pointer(rand_seed)) = seed
  2745  }
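
// Hedged, simplified sketch of what the ccgo-expanded expression in
// SKP_Silk_CNG_exc computes (illustrative only, not part of the generated
// translation): draw a pseudo-random sample from the CNG excitation buffer,
// scale it by the Q16 gain, round from Q10 to Q0 and saturate to int16.
// excBufQ10 is assumed to be at least as long as the largest index the mask
// can produce, as it is for the caller below.
func cngExcSketch(excBufQ10 []int32, gainQ16 int32, length int, seed *int32) []int16 {
	out := make([]int16, length)
	mask := 255
	for mask > length {
		mask >>= 1
	}
	s := *seed
	for i := 0; i < length; i++ {
		s = int32(uint32(907633515) + uint32(s)*uint32(196314165)) // same LCG constants as above
		idx := int(s>>24) & mask                                   // pseudo-random buffer index
		v := (int64(excBufQ10[idx]) * int64(gainQ16)) >> 16        // Q10 * Q16 >> 16 stays Q10
		v = ((v >> 9) + 1) >> 1                                    // round Q10 -> Q0
		if v > 0x7FFF {
			v = 0x7FFF
		} else if v < -0x8000 {
			v = -0x8000
		}
		out[i] = int16(v)
	}
	*seed = s
	return out
}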
  2746  
  2747  func SKP_Silk_CNG_Reset(tls *libc.TLS, psDec uintptr) { /* SKP_Silk_CNG.c:58:6: */
  2748  	var i int32
  2749  	var NLSF_step_Q15 int32
  2750  	var NLSF_acc_Q15 int32
  2751  
  2752  	NLSF_step_Q15 = ((0x7FFF) / ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order + 1))
  2753  	NLSF_acc_Q15 = 0
  2754  	for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order; i++ {
  2755  		NLSF_acc_Q15 = NLSF_acc_Q15 + (NLSF_step_Q15)
  2756  		*(*int32)(unsafe.Pointer((psDec + 11520 /* &.sCNG */ + 1920 /* &.CNG_smth_NLSF_Q15 */) + uintptr(i)*4)) = NLSF_acc_Q15
  2757  	}
  2758  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsCNG.FCNG_smth_Gain_Q16 = 0
  2759  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsCNG.Frand_seed = 3176576
  2760  }
  2761  
  2762  /* Updates the CNG estimate, and applies CNG when a packet was lost */
  2763  func SKP_Silk_CNG(tls *libc.TLS, psDec uintptr, psDecCtrl uintptr, signal uintptr, length int32) { /* SKP_Silk_CNG.c:75:6: */
  2764  	bp := tls.Alloc(992)
  2765  	defer tls.Free(992)
  2766  
  2767  	var i int32
  2768  	var subfr int32
  2769  	var tmp_32 int32
  2770  	var Gain_Q26 int32
  2771  	var max_Gain_Q16 int32
  2772  	// var LPC_buf [16]int16 at bp+960, 32
  2773  
  2774  	// var CNG_sig [480]int16 at bp, 960
  2775  
  2776  	var psCNG uintptr
  2777  	psCNG = (psDec + 11520 /* &.sCNG */)
  2778  
  2779  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz != (*SKP_Silk_CNG_struct)(unsafe.Pointer(psCNG)).Ffs_kHz {
  2780  		/* Reset state */
  2781  		SKP_Silk_CNG_Reset(tls, psDec)
  2782  
  2783  		(*SKP_Silk_CNG_struct)(unsafe.Pointer(psCNG)).Ffs_kHz = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz
  2784  	}
  2785  	if ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt == 0) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FvadFlag == 0) {
  2786  		/* Update CNG parameters */
  2787  
  2788  		/* Smoothing of LSF's  */
  2789  		for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order; i++ {
  2790  			*(*int32)(unsafe.Pointer((psCNG + 1920 /* &.CNG_smth_NLSF_Q15 */) + uintptr(i)*4)) += ((((*(*int32)(unsafe.Pointer((psDec + 11244 /* &.prevNLSF_Q15 */) + uintptr(i)*4)) - *(*int32)(unsafe.Pointer((psCNG + 1920 /* &.CNG_smth_NLSF_Q15 */) + uintptr(i)*4))) >> 16) * (int32(int16(16348)))) + ((((*(*int32)(unsafe.Pointer((psDec + 11244 /* &.prevNLSF_Q15 */) + uintptr(i)*4)) - *(*int32)(unsafe.Pointer((psCNG + 1920 /* &.CNG_smth_NLSF_Q15 */) + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(16348)))) >> 16))
  2791  		}
  2792  		/* Find the subframe with the highest gain */
  2793  		max_Gain_Q16 = 0
  2794  		subfr = 0
  2795  		for i = 0; i < 4; i++ {
  2796  			if *(*int32)(unsafe.Pointer((psDecCtrl + 16 /* &.Gains_Q16 */) + uintptr(i)*4)) > max_Gain_Q16 {
  2797  				max_Gain_Q16 = *(*int32)(unsafe.Pointer((psDecCtrl + 16 /* &.Gains_Q16 */) + uintptr(i)*4))
  2798  				subfr = i
  2799  			}
  2800  		}
  2801  		/* Update CNG excitation buffer with excitation from this subframe */
  2802  		libc.Xmemmove(tls, ((psCNG /* &.CNG_exc_buf_Q10 */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)*4), psCNG /* &.CNG_exc_buf_Q10 */, ((uint32((4 - 1) * (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)) * uint32(unsafe.Sizeof(int32(0)))))
  2803  		libc.Xmemcpy(tls, psCNG /* &.CNG_exc_buf_Q10 */, ((psDec + 5432 /* &.exc_Q10 */) + uintptr((subfr*(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length))*4), (uint32((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length) * uint32(unsafe.Sizeof(int32(0)))))
  2804  
  2805  		/* Smooth gains */
  2806  		for i = 0; i < 4; i++ {
  2807  			*(*int32)(unsafe.Pointer(psCNG + 2048 /* &.CNG_smth_Gain_Q16 */)) += ((((*(*int32)(unsafe.Pointer((psDecCtrl + 16 /* &.Gains_Q16 */) + uintptr(i)*4)) - (*SKP_Silk_CNG_struct)(unsafe.Pointer(psCNG)).FCNG_smth_Gain_Q16) >> 16) * (int32(int16(4634)))) + ((((*(*int32)(unsafe.Pointer((psDecCtrl + 16 /* &.Gains_Q16 */) + uintptr(i)*4)) - (*SKP_Silk_CNG_struct)(unsafe.Pointer(psCNG)).FCNG_smth_Gain_Q16) & 0x0000FFFF) * (int32(int16(4634)))) >> 16))
  2808  		}
  2809  	}
  2810  
  2811  	/* Add CNG when the packet is lost and/or during low speech activity */
  2812  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt != 0 { //|| psDec->vadFlag == NO_VOICE_ACTIVITY ) {
  2813  
  2814  		/* Generate CNG excitation */
  2815  		SKP_Silk_CNG_exc(tls, bp /* &CNG_sig[0] */, psCNG, /* &.CNG_exc_buf_Q10 */
  2816  			(*SKP_Silk_CNG_struct)(unsafe.Pointer(psCNG)).FCNG_smth_Gain_Q16, length, (psCNG + 2052 /* &.rand_seed */))
  2817  
  2818  		/* Convert CNG NLSF to filter representation */
  2819  		SKP_Silk_NLSF2A_stable(tls, bp+960 /* &LPC_buf[0] */, psCNG+1920 /* &.CNG_smth_NLSF_Q15 */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order)
  2820  
  2821  		Gain_Q26 = (int32(1) << 26) /* 1.0 */
  2822  
  2823  		/* Generate CNG signal, by synthesis filtering */
  2824  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order == 16 {
  2825  			SKP_Silk_LPC_synthesis_order16(tls, bp /* &CNG_sig[0] */, bp+960, /* &LPC_buf[0] */
  2826  				Gain_Q26, psCNG+1984 /* &.CNG_synth_state */, bp /* &CNG_sig[0] */, length)
  2827  		} else {
  2828  			SKP_Silk_LPC_synthesis_filter(tls, bp /* &CNG_sig[0] */, bp+960, /* &LPC_buf[0] */
  2829  				Gain_Q26, psCNG+1984 /* &.CNG_synth_state */, bp /* &CNG_sig[0] */, length, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order)
  2830  		}
  2831  		/* Mix with signal */
  2832  		for i = 0; i < length; i++ {
  2833  			tmp_32 = (int32(*(*int16)(unsafe.Pointer(signal + uintptr(i)*2))) + int32(*(*int16)(unsafe.Pointer(bp /* &CNG_sig[0] */ + uintptr(i)*2))))
  2834  			*(*int16)(unsafe.Pointer(signal + uintptr(i)*2)) = func() int16 {
  2835  				if (tmp_32) > 0x7FFF {
  2836  					return int16(0x7FFF)
  2837  				}
  2838  				return func() int16 {
  2839  					if (tmp_32) < (int32(libc.Int16FromInt32(0x8000))) {
  2840  						return libc.Int16FromInt32(0x8000)
  2841  					}
  2842  					return int16(tmp_32)
  2843  				}()
  2844  			}()
  2845  		}
  2846  	} else {
  2847  		libc.Xmemset(tls, psCNG+1984 /* &.CNG_synth_state */, 0, (uint32((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) * uint32(unsafe.Sizeof(int32(0)))))
  2848  	}
  2849  }
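
// Illustrative helper (an assumption, not part of the generated sources): the
// saturating 16-bit addition that the CNG mixing loop above expands inline
// when adding the synthesized comfort noise to the output signal.
func satAdd16(a, b int16) int16 {
	sum := int32(a) + int32(b)
	if sum > 0x7FFF {
		return 0x7FFF
	}
	if sum < -0x8000 {
		return -0x8000
	}
	return int16(sum)
}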
  2850  
  2851  //#define SKP_enc_map(a)                ((a) > 0 ? 1 : 0)
  2852  //#define SKP_dec_map(a)                ((a) > 0 ? 1 : -1)
  2853  /* shifting avoids if-statement */
  2854  
  2855  /* Encodes signs of excitation */
  2856  func SKP_Silk_encode_signs(tls *libc.TLS, sRC uintptr, q uintptr, length int32, sigtype int32, QuantOffsetType int32, RateLevelIndex int32) { /* SKP_Silk_code_signs.c:37:6: */
  2857  	bp := tls.Alloc(6)
  2858  	defer tls.Free(6)
  2859  
  2860  	var i int32
  2861  	var inData int32
  2862  	// var cdf [3]uint16 at bp, 6
  2863  
  2864  	i = (((int32((int16(10 - 1)))) * (int32((int16(((sigtype) << (1)) + QuantOffsetType))))) + RateLevelIndex)
  2865  	*(*uint16)(unsafe.Pointer(bp /* &cdf[0] */)) = uint16(0)
  2866  	*(*uint16)(unsafe.Pointer(bp /* &cdf[0] */ + 1*2)) = SKP_Silk_sign_CDF[i]
  2867  	*(*uint16)(unsafe.Pointer(bp /* &cdf[0] */ + 2*2)) = uint16(65535)
  2868  
  2869  	for i = 0; i < length; i++ {
  2870  		if int32(*(*int8)(unsafe.Pointer(q + uintptr(i)))) != 0 {
  2871  			inData = (((int32(*(*int8)(unsafe.Pointer(q + uintptr(i))))) >> (15)) + 1) /* - = 0, + = 1 */
  2872  			SKP_Silk_range_encoder(tls, sRC, inData, bp /* &cdf[0] */)
  2873  		}
  2874  	}
  2875  }
  2876  
  2877  /* Decodes signs of excitation */
  2878  func SKP_Silk_decode_signs(tls *libc.TLS, sRC uintptr, q uintptr, length int32, sigtype int32, QuantOffsetType int32, RateLevelIndex int32) { /* SKP_Silk_code_signs.c:64:6: */
  2879  	bp := tls.Alloc(12)
  2880  	defer tls.Free(12)
  2881  
  2882  	var i int32
  2883  	// var data int32 at bp+8, 4
  2884  
  2885  	// var cdf [3]uint16 at bp, 6
  2886  
  2887  	i = (((int32((int16(10 - 1)))) * (int32((int16(((sigtype) << (1)) + QuantOffsetType))))) + RateLevelIndex)
  2888  	*(*uint16)(unsafe.Pointer(bp /* &cdf[0] */)) = uint16(0)
  2889  	*(*uint16)(unsafe.Pointer(bp /* &cdf[0] */ + 1*2)) = SKP_Silk_sign_CDF[i]
  2890  	*(*uint16)(unsafe.Pointer(bp /* &cdf[0] */ + 2*2)) = uint16(65535)
  2891  
  2892  	for i = 0; i < length; i++ {
  2893  		if *(*int32)(unsafe.Pointer(q + uintptr(i)*4)) > 0 {
  2894  			SKP_Silk_range_decoder(tls, bp+8 /* &data */, sRC, bp /* &cdf[0] */, 1)
  2895  			/* attach sign */
  2896  			/* implementation with shift, subtraction, multiplication */
  2897  			*(*int32)(unsafe.Pointer(q + uintptr(i)*4)) *= (((*(*int32)(unsafe.Pointer(bp + 8 /* data */))) << (1)) - 1)
  2898  		}
  2899  	}
  2900  }
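
// Hedged illustration of the branch-free sign mapping used above (not part of
// the generated translation; the names are made up): the encoder turns an int8
// pulse value into a CDF symbol by shifting, and the decoder turns the decoded
// symbol back into a +/-1 factor for the magnitude.
func signToSymbol(q int8) int32 {
	return (int32(q) >> 15) + 1 // negative pulse -> 0, positive pulse -> 1
}

func symbolToSign(data int32) int32 {
	return (data << 1) - 1 // symbol 0 -> -1, symbol 1 -> +1
}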
  2901  
  2902  // 7.18.2  Limits of specified-width integer types
  2903  
  2904  // 7.18.2.1  Limits of exact-width integer types
  2905  
  2906  // 7.18.2.2  Limits of minimum-width integer types
  2907  
  2908  // 7.18.2.3  Limits of fastest minimum-width integer types
  2909  
  2910  // 7.18.2.4  Limits of integer types capable of holding
  2911  //     object pointers
  2912  
  2913  // 7.18.2.5  Limits of greatest-width integer types
  2914  
  2915  // 7.18.3  Limits of other integer types
  2916  
  2917  // wint_t is unsigned short for compatibility with MS runtime
  2918  
  2919  // 7.18.4  Macros for integer constants
  2920  
  2921  // 7.18.4.1  Macros for minimum-width integer constants
  2922  //
  2923  //     According to Douglas Gwyn <gwyn@arl.mil>:
  2924  // 	"This spec was changed in ISO/IEC 9899:1999 TC1; in ISO/IEC
  2925  // 	9899:1999 as initially published, the expansion was required
  2926  // 	to be an integer constant of precisely matching type, which
  2927  // 	is impossible to accomplish for the shorter types on most
  2928  // 	platforms, because C99 provides no standard way to designate
  2929  // 	an integer constant with width less than that of type int.
  2930  // 	TC1 changed this to require just an integer constant
  2931  // 	*expression* with *promoted* type."
  2932  //
  2933  // 	The trick used here is from Clive D W Feather.
  2934  
  2935  //  The 'trick' doesn't work in C89 for long long because, without
  2936  //     suffix, (val) will be evaluated as int, not intmax_t
  2937  
  2938  // 7.18.4.2  Macros for greatest-width integer constants
  2939  
  2940  /* assertions */
  2941  
  2942  /***********************************************/
  2943  /* Structure for controlling encoder operation */
  2944  /***********************************************/
  2945  type SKP_SILK_SDK_EncControlStruct = struct {
  2946  	FAPI_sampleRate        int32
  2947  	FmaxInternalSampleRate int32
  2948  	FpacketSize            int32
  2949  	FbitRate               int32
  2950  	FpacketLossPercentage  int32
  2951  	Fcomplexity            int32
  2952  	FuseInBandFEC          int32
  2953  	FuseDTX                int32
  2954  } /* SKP_Silk_control.h:65:3 */
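
// Illustrative configuration (an assumption, not taken from the sources): a
// plausible way to fill the encoder control struct for 16 kHz input with
// 20 ms packets and an 18 kbps target. packetSize is assumed to be given in
// samples per packet, so 20 ms at a 16 kHz API rate is 320.
func exampleEncControl() SKP_SILK_SDK_EncControlStruct {
	return SKP_SILK_SDK_EncControlStruct{
		FAPI_sampleRate:        16000,
		FmaxInternalSampleRate: 16000,
		FpacketSize:            320,
		FbitRate:               18000,
		FpacketLossPercentage:  0,
		Fcomplexity:            2,
		FuseInBandFEC:          0,
		FuseDTX:                0,
	}
}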
  2955  
  2956  /**************************************************************************/
  2957  /* Structure for controlling decoder operation and reading decoder status */
  2958  /**************************************************************************/
  2959  type SKP_SILK_SDK_DecControlStruct = struct {
  2960  	FAPI_sampleRate            int32
  2961  	FframeSize                 int32
  2962  	FframesPerPacket           int32
  2963  	FmoreInternalDecoderFrames int32
  2964  	FinBandFECOffset           int32
  2965  } /* SKP_Silk_control.h:85:3 */
  2966  
  2967  /* Control internal sampling rate */
  2968  func SKP_Silk_control_audio_bandwidth(tls *libc.TLS, psEncC uintptr, TargetRate_bps int32) int32 { /* SKP_Silk_control_audio_bandwidth.c:31:9: */
  2969  	var fs_kHz int32
  2970  
  2971  	fs_kHz = (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz
  2972  	if fs_kHz == 0 {
  2973  		/* Encoder has just been initialized */
  2974  		if TargetRate_bps >= 25000 {
  2975  			fs_kHz = 24
  2976  		} else if TargetRate_bps >= 14000 {
  2977  			fs_kHz = 16
  2978  		} else if TargetRate_bps >= 10000 {
  2979  			fs_kHz = 12
  2980  		} else {
  2981  			fs_kHz = 8
  2982  		}
  2983  		/* Make sure internal rate is not higher than external rate or maximum allowed, or lower than minimum allowed */
  2984  		fs_kHz = func() int32 {
  2985  			if (fs_kHz) < (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FAPI_fs_Hz) / (1000)) {
  2986  				return fs_kHz
  2987  			}
  2988  			return (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FAPI_fs_Hz) / (1000))
  2989  		}()
  2990  		fs_kHz = func() int32 {
  2991  			if (fs_kHz) < ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz) {
  2992  				return fs_kHz
  2993  			}
  2994  			return (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz
  2995  		}()
  2996  	} else if (((int32(int16(fs_kHz))) * (int32(int16(1000)))) > (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FAPI_fs_Hz) || (fs_kHz > (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz) {
  2997  		/* Make sure internal rate is not higher than external rate or maximum allowed */
  2998  		fs_kHz = (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FAPI_fs_Hz) / (1000))
  2999  		fs_kHz = func() int32 {
  3000  			if (fs_kHz) < ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz) {
  3001  				return fs_kHz
  3002  			}
  3003  			return (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz
  3004  		}()
  3005  	} else {
  3006  		/* State machine for the internal sampling rate switching */
  3007  		if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FAPI_fs_Hz > 8000 {
  3008  			/* Accumulate the difference between the target rate and limit for switching down */
  3009  			*(*int32)(unsafe.Pointer(psEncC + 18336 /* &.bitrateDiff */)) += (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FPacketSize_ms) * (TargetRate_bps - (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fbitrate_threshold_down))
  3010  			(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FbitrateDiff = func() int32 {
  3011  				if ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FbitrateDiff) < (0) {
  3012  					return (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FbitrateDiff
  3013  				}
  3014  				return 0
  3015  			}()
  3016  
  3017  			if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FvadFlag == 0 { /* Low speech activity */
  3018  				/* Check if we should switch down */
  3019  				if ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Ftransition_frame_no == 0) && (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FbitrateDiff <= -30000000) || (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsSWBdetect.FWB_detected * (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz) == 24)) { /* Forced down-switching due to WB input */
  3020  					(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Ftransition_frame_no = 1 /* Begin transition phase */
  3021  					(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Fmode = 0                /* Switch down */
  3022  				} else if ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Ftransition_frame_no >= (2560 / 20)) && ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Fmode == 0) { /* Ready to switch down */
  3023  					(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Ftransition_frame_no = 0 /* Ready for new transition phase */
  3024  					(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FbitrateDiff = 0
  3025  
  3026  					/* Switch to a lower sample frequency */
  3027  					if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 24 {
  3028  						fs_kHz = 16
  3029  					} else if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 16 {
  3030  						fs_kHz = 12
  3031  					} else {
  3032  
  3033  						fs_kHz = 8
  3034  					}
  3035  				}
  3036  
  3037  				/* Check if we should switch up */
  3038  				if ((((((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz * 1000) < (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FAPI_fs_Hz) && (TargetRate_bps >= (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fbitrate_threshold_up)) && (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsSWBdetect.FWB_detected * (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz) < 16)) && (((((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 16) && ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz >= 24)) || (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 12) && ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz >= 16))) || (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 8) && ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FmaxInternal_fs_kHz >= 12)))) &&
  3039  					((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Ftransition_frame_no == 0) { /* No transition phase running, ready to switch */
  3040  					(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Fmode = 1 /* Switch up */
  3041  					(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FbitrateDiff = 0
  3042  
  3043  					/* Switch to a higher sample frequency */
  3044  					if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 8 {
  3045  						fs_kHz = 12
  3046  					} else if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 12 {
  3047  						fs_kHz = 16
  3048  					} else {
  3049  
  3050  						fs_kHz = 24
  3051  					}
  3052  				}
  3053  			}
  3054  		}
  3055  
  3056  		/* After switching up, stop transition filter during speech inactivity */
  3057  		if (((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Fmode == 1) && ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Ftransition_frame_no >= (5120 / 20))) && ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FvadFlag == 0) {
  3058  
  3059  			(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FsLP.Ftransition_frame_no = 0
  3060  
  3061  			/* Reset transition filter state */
  3062  			libc.Xmemset(tls, psEncC+15016 /* &.sLP */ /* &.In_LP_State */, 0, (uint32(2) * uint32(unsafe.Sizeof(int32(0)))))
  3063  		}
  3064  	}
  3065  
  3066  	return fs_kHz
  3067  }
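
// Hedged summary of the initialization branch of SKP_Silk_control_audio_bandwidth
// above (illustrative only, not part of the generated translation): on the first
// call the internal rate is picked purely from the target bitrate, then clamped
// to the API rate and the configured maximum.
func initialFsKHzSketch(targetRateBps, apiFsHz, maxInternalKHz int32) int32 {
	var fs int32
	switch {
	case targetRateBps >= 25000:
		fs = 24
	case targetRateBps >= 14000:
		fs = 16
	case targetRateBps >= 10000:
		fs = 12
	default:
		fs = 8
	}
	if apiFsHz/1000 < fs {
		fs = apiFsHz / 1000 // not higher than the external (API) rate
	}
	if maxInternalKHz < fs {
		fs = maxInternalKHz // not higher than the configured maximum
	}
	return fs
}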
  3068  
  3069  /***********************************************************************
  3070  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  3071  Redistribution and use in source and binary forms, with or without
  3072  modification, (subject to the limitations in the disclaimer below)
  3073  are permitted provided that the following conditions are met:
  3074  - Redistributions of source code must retain the above copyright notice,
  3075  this list of conditions and the following disclaimer.
  3076  - Redistributions in binary form must reproduce the above copyright
  3077  notice, this list of conditions and the following disclaimer in the
  3078  documentation and/or other materials provided with the distribution.
  3079  - Neither the name of Skype Limited, nor the names of specific
  3080  contributors, may be used to endorse or promote products derived from
  3081  this software without specific prior written permission.
  3082  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  3083  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  3084  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  3085  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  3086  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  3087  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  3088  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  3089  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  3090  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  3091  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  3092  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  3093  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  3094  ***********************************************************************/
  3095  
  3096  /********************************/
  3097  /* Noise shaping analysis state */
  3098  /********************************/
  3099  type SKP_Silk_shape_state_FIX = struct {
  3100  	FLastGainIndex          int32
  3101  	FHarmBoost_smth_Q16     int32
  3102  	FHarmShapeGain_smth_Q16 int32
  3103  	FTilt_smth_Q16          int32
  3104  } /* SKP_Silk_structs_FIX.h:49:3 */
  3105  
  3106  /********************************/
  3107  /* Prefilter state              */
  3108  /********************************/
  3109  type SKP_Silk_prefilter_state_FIX = struct {
  3110  	FsLTP_shp         [512]int16
  3111  	FsAR_shp          [17]int32
  3112  	FsLTP_shp_buf_idx int32
  3113  	FsLF_AR_shp_Q12   int32
  3114  	FsLF_MA_shp_Q12   int32
  3115  	FsHarmHP          int32
  3116  	Frand_seed        int32
  3117  	FlagPrev          int32
  3118  } /* SKP_Silk_structs_FIX.h:63:3 */
  3119  
  3120  /*****************************/
  3121  /* Prediction analysis state */
  3122  /*****************************/
  3123  type SKP_Silk_predict_state_FIX = struct {
  3124  	Fpitch_LPC_win_length int32
  3125  	Fmin_pitch_lag        int32
  3126  	Fmax_pitch_lag        int32
  3127  	Fprev_NLSFq_Q15       [16]int32
  3128  } /* SKP_Silk_structs_FIX.h:73:3 */
  3129  
  3130  /********************************/
  3131  /* Encoder state FIX            */
  3132  /********************************/
  3133  type SKP_Silk_encoder_state_FIX = struct {
  3134  	FsCmn                           SKP_Silk_encoder_state
  3135  	Fvariable_HP_smth1_Q15          int32
  3136  	Fvariable_HP_smth2_Q15          int32
  3137  	FsShape                         SKP_Silk_shape_state_FIX
  3138  	FsPrefilt                       SKP_Silk_prefilter_state_FIX
  3139  	FsPred                          SKP_Silk_predict_state_FIX
  3140  	Fx_buf                          [1080]int16
  3141  	FLTPCorr_Q15                    int32
  3142  	Fmu_LTP_Q8                      int32
  3143  	FSNR_dB_Q7                      int32
  3144  	FavgGain_Q16                    int32
  3145  	FavgGain_Q16_one_bit_per_sample int32
  3146  	FBufferedInChannel_ms           int32
  3147  	Fspeech_activity_Q8             int32
  3148  	FprevLTPredCodGain_Q7           int32
  3149  	FHPLTPredCodGain_Q7             int32
  3150  	FinBandFEC_SNR_comp_Q8          int32
  3151  } /* SKP_Silk_structs_FIX.h:106:3 */
  3152  
  3153  /************************/
  3154  /* Encoder control FIX  */
  3155  /************************/
  3156  type SKP_Silk_encoder_control_FIX = struct {
  3157  	FsCmn                    SKP_Silk_encoder_control
  3158  	FGains_Q16               [4]int32
  3159  	FPredCoef_Q12            [2][16]int16
  3160  	FLTPCoef_Q14             [20]int16
  3161  	FLTP_scale_Q14           int32
  3162  	FAR1_Q13                 [64]int16
  3163  	FAR2_Q13                 [64]int16
  3164  	FLF_shp_Q14              [4]int32
  3165  	FGainsPre_Q14            [4]int32
  3166  	FHarmBoost_Q14           [4]int32
  3167  	FTilt_Q14                [4]int32
  3168  	FHarmShapeGain_Q14       [4]int32
  3169  	FLambda_Q10              int32
  3170  	Finput_quality_Q14       int32
  3171  	Fcoding_quality_Q14      int32
  3172  	Fpitch_freq_low_Hz       int32
  3173  	Fcurrent_SNR_dB_Q7       int32
  3174  	Fsparseness_Q8           int32
  3175  	FpredGain_Q16            int32
  3176  	FLTPredCodGain_Q7        int32
  3177  	Finput_quality_bands_Q15 [4]int32
  3178  	Finput_tilt_Q15          int32
  3179  	FResNrg                  [4]int32
  3180  	FResNrgQ                 [4]int32
  3181  } /* SKP_Silk_structs_FIX.h:144:3 */
  3182  
  3183  /***********************************************************************
  3184  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  3185  Redistribution and use in source and binary forms, with or without
  3186  modification, (subject to the limitations in the disclaimer below)
  3187  are permitted provided that the following conditions are met:
  3188  - Redistributions of source code must retain the above copyright notice,
  3189  this list of conditions and the following disclaimer.
  3190  - Redistributions in binary form must reproduce the above copyright
  3191  notice, this list of conditions and the following disclaimer in the
  3192  documentation and/or other materials provided with the distribution.
  3193  - Neither the name of Skype Limited, nor the names of specific
  3194  contributors, may be used to endorse or promote products derived from
  3195  this software without specific prior written permission.
  3196  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  3197  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  3198  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  3199  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  3200  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  3201  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  3202  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  3203  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  3204  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  3205  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  3206  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  3207  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  3208  ***********************************************************************/
  3209  
  3264  /*******************/
  3265  /* Pitch estimator */
  3266  /*******************/
  3267  
  3268  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
  3269  
  3270  /* Bandwidth expansion for whitening filter in pitch analysis */
  3271  
  3272  /* Threshold used by pitch estimator for early escape */
  3273  
  3274  /*********************/
  3275  /* Linear prediction */
  3276  /*********************/
  3277  
  3278  /* LPC analysis defines: regularization and bandwidth expansion */
  3279  
  3280  /* LTP analysis defines */
  3281  
  3282  /* LTP quantization settings */
  3283  
  3284  /***********************/
  3285  /* High pass filtering */
  3286  /***********************/
  3287  
  3288  /* Smoothing parameters for low end of pitch frequency range estimation */
  3289  
  3290  /* Min and max values for low end of pitch frequency range estimation */
  3291  
  3292  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
  3293  
  3294  /***********/
  3295  /* Various */
  3296  /***********/
  3297  
  3298  /* Required speech activity for counting frame as active */
  3299  
  3300  /* Speech Activity LBRR enable threshold (needs tuning) */
  3301  
  3302  /*************************/
  3303  /* Perceptual parameters */
  3304  /*************************/
  3305  
  3306  /* reduction in coding SNR during low speech activity */
  3307  
  3308  /* factor for reducing quantization noise during voiced speech */
  3309  
  3310  /* factor for reducing quantization noise for unvoiced sparse signals */
  3311  
  3312  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
  3313  
  3314  /* warping control */
  3315  
  3316  /* fraction added to first autocorrelation value */
  3317  
  3318  /* noise shaping filter chirp factor */
  3319  
  3320  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
  3321  
  3322  /* gain reduction for fricatives */
  3323  
  3324  /* extra harmonic boosting (signal shaping) at low bitrates */
  3325  
  3326  /* extra harmonic boosting (signal shaping) for noisy input signals */
  3327  
  3328  /* harmonic noise shaping */
  3329  
  3330  /* extra harmonic noise shaping for high bitrates or noisy input */
  3331  
  3332  /* parameter for shaping noise towards higher frequencies */
  3333  
  3334  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
  3335  
  3336  /* parameter for applying a high-pass tilt to the input signal */
  3337  
  3338  /* parameter for extra high-pass tilt to the input signal at high rates */
  3339  
  3340  /* parameter for reducing noise at the very low frequencies */
  3341  
  3342  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
  3343  
  3344  /* noise floor to put a lower limit on the quantization step size */
  3345  
  3346  /* noise floor relative to active speech gain level */
  3347  
  3348  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
  3349  
  3350  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
  3351  
  3352  /* parameters defining the R/D tradeoff in the residual quantizer */
  3353  
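        /* Map the API Complexity setting (0 = low, 1 = medium, 2 = high) onto concrete encoder
        parameters below: pitch estimation complexity, threshold and LPC order, noise shaping LPC
        order and look-ahead, number of delayed-decision states, NLSF interpolation, MSVQ survivors
        and warping. Any other value returns error code -6. */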
  3354  func SKP_Silk_setup_complexity(tls *libc.TLS, psEncC uintptr, Complexity int32) int32 { /* SKP_Silk_setup_complexity.h:31:20: */
  3355  	var ret int32 = 0
  3356  
  3357  	/* Check that settings are valid */
  3358  	if (0 != 0) && (Complexity != 0) {
  3359  		ret = -6
  3360  	}
  3361  
  3362  	/* Set encoding complexity */
  3363  	if (Complexity == 0) || (0 != 0) {
  3364  		/* Low complexity */
  3365  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FComplexity = 0
  3366  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationComplexity = 0
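        		/* SKP_FIX_CONST(v, Q) is the fixed-point representation of the float constant v with Q
        		fractional bits; 0.8 in Q16 below is round(0.8 * 65536) = 52429 */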
  3367  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationThreshold_Q16 = SKP_FIX_CONST(tls, 0.8, 16)
  3368  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationLPCOrder = 6
  3369  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FshapingLPCOrder = 8
  3370  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fla_shape = (3 * (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz)
  3371  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision = 1
  3372  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FuseInterpolatedNLSFs = 0
  3373  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FLTPQuantLowComplexity = 1
  3374  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FNLSF_MSVQ_Survivors = 2
  3375  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fwarping_Q16 = 0
  3376  	} else if Complexity == 1 {
  3377  		/* Medium complexity */
  3378  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FComplexity = 1
  3379  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationComplexity = 1
  3380  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationThreshold_Q16 = SKP_FIX_CONST(tls, 0.75, 16)
  3381  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationLPCOrder = 12
  3382  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FshapingLPCOrder = 12
  3383  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fla_shape = (5 * (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz)
  3384  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision = 2
  3385  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FuseInterpolatedNLSFs = 0
  3386  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FLTPQuantLowComplexity = 0
  3387  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FNLSF_MSVQ_Survivors = 4
  3388  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fwarping_Q16 = ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz * SKP_FIX_CONST(tls, 0.015, 16))
  3389  	} else if Complexity == 2 {
  3390  		/* High complexity */
  3391  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FComplexity = 2
  3392  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationComplexity = 2
  3393  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationThreshold_Q16 = SKP_FIX_CONST(tls, 0.7, 16)
  3394  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationLPCOrder = 16
  3395  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FshapingLPCOrder = 16
  3396  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fla_shape = (5 * (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz)
  3397  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision = 4
  3398  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FuseInterpolatedNLSFs = 1
  3399  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FLTPQuantLowComplexity = 0
  3400  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FNLSF_MSVQ_Survivors = 16
  3401  		(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fwarping_Q16 = ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz * SKP_FIX_CONST(tls, 0.015, 16))
  3402  	} else {
  3403  		ret = -6
  3404  	}
  3405  
  3406  	/* Do not allow higher pitch estimation LPC order than predict LPC order */
  3407  	(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationLPCOrder = SKP_min_int(tls, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpitchEstimationLPCOrder, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder)
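        	/* Noise shaping analysis window: 5 ms of samples (5 * fs_kHz) plus twice the shaping look-ahead (la_shape) */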
  3408  	(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FshapeWinLength = ((5 * (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz) + (2 * (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fla_shape))
  3409  
  3410  	return ret
  3411  }
  3412  
  3413  /* Control encoder */
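        /* Applies the API control parameters to the encoder state. Each setup step below adds its
        error code into ret and processing continues: an out-of-range PacketLoss_perc yields -5 and
        an invalid DTX_enabled flag yields -8, but the corresponding fields are still written. */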
  3414  func SKP_Silk_control_encoder_FIX(tls *libc.TLS, psEnc uintptr, PacketSize_ms int32, TargetRate_bps int32, PacketLoss_perc int32, DTX_enabled int32, Complexity int32) int32 { /* SKP_Silk_control_codec_FIX.c:56:9: */
  3415  	var fs_kHz int32
  3416  	var ret int32 = 0
  3417  
  3418  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fcontrolled_since_last_payload != 0 {
  3419  		if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz != (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fprev_API_fs_Hz) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz > 0) {
  3420  			/* Change in API sampling rate in the middle of encoding a packet */
  3421  			ret = ret + (SKP_Silk_setup_resamplers_FIX(tls, psEnc, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz))
  3422  		}
  3423  		return ret
  3424  	}
  3425  
  3426  	/* Beyond this point we know that there are no previously coded frames in the payload buffer */
  3427  
  3428  	/********************************************/
  3429  	/* Determine internal sampling rate         */
  3430  	/********************************************/
  3431  	fs_kHz = SKP_Silk_control_audio_bandwidth(tls, (psEnc /* &.sCmn */), TargetRate_bps)
  3432  
  3433  	/********************************************/
  3434  	/* Prepare resampler and buffered data      */
  3435  	/********************************************/
  3436  	ret = ret + (SKP_Silk_setup_resamplers_FIX(tls, psEnc, fs_kHz))
  3437  
  3438  	/********************************************/
  3439  	/* Set packet size                          */
  3440  	/********************************************/
  3441  	ret = ret + (SKP_Silk_setup_packetsize_FIX(tls, psEnc, PacketSize_ms))
  3442  
  3443  	/********************************************/
  3444  	/* Set internal sampling frequency          */
  3445  	/********************************************/
  3446  	ret = ret + (SKP_Silk_setup_fs_FIX(tls, psEnc, fs_kHz))
  3447  
  3448  	/********************************************/
  3449  	/* Set encoding complexity                  */
  3450  	/********************************************/
  3451  	ret = ret + (SKP_Silk_setup_complexity(tls, (psEnc /* &.sCmn */), Complexity))
  3452  
  3453  	/********************************************/
  3454  	/* Set bitrate/coding quality               */
  3455  	/********************************************/
  3456  	ret = ret + (SKP_Silk_setup_rate_FIX(tls, psEnc, TargetRate_bps))
  3457  
  3458  	/********************************************/
  3459  	/* Set packet loss rate measured by far end */
  3460  	/********************************************/
  3461  	if (PacketLoss_perc < 0) || (PacketLoss_perc > 100) {
  3462  		ret = -5
  3463  	}
  3464  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketLoss_perc = PacketLoss_perc
  3465  
  3466  	/********************************************/
  3467  	/* Set LBRR usage                           */
  3468  	/********************************************/
  3469  	ret = ret + (SKP_Silk_setup_LBRR_FIX(tls, psEnc))
  3470  
  3471  	/********************************************/
  3472  	/* Set DTX mode                             */
  3473  	/********************************************/
  3474  	if (DTX_enabled < 0) || (DTX_enabled > 1) {
  3475  		ret = -8
  3476  	}
  3477  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseDTX = DTX_enabled
  3478  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fcontrolled_since_last_payload = 1
  3479  
  3480  	return ret
  3481  }
  3482  
  3483  /* Control low bitrate redundancy usage */
  3484  func SKP_Silk_LBRR_ctrl_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrlC uintptr) { /* SKP_Silk_control_codec_FIX.c:133:6: */
  3485  	var LBRR_usage int32
  3486  
  3487  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_enabled != 0 {
  3488  		/* Control LBRR */
  3489  
  3490  		/* Usage control based on sensitivity and packet loss characteristics */
  3491  		/* For now, only enable adding redundancy to the next packet for active frames. Make more complex later */
  3492  		LBRR_usage = 0
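        		/* Enable LBRR for this frame only when speech activity exceeds 0.5 (128 in Q8) and the reported packet loss is above 1% */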
  3493  		if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8 > SKP_FIX_CONST(tls, 0.5, 8)) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketLoss_perc > 1) { // nb! maybe multiply loss prob and speech activity
  3494  			LBRR_usage = 1
  3495  		}
  3496  		(*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FLBRR_usage = LBRR_usage
  3497  	} else {
  3498  		(*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FLBRR_usage = 0
  3499  	}
  3500  }
  3501  
  3502  func SKP_Silk_setup_resamplers_FIX(tls *libc.TLS, psEnc uintptr, fs_kHz int32) int32 { /* SKP_Silk_control_codec_FIX.c:155:20: */
  3503  	bp := tls.Alloc(13128)
  3504  	defer tls.Free(13128)
  3505  
  3506  	var ret int32 = 0
  3507  
  3508  	if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz != fs_kHz) || ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fprev_API_fs_Hz != (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz) {
  3509  
  3510  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 0 {
  3511  			/* Initialize the resampler for enc_API.c preparing resampling from API_fs_Hz to fs_kHz */
  3512  			ret = ret + (SKP_Silk_resampler_init(tls, (psEnc /* &.sCmn */ + 18348 /* &.resampler_state */), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz, (fs_kHz * 1000)))
  3513  		} else {
  3514  			/* Allocate space for worst-case temporary upsampling, 8 to 48 kHz, i.e. a factor of 6 */
  3515  			// var x_buf_API_fs_Hz [6480]int16 at bp+168, 12960
  3516  
  3517  			var nSamples_temp int32 = ((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length) << (1)) + (5 * (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz))
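        			/* Number of buffered samples to convert: two frames plus 5 * fs_kHz extra samples of shaping look-ahead */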
  3518  
  3519  			if (((int32(int16(fs_kHz))) * (int32(int16(1000)))) < (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz != 0) {
  3520  				/* Resample buffered data in x_buf to API_fs_Hz */
  3521  
  3522  				// var temp_resampler_state SKP_Silk_resampler_state_struct at bp, 168
  3523  
  3524  				/* Initialize resampler for temporary resampling of x_buf data to API_fs_Hz */
  3525  				ret = ret + (SKP_Silk_resampler_init(tls, bp /* &temp_resampler_state */, ((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz))) * (int32(int16(1000)))), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz))
  3526  
  3527  				/* Temporary resampling of x_buf data to API_fs_Hz */
  3528  				ret = ret + (SKP_Silk_resampler(tls, bp /* &temp_resampler_state */, bp+168 /* &x_buf_API_fs_Hz[0] */, psEnc+20748 /* &.x_buf */, nSamples_temp))
  3529  
  3530  				/* Calculate the number of samples that have been temporarily upsampled */
  3531  				nSamples_temp = ((nSamples_temp * (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz) / ((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz))) * (int32(int16(1000)))))
  3532  
  3533  				/* Initialize the resampler for enc_API.c preparing resampling from API_fs_Hz to fs_kHz */
  3534  				ret = ret + (SKP_Silk_resampler_init(tls, (psEnc /* &.sCmn */ + 18348 /* &.resampler_state */), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz, ((int32(int16(fs_kHz))) * (int32(int16(1000))))))
  3535  
  3536  			} else {
  3537  				/* Copy data */
  3538  				libc.Xmemcpy(tls, bp+168 /* &x_buf_API_fs_Hz[0] */, psEnc+20748 /* &.x_buf */, (uint32(nSamples_temp) * uint32(unsafe.Sizeof(int16(0)))))
  3539  			}
  3540  
  3541  			if (1000 * fs_kHz) != (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz {
  3542  				/* Correct resampler state (unless resampling by a factor of 1) by resampling buffered data from API_fs_Hz to fs_kHz */
  3543  				ret = ret + (SKP_Silk_resampler(tls, (psEnc /* &.sCmn */ + 18348 /* &.resampler_state */), psEnc+20748 /* &.x_buf */, bp+168 /* &x_buf_API_fs_Hz[0] */, nSamples_temp))
  3544  			}
  3545  		}
  3546  	}
  3547  
  3548  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fprev_API_fs_Hz = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz
  3549  
  3550  	return ret
  3551  }
  3552  
  3553  func SKP_Silk_setup_packetsize_FIX(tls *libc.TLS, psEnc uintptr, PacketSize_ms int32) int32 { /* SKP_Silk_control_codec_FIX.c:207:20: */
  3554  	var ret int32 = 0
  3555  
  3556  	/* Set packet size */
  3557  	if ((((PacketSize_ms != 20) && (PacketSize_ms != 40)) && (PacketSize_ms != 60)) && (PacketSize_ms != 80)) && (PacketSize_ms != 100) {
  3558  		ret = -3
  3559  	} else {
  3560  		if PacketSize_ms != (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketSize_ms {
  3561  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketSize_ms = PacketSize_ms
  3562  
  3563  			/* Packet length changes. Reset LBRR buffer */
  3564  			SKP_Silk_LBRR_reset(tls, (psEnc /* &.sCmn */))
  3565  		}
  3566  	}
  3567  	return ret
  3568  }
  3569  
  3570  func SKP_Silk_setup_fs_FIX(tls *libc.TLS, psEnc uintptr, fs_kHz int32) int32 { /* SKP_Silk_control_codec_FIX.c:232:20: */
  3571  	var ret int32 = 0
  3572  
  3573  	/* Set internal sampling frequency */
  3574  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz != fs_kHz {
  3575  		/* reset part of the state */
  3576  		libc.Xmemset(tls, (psEnc + 19540 /* &.sShape */), 0, uint32(unsafe.Sizeof(SKP_Silk_shape_state_FIX{})))
  3577  		libc.Xmemset(tls, (psEnc + 19556 /* &.sPrefilt */), 0, uint32(unsafe.Sizeof(SKP_Silk_prefilter_state_FIX{})))
  3578  		libc.Xmemset(tls, (psEnc + 20672 /* &.sPred */), 0, uint32(unsafe.Sizeof(SKP_Silk_predict_state_FIX{})))
  3579  		libc.Xmemset(tls, (psEnc /* &.sCmn */ + 2088 /* &.sNSQ */), 0, uint32(unsafe.Sizeof(SKP_Silk_nsq_state{})))
  3580  		libc.Xmemset(tls, psEnc /* &.sCmn */ +8548 /* &.sNSQ_LBRR */ /* &.xq */, 0, ((uint32(2 * (20 * 24))) * uint32(unsafe.Sizeof(int16(0)))))
  3581  		libc.Xmemset(tls, psEnc /* &.sCmn */ +16256 /* &.LBRR_buffer */, 0, (uint32(2) * uint32(unsafe.Sizeof(SKP_SILK_LBRR_struct{}))))
  3582  		libc.Xmemset(tls, psEnc /* &.sCmn */ +15016 /* &.sLP */ /* &.In_LP_State */, 0, (uint32(2) * uint32(unsafe.Sizeof(int32(0)))))
  3583  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsLP.Fmode == 1 {
  3584  			/* Begin transition phase */
  3585  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsLP.Ftransition_frame_no = 1
  3586  		} else {
  3587  			/* End transition phase */
  3588  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsLP.Ftransition_frame_no = 0
  3589  		}
  3590  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinputBufIx = 0
  3591  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf = 0
  3592  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnBytesInPayloadBuf = 0
  3593  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx = 0
  3594  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FTargetRate_bps = 0 /* Ensures that psEnc->SNR_dB is recomputed */
  3595  
  3596  		libc.Xmemset(tls, psEnc+20672 /* &.sPred */ +12 /* &.prev_NLSFq_Q15 */, 0, (uint32(16) * uint32(unsafe.Sizeof(int32(0)))))
  3597  
  3598  		/* Initialize non-zero parameters */
  3599  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FprevLag = 100
  3600  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fprev_sigtype = 1
  3601  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffirst_frame_after_reset = 1
  3602  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsPrefilt.FlagPrev = 100
  3603  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsShape.FLastGainIndex = 1
  3604  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsNSQ.FlagPrev = 100
  3605  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsNSQ.Fprev_inv_gain_Q16 = 65536
  3606  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsNSQ_LBRR.Fprev_inv_gain_Q16 = 65536
  3607  
  3608  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz = fs_kHz
  3609  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 8 {
  3610  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder = 10
  3611  			*(*uintptr)(unsafe.Pointer((psEnc /* &.sCmn */ + 16248 /* &.psNLSF_CB */))) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10))
  3612  			*(*uintptr)(unsafe.Pointer((psEnc /* &.sCmn */ + 16248 /* &.psNLSF_CB */) + 1*4)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10))
  3613  		} else {
  3614  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder = 16
  3615  			*(*uintptr)(unsafe.Pointer((psEnc /* &.sCmn */ + 16248 /* &.psNLSF_CB */))) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16))
  3616  			*(*uintptr)(unsafe.Pointer((psEnc /* &.sCmn */ + 16248 /* &.psNLSF_CB */) + 1*4)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16))
  3617  		}
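        		/* Derive per-sampling-rate constants in samples: 20 ms frames, 5 ms subframes, 2 ms pitch
        		look-ahead, a 3-18 ms pitch lag search range and a 24 ms pitch LPC analysis window */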
  3618  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length = ((int32(int16(20))) * (int32(int16(fs_kHz))))
  3619  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length) / (4))
  3620  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch = ((int32(int16(2))) * (int32(int16(fs_kHz))))
  3621  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsPred.Fmin_pitch_lag = ((int32(int16(3))) * (int32(int16(fs_kHz))))
  3622  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsPred.Fmax_pitch_lag = ((int32(int16(18))) * (int32(int16(fs_kHz))))
  3623  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsPred.Fpitch_LPC_win_length = ((int32((int16(20 + (int32(2) << 1))))) * (int32(int16(fs_kHz))))
  3624  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 24 {
  3625  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fmu_LTP_Q8 = SKP_FIX_CONST(tls, 0.016, 8)
  3626  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_up = 0x7FFFFFFF
  3627  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_down = 25000
  3628  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 16 {
  3629  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fmu_LTP_Q8 = SKP_FIX_CONST(tls, 0.02, 8)
  3630  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_up = 30000
  3631  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_down = 14000
  3632  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 12 {
  3633  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fmu_LTP_Q8 = SKP_FIX_CONST(tls, 0.025, 8)
  3634  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_up = 18000
  3635  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_down = 10000
  3636  		} else {
  3637  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fmu_LTP_Q8 = SKP_FIX_CONST(tls, 0.03, 8)
  3638  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_up = 14000
  3639  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fbitrate_threshold_down = 0
  3640  		}
  3641  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz_changed = 1
  3642  
  3643  		/* Check that settings are valid */
  3644  
  3645  	}
  3646  	return ret
  3647  }
  3648  
  3649  func SKP_Silk_setup_rate_FIX(tls *libc.TLS, psEnc uintptr, TargetRate_bps int32) int32 { /* SKP_Silk_control_codec_FIX.c:317:20: */
  3650  	var k int32
  3651  	var ret int32 = 0
  3652  	var frac_Q6 int32
  3653  	var rateTable uintptr
  3654  
  3655  	/* Set bitrate/coding quality */
  3656  	if TargetRate_bps != (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FTargetRate_bps {
  3657  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FTargetRate_bps = TargetRate_bps
  3658  
  3659  		/* If new TargetRate_bps, translate to SNR_dB value */
  3660  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 8 {
  3661  			rateTable = uintptr(unsafe.Pointer(&TargetRate_table_NB))
  3662  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 12 {
  3663  			rateTable = uintptr(unsafe.Pointer(&TargetRate_table_MB))
  3664  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 16 {
  3665  			rateTable = uintptr(unsafe.Pointer(&TargetRate_table_WB))
  3666  		} else {
  3667  			rateTable = uintptr(unsafe.Pointer(&TargetRate_table_SWB))
  3668  		}
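        		/* Find the two rate table entries bracketing TargetRate_bps and interpolate linearly between
        		the corresponding SNR_table_Q1 values: frac_Q6 is the position within the interval in Q6, and
        		shifting the Q1 table entries up by 6 yields the target SNR in Q7 */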
  3669  		for k = 1; k < 8; k++ {
  3670  			/* Find bitrate interval in table and interpolate */
  3671  			if TargetRate_bps <= *(*int32)(unsafe.Pointer(rateTable + uintptr(k)*4)) {
  3672  				frac_Q6 = (((TargetRate_bps - *(*int32)(unsafe.Pointer(rateTable + uintptr((k-1))*4))) << (6)) / (*(*int32)(unsafe.Pointer(rateTable + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(rateTable + uintptr((k-1))*4))))
  3673  				(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FSNR_dB_Q7 = (((SNR_table_Q1[(k - 1)]) << (6)) + ((frac_Q6) * (SNR_table_Q1[k] - SNR_table_Q1[(k-1)])))
  3674  				break
  3675  			}
  3676  		}
  3677  	}
  3678  	return ret
  3679  }
  3680  
  3681  func SKP_Silk_setup_LBRR_FIX(tls *libc.TLS, psEnc uintptr) int32 { /* SKP_Silk_control_codec_FIX.c:353:20: */
  3682  	var ret int32 = 0
  3683  	var LBRRRate_thres_bps int32
  3684  
  3685  	if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseInBandFEC < 0) || ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseInBandFEC > 1) {
  3686  		ret = -7
  3687  	}
  3688  
  3689  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_enabled = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseInBandFEC
  3690  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 8 {
  3691  		LBRRRate_thres_bps = (18000 - 9000)
  3692  	} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 12 {
  3693  		LBRRRate_thres_bps = (18000 - 6000)
  3694  
  3695  	} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 16 {
  3696  		LBRRRate_thres_bps = (18000 - 3000)
  3697  	} else {
  3698  		LBRRRate_thres_bps = 18000
  3699  	}
  3700  
  3701  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FTargetRate_bps >= LBRRRate_thres_bps {
  3702  		/* Set gain increase / rate reduction for LBRR usage */
  3703  		/* Coarsely tuned with PESQ for now. */
  3704  		/* Linear regression coefs G = 8 - 0.5 * loss */
  3705  		/* Meaning that at 16% loss the main rate and the redundant rate are the same, i.e. G = 0 */
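        		/* e.g. at 10% packet loss GainIncreases = 8 - 10/2 = 3; it is clipped to 0 for losses of 16% or more */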
  3706  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_GainIncreases = SKP_max_int(tls, (8 - (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketLoss_perc) >> (1))), 0)
  3707  
  3708  		/* Set main stream rate compensation */
  3709  		if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_enabled != 0) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketLoss_perc > 1) {
  3710  			/* Tuned to give approx same mean / weighted bitrate as no inband FEC */
  3711  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FinBandFEC_SNR_comp_Q8 = (SKP_FIX_CONST(tls, 6.0, 8) - (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_GainIncreases) << (7)))
  3712  		} else {
  3713  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FinBandFEC_SNR_comp_Q8 = 0
  3714  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_enabled = 0
  3715  		}
  3716  	} else {
  3717  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FinBandFEC_SNR_comp_Q8 = 0
  3718  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_enabled = 0
  3719  	}
  3720  	return ret
  3721  }
  3722  
  3723  /* Calculates correlation vector X'*t */
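        /* Column j of the implicit L x order matrix X holds x delayed by j samples (ptr1 starts at
        x[order - 1] and steps backwards); when rshifts > 0 each product is right-shifted during
        accumulation to avoid overflowing the 32-bit sums */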
  3724  func SKP_Silk_corrVector_FIX(tls *libc.TLS, x uintptr, t uintptr, L int32, order int32, Xt uintptr, rshifts int32) { /* SKP_Silk_corrMatrix_FIX.c:35:6: */
  3725  	var lag int32
  3726  	var i int32
  3727  	var ptr1 uintptr
  3728  	var ptr2 uintptr
  3729  	var inner_prod int32
  3730  
  3731  	ptr1 = (x + uintptr((order-1))*2) /* Points to first sample of column 0 of X: X[:,0] */
  3732  	ptr2 = t
  3733  	/* Calculate X'*t */
  3734  	if rshifts > 0 {
  3735  		/* Right shifting used */
  3736  		for lag = 0; lag < order; lag++ {
  3737  			inner_prod = 0
  3738  			for i = 0; i < L; i++ {
  3739  				inner_prod = inner_prod + (((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr(i)*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr2 + uintptr(i)*2))))) >> (rshifts))
  3740  			}
  3741  			*(*int32)(unsafe.Pointer(Xt + uintptr(lag)*4)) = inner_prod /* X[:,lag]'*t */
  3742  			ptr1 -= 2                                                   /* Go to next column of X */
  3743  		}
  3744  	} else {
  3745  
  3746  		for lag = 0; lag < order; lag++ {
  3747  			*(*int32)(unsafe.Pointer(Xt + uintptr(lag)*4)) = SKP_Silk_inner_prod_aligned(tls, ptr1, ptr2, L) /* X[:,lag]'*t */
  3748  			ptr1 -= 2                                                                                        /* Go to next column of X */
  3749  		}
  3750  	}
  3751  }
  3752  
  3753  /* Calculates correlation matrix X'*X */
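        /* The diagonal of X'*X is built incrementally from the column-0 energy by removing one trailing
        and adding one leading sample per column; off-diagonal terms are computed for one triangle and
        mirrored into the symmetric position. rshifts acts as a minimum shift on input and returns the
        shift actually applied. */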
  3754  func SKP_Silk_corrMatrix_FIX(tls *libc.TLS, x uintptr, L int32, order int32, head_room int32, XX uintptr, rshifts uintptr) { /* SKP_Silk_corrMatrix_FIX.c:71:6: */
  3755  	bp := tls.Alloc(8)
  3756  	defer tls.Free(8)
  3757  
  3758  	var i int32
  3759  	var j int32
  3760  	var lag int32
  3761  	// var rshifts_local int32 at bp+4, 4
  3762  
  3763  	var head_room_rshifts int32
  3764  	// var energy int32 at bp, 4
  3765  
  3766  	var ptr1 uintptr
  3767  	var ptr2 uintptr
  3768  
  3769  	/* Calculate energy to find shift used to fit in 32 bits */
  3770  	SKP_Silk_sum_sqr_shift(tls, bp /* &energy */, bp+4 /* &rshifts_local */, x, ((L + order) - 1))
  3771  
  3772  	/* Add shifts to get the desired head room */
  3773  	head_room_rshifts = func() int32 {
  3774  		if (head_room - SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(bp /* energy */)))) > (0) {
  3775  			return (head_room - SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(bp /* energy */))))
  3776  		}
  3777  		return 0
  3778  	}()
  3779  
  3780  	*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) >> (head_room_rshifts))
  3781  	*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)) += head_room_rshifts
  3782  
  3783  	/* Calculate energy of first column (0) of X: X[:,0]'*X[:,0] */
  3784  	/* Remove contribution of first order - 1 samples */
  3785  	for i = 0; i < (order - 1); i++ {
  3786  		*(*int32)(unsafe.Pointer(bp /* energy */)) -= (((int32(*(*int16)(unsafe.Pointer(x + uintptr(i)*2)))) * (int32(*(*int16)(unsafe.Pointer(x + uintptr(i)*2))))) >> (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */))))
  3787  	}
  3788  	if *(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)) < *(*int32)(unsafe.Pointer(rshifts)) {
  3789  		/* Adjust energy */
  3790  		*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) >> (*(*int32)(unsafe.Pointer(rshifts)) - *(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */))))
  3791  		*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)) = *(*int32)(unsafe.Pointer(rshifts))
  3792  	}
  3793  
  3794  	/* Calculate energy of remaining columns of X: X[:,j]'*X[:,j] */
  3795  	/* Fill out the diagonal of the correlation matrix */
  3796  	*(*int32)(unsafe.Pointer((XX + uintptr((((0)*(order))+(0)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3797  	ptr1 = (x + uintptr((order-1))*2) /* First sample of column 0 of X */
  3798  	for j = 1; j < order; j++ {
  3799  		*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) - (((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr((L-j))*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr((L-j))*2))))) >> (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)))))
  3800  		*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) + (((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr(-j)*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr(-j)*2))))) >> (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)))))
  3801  		*(*int32)(unsafe.Pointer((XX + uintptr((((j)*(order))+(j)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3802  	}
  3803  
  3804  	ptr2 = (x + uintptr((order-2))*2) /* First sample of column 1 of X */
  3805  	/* Calculate the remaining elements of the correlation matrix */
  3806  	if *(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)) > 0 {
  3807  		/* Right shifting used */
  3808  		for lag = 1; lag < order; lag++ {
  3809  			/* Inner product of column 0 and column lag: X[:,0]'*X[:,lag] */
  3810  			*(*int32)(unsafe.Pointer(bp /* energy */)) = 0
  3811  			for i = 0; i < L; i++ {
  3812  				*(*int32)(unsafe.Pointer(bp /* energy */)) += (((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr(i)*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr2 + uintptr(i)*2))))) >> (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */))))
  3813  			}
  3814  			/* Calculate remaining off diagonal: X[:,j]'*X[:,j + lag] */
  3815  			*(*int32)(unsafe.Pointer((XX + uintptr((((lag)*(order))+(0)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3816  			*(*int32)(unsafe.Pointer((XX + uintptr((((0)*(order))+(lag)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3817  			for j = 1; j < (order - lag); j++ {
  3818  				*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) - (((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr((L-j))*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr2 + uintptr((L-j))*2))))) >> (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)))))
  3819  				*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) + (((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr(-j)*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr2 + uintptr(-j)*2))))) >> (*(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */)))))
  3820  				*(*int32)(unsafe.Pointer((XX + uintptr((((lag+j)*(order))+(j)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3821  				*(*int32)(unsafe.Pointer((XX + uintptr((((j)*(order))+(lag+j)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3822  			}
  3823  			ptr2 -= 2 /* Update pointer to first sample of next column (lag) in X */
  3824  		}
  3825  	} else {
  3826  		for lag = 1; lag < order; lag++ {
  3827  			/* Inner product of column 0 and column lag: X[:,0]'*X[:,lag] */
  3828  			*(*int32)(unsafe.Pointer(bp /* energy */)) = SKP_Silk_inner_prod_aligned(tls, ptr1, ptr2, L)
  3829  			*(*int32)(unsafe.Pointer((XX + uintptr((((lag)*(order))+(0)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3830  			*(*int32)(unsafe.Pointer((XX + uintptr((((0)*(order))+(lag)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3831  			/* Calculate remaining off diagonal: X[:,j]'*X[:,j + lag] */
  3832  			for j = 1; j < (order - lag); j++ {
  3833  				*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) - ((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr((L-j))*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr2 + uintptr((L-j))*2))))))
  3834  				*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) + ((int32(*(*int16)(unsafe.Pointer(ptr1 + uintptr(-j)*2)))) * (int32(*(*int16)(unsafe.Pointer(ptr2 + uintptr(-j)*2))))))
  3835  				*(*int32)(unsafe.Pointer((XX + uintptr((((lag+j)*(order))+(j)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3836  				*(*int32)(unsafe.Pointer((XX + uintptr((((j)*(order))+(lag+j)))*4))) = *(*int32)(unsafe.Pointer(bp /* energy */))
  3837  			}
  3838  			ptr2 -= 2 /* Update pointer to first sample of next column (lag) in X */
  3839  		}
  3840  	}
  3841  	*(*int32)(unsafe.Pointer(rshifts)) = *(*int32)(unsafe.Pointer(bp + 4 /* rshifts_local */))
  3842  }
  3843  
  3844  /************************/
  3845  /* Init Decoder State   */
  3846  /************************/
  3847  func SKP_Silk_init_decoder(tls *libc.TLS, psDec uintptr) int32 { /* SKP_Silk_create_init_destroy.c:34:9: */
  3848  	libc.Xmemset(tls, psDec, 0, uint32(unsafe.Sizeof(SKP_Silk_decoder_state{})))
  3849  	/* Set sampling rate to 24 kHz, and init non-zero values */
  3850  	SKP_Silk_decoder_set_fs(tls, psDec, 24)
  3851  
  3852  	/* Used to deactivate e.g. LSF interpolation and fluctuation reduction */
  3853  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffirst_frame_after_reset = 1
  3854  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_inv_gain_Q16 = 65536
  3855  
  3856  	/* Reset CNG state */
  3857  	SKP_Silk_CNG_Reset(tls, psDec)
  3858  
  3859  	SKP_Silk_PLC_Reset(tls, psDec)
  3860  
  3861  	return 0
  3862  }
  3863  
  3864  /* Set decoder sampling rate */
  3865  func SKP_Silk_decoder_set_fs(tls *libc.TLS, psDec uintptr, fs_kHz int32) { /* SKP_Silk_decoder_set_fs.c:31:6: */
  3866  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz != fs_kHz {
  3867  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz = fs_kHz
  3868  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length = ((int32(int16(20))) * (int32(int16(fs_kHz))))
  3869  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length = ((int32((int16(20 / 4)))) * (int32(int16(fs_kHz))))
  3870  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz == 8 {
  3871  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order = 10
  3872  			*(*uintptr)(unsafe.Pointer((psDec + 11500 /* &.psNLSF_CB */))) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10))
  3873  			*(*uintptr)(unsafe.Pointer((psDec + 11500 /* &.psNLSF_CB */) + 1*4)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10))
  3874  		} else {
  3875  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order = 16
  3876  			*(*uintptr)(unsafe.Pointer((psDec + 11500 /* &.psNLSF_CB */))) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16))
  3877  			*(*uintptr)(unsafe.Pointer((psDec + 11500 /* &.psNLSF_CB */) + 1*4)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16))
  3878  		}
  3879  		/* Reset part of the decoder state */
  3880  		libc.Xmemset(tls, psDec+4888 /* &.sLPC_Q14 */, 0, (uint32(16) * uint32(unsafe.Sizeof(int32(0)))))
  3881  		libc.Xmemset(tls, psDec+9272 /* &.outBuf */, 0, ((uint32(20 * 24)) * uint32(unsafe.Sizeof(int16(0)))))
  3882  		libc.Xmemset(tls, psDec+11244 /* &.prevNLSF_Q15 */, 0, (uint32(16) * uint32(unsafe.Sizeof(int32(0)))))
  3883  
  3884  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlagPrev = 100
  3885  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLastGainIndex = 1
  3886  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_sigtype = 0
  3887  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffirst_frame_after_reset = 1
  3888  
  3889  		if fs_kHz == 24 {
  3890  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_A = uintptr(unsafe.Pointer(&SKP_Silk_Dec_A_HP_24))
  3891  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_B = uintptr(unsafe.Pointer(&SKP_Silk_Dec_B_HP_24))
  3892  		} else if fs_kHz == 16 {
  3893  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_A = uintptr(unsafe.Pointer(&SKP_Silk_Dec_A_HP_16))
  3894  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_B = uintptr(unsafe.Pointer(&SKP_Silk_Dec_B_HP_16))
  3895  		} else if fs_kHz == 12 {
  3896  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_A = uintptr(unsafe.Pointer(&SKP_Silk_Dec_A_HP_12))
  3897  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_B = uintptr(unsafe.Pointer(&SKP_Silk_Dec_B_HP_12))
  3898  		} else if fs_kHz == 8 {
  3899  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_A = uintptr(unsafe.Pointer(&SKP_Silk_Dec_A_HP_8))
  3900  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_B = uintptr(unsafe.Pointer(&SKP_Silk_Dec_B_HP_8))
  3901  		} else {
  3902  			/* unsupported sampling rate */
  3903  
  3904  		}
  3905  	}
  3906  
  3907  	/* Check that settings are valid */
  3908  
  3909  }
  3910  
  3911  /**********************************************************/
  3912  /* Core decoder. Performs inverse NSQ operation LTP + LPC */
  3913  /**********************************************************/
  3914  func SKP_Silk_decode_core(tls *libc.TLS, psDec uintptr, psDecCtrl uintptr, xq uintptr, q uintptr) { /* SKP_Silk_decode_core.c:44:6: */
  3915  	bp := tls.Alloc(1536)
  3916  	defer tls.Free(1536)
  3917  
  3918  	var i int32
  3919  	var k int32
  3920  	var lag int32 = 0
  3921  	var start_idx int32
  3922  	var sLTP_buf_idx int32
  3923  	var NLSF_interpolation_flag int32
  3924  	var sigtype int32
  3925  	var A_Q12 uintptr
  3926  	var B_Q14 uintptr
  3927  	var pxq uintptr
  3928  	// var A_Q12_tmp [16]int16 at bp, 32
  3929  
  3930  	// var sLTP [480]int16 at bp+96, 960
  3931  
  3932  	var LTP_pred_Q14 int32
  3933  	var Gain_Q16 int32
  3934  	var inv_gain_Q16 int32
  3935  	var inv_gain_Q32 int32
  3936  	var gain_adj_Q16 int32
  3937  	var rand_seed int32
  3938  	var offset_Q10 int32
  3939  	var dither int32
  3940  	var pred_lag_ptr uintptr
  3941  	var pexc_Q10 uintptr
  3942  	var pres_Q10 uintptr
  3943  	// var vec_Q10 [120]int32 at bp+1056, 480
  3944  
  3945  	// var FiltState [16]int32 at bp+32, 64
  3946  
  3947  	offset_Q10 = int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Quantization_Offsets_Q10)) + uintptr((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype)*4) + uintptr((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FQuantOffsetType)*2)))
  3948  
  3949  	if (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FNLSFInterpCoef_Q2 < (int32(1) << 2) {
  3950  		NLSF_interpolation_flag = 1
  3951  	} else {
  3952  		NLSF_interpolation_flag = 0
  3953  	}
  3954  
  3955  	/* Decode excitation */
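        	/* The sign of each excitation sample is pseudo-randomly flipped: rand_seed is advanced by a
        	linear congruential generator, dither = rand_seed >> 31 is either 0 or -1, and
        	(x ^ dither) - dither negates x exactly when dither is -1 */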
  3956  	rand_seed = (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FSeed
  3957  	for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length; i++ {
  3958  		rand_seed = (int32((uint32(907633515)) + ((uint32(rand_seed)) * (uint32(196314165)))))
  3959  		/* dither = rand_seed < 0 ? 0xFFFFFFFF : 0; */
  3960  		dither = ((rand_seed) >> (31))
  3961  
  3962  		*(*int32)(unsafe.Pointer((psDec + 5432 /* &.exc_Q10 */) + uintptr(i)*4)) = (((*(*int32)(unsafe.Pointer(q + uintptr(i)*4))) << (10)) + offset_Q10)
  3963  		*(*int32)(unsafe.Pointer((psDec + 5432 /* &.exc_Q10 */) + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer((psDec + 5432 /* &.exc_Q10 */) + uintptr(i)*4)) ^ dither) - dither)
  3964  
  3965  		rand_seed = rand_seed + (*(*int32)(unsafe.Pointer(q + uintptr(i)*4)))
  3966  	}
  3967  
  3968  	pexc_Q10 = psDec + 5432 /* &.exc_Q10 */
  3969  	pres_Q10 = psDec + 7352 /* &.res_Q10 */
  3970  	pxq = ((psDec + 9272 /* &.outBuf */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length)*2)
  3971  	sLTP_buf_idx = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length
  3972  	/* Loop over subframes */
  3973  	for k = 0; k < 4; k++ {
  3974  		A_Q12 = psDecCtrl + 36 /* &.PredCoef_Q12 */ + uintptr((k>>1))*32
  3975  
  3976  		/* Preload the LPC coefficients into an array on the stack. Gives a small performance gain */
  3977  		libc.Xmemcpy(tls, bp /* &A_Q12_tmp[0] */, A_Q12, (uint32((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) * uint32(unsafe.Sizeof(int16(0)))))
  3978  		B_Q14 = ((psDecCtrl + 100 /* &.LTPCoef_Q14 */) + uintptr((k*5))*2)
  3979  		Gain_Q16 = *(*int32)(unsafe.Pointer((psDecCtrl + 16 /* &.Gains_Q16 */) + uintptr(k)*4))
  3980  		sigtype = (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype
  3981  
  3982  		inv_gain_Q16 = SKP_INVERSE32_varQ(tls, func() int32 {
  3983  			if (Gain_Q16) > (1) {
  3984  				return Gain_Q16
  3985  			}
  3986  			return 1
  3987  		}(), 32)
  3988  		inv_gain_Q16 = func() int32 {
  3989  			if (inv_gain_Q16) < (0x7FFF) {
  3990  				return inv_gain_Q16
  3991  			}
  3992  			return 0x7FFF
  3993  		}()
  3994  
  3995  		/* Calculate Gain adjustment factor */
  3996  		gain_adj_Q16 = (int32(1) << 16)
  3997  		if inv_gain_Q16 != (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_inv_gain_Q16 {
  3998  			gain_adj_Q16 = SKP_DIV32_varQ(tls, inv_gain_Q16, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_inv_gain_Q16, 16)
  3999  		}
  4000  
  4001  		/* Avoid abrupt transition from voiced PLC to unvoiced normal decoding */
  4002  		if ((((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt != 0) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_sigtype == 0)) && ((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype == 1)) && (k < (int32(4) >> 1)) {
  4003  
  4004  			libc.Xmemset(tls, B_Q14, 0, (uint32(5) * uint32(unsafe.Sizeof(int16(0)))))
  4005  			*(*int16)(unsafe.Pointer(B_Q14 + 2*2)) = (int16(int32(int32(int16(1))) << 12)) /* 0.25 */
  4006  
  4007  			sigtype = 0
  4008  			*(*int32)(unsafe.Pointer((psDecCtrl /* &.pitchL */) + uintptr(k)*4)) = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlagPrev
  4009  		}
  4010  
  4011  		if sigtype == 0 {
  4012  			/* Voiced */
  4013  
  4014  			lag = *(*int32)(unsafe.Pointer((psDecCtrl /* &.pitchL */) + uintptr(k)*4))
  4015  			/* Re-whitening */
  4016  			if (k & (3 - ((NLSF_interpolation_flag) << (1)))) == 0 {
  4017  				/* Rewhiten with new A coefs */
  4018  				start_idx = ((((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length - lag) - (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) - (5 / 2))
  4019  
  4020  				libc.Xmemset(tls, bp+32 /* &FiltState[0] */, 0, (uint32((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) * uint32(unsafe.Sizeof(int32(0))))) /* Not really necessary, but Valgrind and Coverity will complain otherwise */
  4021  				SKP_Silk_MA_Prediction(tls, ((psDec + 9272 /* &.outBuf */) + uintptr((start_idx+(k*((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length>>2))))*2),
  4022  					A_Q12, bp+32 /* &FiltState[0] */, (bp + 96 /* &sLTP[0] */ + uintptr(start_idx)*2), ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length - start_idx), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order)
  4023  
  4024  				/* After rewhitening the LTP state is unscaled */
  4025  				inv_gain_Q32 = ((inv_gain_Q16) << (16))
  4026  				if k == 0 {
  4027  					/* Do LTP downscaling */
  4028  					inv_gain_Q32 = (((((inv_gain_Q32) >> 16) * (int32(int16((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FLTP_scale_Q14)))) + ((((inv_gain_Q32) & 0x0000FFFF) * (int32(int16((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FLTP_scale_Q14)))) >> 16)) << (2))
  4029  				}
  4030  				for i = 0; i < (lag + (5 / 2)); i++ {
  4031  					*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-i)-1))*4)) = ((((inv_gain_Q32) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp + 96 /* &sLTP[0] */ + uintptr((((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length-i)-1))*2))))) + ((((inv_gain_Q32) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp + 96 /* &sLTP[0] */ + uintptr((((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length-i)-1))*2))))) >> 16))
  4032  				}
  4033  			} else {
  4034  				/* Update LTP state when Gain changes */
  4035  				if gain_adj_Q16 != (int32(1) << 16) {
  4036  					for i = 0; i < (lag + (5 / 2)); i++ {
  4037  						*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-i)-1))*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-i)-1))*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-i)-1))*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
  4038  							if (16) == 1 {
  4039  								return (((*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-i)-1))*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-i)-1))*4))) & 1))
  4040  							}
  4041  							return ((((*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-i)-1))*4))) >> ((16) - 1)) + 1) >> 1)
  4042  						}())))
  4043  					}
  4044  				}
  4045  			}
  4046  		}
  4047  
  4048  		/* Scale short term state */
  4049  		for i = 0; i < 16; i++ {
  4050  			*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
  4051  				if (16) == 1 {
  4052  					return (((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(i)*4))) & 1))
  4053  				}
  4054  				return ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
  4055  			}())))
  4056  		}
  4057  
  4058  		/* Save inv_gain */
  4059  
  4060  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_inv_gain_Q16 = inv_gain_Q16
  4061  
  4062  		/* Long-term prediction */
  4063  		if sigtype == 0 {
  4064  			/* Setup pointer */
  4065  			pred_lag_ptr = ((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-lag)+(5/2)))*4)
  4066  			for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length; i++ {
  4067  				/* Unrolled loop */
  4068  				LTP_pred_Q14 = ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14))))) >> 16))
  4069  				LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 1*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 1*2))))) >> 16)))
  4070  				LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 2*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 2*2))))) >> 16)))
  4071  				LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 3*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 3*2))))) >> 16)))
  4072  				LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 4*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 4*2))))) >> 16)))
  4073  				pred_lag_ptr += 4
  4074  
  4075  				/* Generate LPC residual */
  4076  				*(*int32)(unsafe.Pointer(pres_Q10 + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer(pexc_Q10 + uintptr(i)*4))) + (func() int32 {
  4077  					if (4) == 1 {
  4078  						return (((LTP_pred_Q14) >> 1) + ((LTP_pred_Q14) & 1))
  4079  					}
  4080  					return ((((LTP_pred_Q14) >> ((4) - 1)) + 1) >> 1)
  4081  				}()))
  4082  
  4083  				/* Update states */
  4084  				*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(sLTP_buf_idx)*4)) = ((*(*int32)(unsafe.Pointer(pres_Q10 + uintptr(i)*4))) << (6))
  4085  				sLTP_buf_idx++
  4086  			}
  4087  		} else {
  4088  			libc.Xmemcpy(tls, pres_Q10, pexc_Q10, (uint32((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length) * uint32(unsafe.Sizeof(int32(0)))))
  4089  		}
  4090  
  4091  		SKP_Silk_decode_short_term_prediction(tls, bp+1056 /* &vec_Q10[0] */, pres_Q10, psDec+4888 /* &.sLPC_Q14 */, bp /* &A_Q12_tmp[0] */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)
  4092  
  4093  		/* Scale with Gain */
  4094  		for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length; i++ {
  4095  			*(*int16)(unsafe.Pointer(pxq + uintptr(i)*2)) = func() int16 {
  4096  				if (func() int32 {
  4097  					if (10) == 1 {
  4098  						return (((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4099  							if (16) == 1 {
  4100  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4101  							}
  4102  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4103  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4104  							if (16) == 1 {
  4105  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4106  							}
  4107  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4108  						}()))) & 1))
  4109  					}
  4110  					return ((((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4111  						if (16) == 1 {
  4112  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4113  						}
  4114  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4115  					}()))) >> ((10) - 1)) + 1) >> 1)
  4116  				}()) > 0x7FFF {
  4117  					return int16(0x7FFF)
  4118  				}
  4119  				return func() int16 {
  4120  					if (func() int32 {
  4121  						if (10) == 1 {
  4122  							return (((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4123  								if (16) == 1 {
  4124  									return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4125  								}
  4126  								return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4127  							}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4128  								if (16) == 1 {
  4129  									return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4130  								}
  4131  								return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4132  							}()))) & 1))
  4133  						}
  4134  						return ((((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4135  							if (16) == 1 {
  4136  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4137  							}
  4138  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4139  						}()))) >> ((10) - 1)) + 1) >> 1)
  4140  					}()) < (int32(libc.Int16FromInt32(0x8000))) {
  4141  						return libc.Int16FromInt32(0x8000)
  4142  					}
  4143  					return func() int16 {
  4144  						if (10) == 1 {
  4145  							return (int16(((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4146  								if (16) == 1 {
  4147  									return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4148  								}
  4149  								return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4150  							}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4151  								if (16) == 1 {
  4152  									return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4153  								}
  4154  								return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4155  							}()))) & 1)))
  4156  						}
  4157  						return (int16((((((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(Gain_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 1056 /* &vec_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
  4158  							if (16) == 1 {
  4159  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
  4160  							}
  4161  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
  4162  						}()))) >> ((10) - 1)) + 1) >> 1))
  4163  					}()
  4164  				}()
  4165  			}()
  4166  		}
  4167  
  4168  		/* Update LPC filter state */
  4169  		libc.Xmemcpy(tls, psDec+4888 /* &.sLPC_Q14 */, ((psDec + 4888 /* &.sLPC_Q14 */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)*4), (uint32(16) * uint32(unsafe.Sizeof(int32(0)))))
  4170  		pexc_Q10 += 4 * (uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length))
  4171  		pres_Q10 += 4 * (uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length))
  4172  		pxq += 2 * (uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length))
  4173  	}
  4174  
  4175  	/* Copy to output */
  4176  	libc.Xmemcpy(tls, xq, ((psDec + 9272 /* &.outBuf */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length)*2), (uint32((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length) * uint32(unsafe.Sizeof(int16(0)))))
  4177  
  4178  }
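
        // Note for readers (not part of the ccgo output): the inline func() int32
        // closures in the loops above are ccgo's expansion of the SILK fixed-point
        // rounding right shift (SKP_RSHIFT_ROUND in the C sources), and the nested
        // comparisons against 0x7FFF and -0x8000 saturate the gain-scaled Q10
        // samples to the int16 range before they are stored in pxq. A plain-Go
        // sketch of that shift, assuming 1 <= n <= 31:
        //
        //	func rshiftRound(x int32, n uint) int32 {
        //		if n == 1 {
        //			return (x >> 1) + (x & 1)
        //		}
        //		return ((x >> (n - 1)) + 1) >> 1
        //	}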
  4179  
  4180  func SKP_Silk_decode_short_term_prediction(tls *libc.TLS, vec_Q10 uintptr, pres_Q10 uintptr, sLPC_Q14 uintptr, A_Q12_tmp uintptr, LPC_order int32, subfr_length int32) { /* SKP_Silk_decode_core.c:204:6: */
  4181  	var i int32
  4182  	var LPC_pred_Q10 int32
  4183  	var j int32
  4184  	for i = 0; i < subfr_length; i++ {
  4185  		/* Partially unrolled */
  4186  		LPC_pred_Q10 = ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-1))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-1))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp))))) >> 16))
  4187  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-2))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 1*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-2))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 1*2))))) >> 16)))
  4188  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-3))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 2*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-3))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 2*2))))) >> 16)))
  4189  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-4))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 3*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-4))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 3*2))))) >> 16)))
  4190  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-5))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 4*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-5))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 4*2))))) >> 16)))
  4191  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-6))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 5*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-6))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 5*2))))) >> 16)))
  4192  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-7))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 6*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-7))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 6*2))))) >> 16)))
  4193  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-8))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 7*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-8))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 7*2))))) >> 16)))
  4194  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-9))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 8*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-9))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 8*2))))) >> 16)))
  4195  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-10))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 9*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr(((16+i)-10))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + 9*2))))) >> 16)))
  4196  
  4197  		for j = 10; j < LPC_order; j++ {
  4198  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr((((16+i)-j)-1))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + uintptr(j)*2))))) + ((((*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr((((16+i)-j)-1))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12_tmp + uintptr(j)*2))))) >> 16)))
  4199  		}
  4200  
  4201  		/* Add prediction to LPC residual */
  4202  		*(*int32)(unsafe.Pointer(vec_Q10 + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer(pres_Q10 + uintptr(i)*4))) + (LPC_pred_Q10))
  4203  
  4204  		/* Update states */
  4205  		*(*int32)(unsafe.Pointer(sLPC_Q14 + uintptr((16+i))*4)) = ((*(*int32)(unsafe.Pointer(vec_Q10 + uintptr(i)*4))) << (4))
  4206  	}
  4207  }
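
        // Note (illustrative, not generated): each unrolled term above is the
        // fixed-point SKP_SMULWB pattern (a>>16)*b + ((a&0x0000FFFF)*b)>>16, i.e.
        // the top 32 bits of a 32x16-bit product, so a Q14 state value times a Q12
        // coefficient yields a Q10 prediction. Ignoring 32-bit wrap-around of the
        // intermediate products, a plain-Go equivalent would be:
        //
        //	func smulwb(a32 int32, b16 int16) int32 { // illustrative only
        //		return int32((int64(a32) * int64(b16)) >> 16)
        //	}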
  4208  
  4209  /***********************************************************************
  4210  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  4211  Redistribution and use in source and binary forms, with or without
  4212  modification, (subject to the limitations in the disclaimer below)
  4213  are permitted provided that the following conditions are met:
  4214  - Redistributions of source code must retain the above copyright notice,
  4215  this list of conditions and the following disclaimer.
  4216  - Redistributions in binary form must reproduce the above copyright
  4217  notice, this list of conditions and the following disclaimer in the
  4218  documentation and/or other materials provided with the distribution.
  4219  - Neither the name of Skype Limited, nor the names of specific
  4220  contributors, may be used to endorse or promote products derived from
  4221  this software without specific prior written permission.
  4222  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  4223  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  4224  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  4225  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  4226  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  4227  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  4228  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  4229  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  4230  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  4231  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  4232  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  4233  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  4234  ***********************************************************************/
  4235  
  4236  /****************/
  4237  /* Decode frame */
  4238  /****************/
  4239  func SKP_Silk_decode_frame(tls *libc.TLS, psDec uintptr, pOut uintptr, pN uintptr, pCode uintptr, nBytes int32, action int32, decBytes uintptr) int32 { /* SKP_Silk_decode_frame.c:35:9: */
  4240  	bp := tls.Alloc(2084)
  4241  	defer tls.Free(2084)
  4242  
  4243  	// var sDecCtrl SKP_Silk_decoder_control at bp, 164
  4244  
  4245  	var L int32
  4246  	var fs_Khz_old int32
  4247  	var ret int32 = 0
  4248  	// var Pulses [480]int32 at bp+164, 1920
  4249  
  4250  	L = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length
  4251  	(*SKP_Silk_decoder_control)(unsafe.Pointer(bp /* &sDecCtrl */)).FLTP_scale_Q14 = 0
  4252  
  4253  	/* Safety checks */
  4254  
  4255  	/********************************************/
  4256  	/* Decode Frame if packet is not lost  */
  4257  	/********************************************/
  4258  	*(*int32)(unsafe.Pointer(decBytes)) = 0
  4259  	if action == 0 {
  4260  		/********************************************/
  4261  		/* Initialize arithmetic coder              */
  4262  		/********************************************/
  4263  		fs_Khz_old = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz
  4264  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded == 0 {
  4265  			/* Initialize range decoder state */
  4266  			SKP_Silk_range_dec_init(tls, (psDec /* &.sRC */), pCode, nBytes)
  4267  		}
  4268  
  4269  		/********************************************/
  4270  		/* Decode parameters and pulse signal       */
  4271  		/********************************************/
  4272  		SKP_Silk_decode_parameters(tls, psDec, bp /* &sDecCtrl */, bp+164 /* &Pulses[0] */, 1)
  4273  
  4274  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsRC.Ferror != 0 {
  4275  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnBytesLeft = 0
  4276  
  4277  			action = 1 /* PLC operation */
  4278  			/* revert fs if changed in decode_parameters */
  4279  			SKP_Silk_decoder_set_fs(tls, psDec, fs_Khz_old)
  4280  
  4281  			/* Avoid crashing */
  4282  			*(*int32)(unsafe.Pointer(decBytes)) = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsRC.FbufferLength
  4283  
  4284  			if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsRC.Ferror == -8 {
  4285  				ret = -11
  4286  			} else {
  4287  				ret = -12
  4288  			}
  4289  		} else {
  4290  			*(*int32)(unsafe.Pointer(decBytes)) = ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsRC.FbufferLength - (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnBytesLeft)
  4291  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded++
  4292  
  4293  			/* Update lengths. Sampling frequency could have changed */
  4294  			L = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length
  4295  
  4296  			/********************************************************/
  4297  			/* Run inverse NSQ                                      */
  4298  			/********************************************************/
  4299  			SKP_Silk_decode_core(tls, psDec, bp /* &sDecCtrl */, pOut, bp+164 /* &Pulses[0] */)
  4300  
  4301  			/********************************************************/
  4302  			/* Update PLC state                                     */
  4303  			/********************************************************/
  4304  			SKP_Silk_PLC(tls, psDec, bp /* &sDecCtrl */, pOut, L, action)
  4305  
  4306  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt = 0
  4307  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_sigtype = (*SKP_Silk_decoder_control)(unsafe.Pointer(bp /* &sDecCtrl */)).Fsigtype
  4308  
  4309  			/* A frame has been decoded without errors */
  4310  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffirst_frame_after_reset = 0
  4311  		}
  4312  	}
  4313  	/*************************************************************/
  4314  	/* Generate Concealment frame if packet is lost, or corrupt  */
  4315  	/*************************************************************/
  4316  	if action == 1 {
  4317  		/* Handle packet loss by extrapolation */
  4318  		SKP_Silk_PLC(tls, psDec, bp /* &sDecCtrl */, pOut, L, action)
  4319  	}
  4320  
  4321  	/*************************/
  4322  	/* Update output buffer. */
  4323  	/*************************/
  4324  	libc.Xmemcpy(tls, psDec+9272 /* &.outBuf */, pOut, (uint32(L) * uint32(unsafe.Sizeof(int16(0)))))
  4325  
  4326  	/****************************************************************/
  4327  	/* Ensure smooth connection of extrapolated and good frames     */
  4328  	/****************************************************************/
  4329  	SKP_Silk_PLC_glue_frames(tls, psDec, bp /* &sDecCtrl */, pOut, L)
  4330  
  4331  	/************************************************/
  4332  	/* Comfort noise generation / estimation        */
  4333  	/************************************************/
  4334  	SKP_Silk_CNG(tls, psDec, bp /* &sDecCtrl */, pOut, L)
  4335  
  4336  	/********************************************/
  4337  	/* HP filter output                            */
  4338  	/********************************************/
  4339  
  4340  	SKP_Silk_biquad(tls, pOut, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_B, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FHP_A, psDec+11208 /* &.HPState */, pOut, L)
  4341  
  4342  	/********************************************/
  4343  	/* set output frame length                    */
  4344  	/********************************************/
  4345  	*(*int16)(unsafe.Pointer(pN)) = int16(L)
  4346  
  4347  	/* Update some decoder state variables */
  4348  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlagPrev = *(*int32)(unsafe.Pointer((bp /* &sDecCtrl */ /* &.pitchL */) + 3*4))
  4349  
  4350  	return ret
  4351  }
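
        // Note on the flow above: action == 0 decodes the payload in pCode, while
        // action == 1 (passed by the caller, or forced when the range decoder
        // reports an error) produces a concealment frame via SKP_Silk_PLC instead.
        // In both cases *pN is written as an int16 sample count and *decBytes
        // reports the payload bytes consumed (the whole buffer length on a decode
        // error).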
  4352  
  4353  /* Decode parameters from payload */
  4354  func SKP_Silk_decode_parameters(tls *libc.TLS, psDec uintptr, psDecCtrl uintptr, q uintptr, fullDecoding int32) { /* SKP_Silk_decode_parameters.c:31:6: */
  4355  	bp := tls.Alloc(208)
  4356  	defer tls.Free(208)
  4357  
  4358  	var i int32
  4359  	var k int32
  4360  	// var Ix int32 at bp, 4
  4361  
  4362  	var fs_kHz_dec int32
  4363  	// var nBytesUsed int32 at bp+204, 4
  4364  
  4365  	// var Ixs [4]int32 at bp+188, 16
  4366  
  4367  	// var GainsIndices [4]int32 at bp+4, 16
  4368  
  4369  	// var NLSFIndices [10]int32 at bp+20, 40
  4370  
  4371  	// var pNLSF_Q15 [16]int32 at bp+60, 64
  4372  
  4373  	// var pNLSF0_Q15 [16]int32 at bp+124, 64
  4374  
  4375  	var cbk_ptr_Q14 uintptr
  4376  	var psNLSF_CB uintptr = uintptr(0)
  4377  	var psRC uintptr = (psDec /* &.sRC */)
  4378  
  4379  	/************************/
  4380  	/* Decode sampling rate */
  4381  	/************************/
  4382  	/* only done for first frame of packet */
  4383  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded == 0 {
  4384  		SKP_Silk_range_decoder(tls, bp /* &Ix */, psRC, uintptr(unsafe.Pointer(&SKP_Silk_SamplingRates_CDF)), SKP_Silk_SamplingRates_offset)
  4385  
  4386  		/* check that sampling rate is supported */
  4387  		if (*(*int32)(unsafe.Pointer(bp /* Ix */)) < 0) || (*(*int32)(unsafe.Pointer(bp /* Ix */)) > 3) {
  4388  			(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -7
  4389  			return
  4390  		}
  4391  		fs_kHz_dec = SKP_Silk_SamplingRates_table[*(*int32)(unsafe.Pointer(bp /* Ix */))]
  4392  		SKP_Silk_decoder_set_fs(tls, psDec, fs_kHz_dec)
  4393  	}
  4394  
  4395  	/*******************************************/
  4396  	/* Decode signal type and quantizer offset */
  4397  	/*******************************************/
  4398  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded == 0 {
  4399  		/* first frame in packet: independent coding */
  4400  		SKP_Silk_range_decoder(tls, bp /* &Ix */, psRC, uintptr(unsafe.Pointer(&SKP_Silk_type_offset_CDF)), SKP_Silk_type_offset_CDF_offset)
  4401  	} else {
  4402  		/* conditional coding */
  4403  		SKP_Silk_range_decoder(tls, bp /* &Ix */, psRC, (uintptr(unsafe.Pointer(&SKP_Silk_type_offset_joint_CDF)) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FtypeOffsetPrev)*10),
  4404  			SKP_Silk_type_offset_CDF_offset)
  4405  	}
  4406  	(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype = ((*(*int32)(unsafe.Pointer(bp /* Ix */))) >> (1))
  4407  	(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FQuantOffsetType = (*(*int32)(unsafe.Pointer(bp /* Ix */)) & 1)
  4408  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FtypeOffsetPrev = *(*int32)(unsafe.Pointer(bp /* Ix */))
  4409  
  4410  	/****************/
  4411  	/* Decode gains */
  4412  	/****************/
  4413  	/* first subframe */
  4414  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded == 0 {
  4415  		/* first frame in packet: independent coding */
  4416  		SKP_Silk_range_decoder(tls, (bp + 4 /* &GainsIndices */), psRC, (uintptr(unsafe.Pointer(&SKP_Silk_gain_CDF)) + uintptr((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype)*130), SKP_Silk_gain_CDF_offset)
  4417  	} else {
  4418  		/* conditional coding */
  4419  		SKP_Silk_range_decoder(tls, (bp + 4 /* &GainsIndices */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_delta_gain_CDF)), SKP_Silk_delta_gain_CDF_offset)
  4420  	}
  4421  
  4422  	/* remaining subframes */
  4423  	for i = 1; i < 4; i++ {
  4424  		SKP_Silk_range_decoder(tls, (bp + 4 /* &GainsIndices */ + uintptr(i)*4), psRC, uintptr(unsafe.Pointer(&SKP_Silk_delta_gain_CDF)), SKP_Silk_delta_gain_CDF_offset)
  4425  	}
  4426  
  4427  	/* Dequant Gains */
  4428  	SKP_Silk_gains_dequant(tls, psDecCtrl+16 /* &.Gains_Q16 */, bp+4 /* &GainsIndices[0] */, (psDec + 11196 /* &.LastGainIndex */), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded)
  4429  	/****************/
  4430  	/* Decode NLSFs */
  4431  	/****************/
  4432  	/* Set pointer to NLSF VQ CB for the current signal type */
  4433  	psNLSF_CB = *(*uintptr)(unsafe.Pointer((psDec + 11500 /* &.psNLSF_CB */) + uintptr((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype)*4))
  4434  
  4435  	/* Range decode NLSF path */
  4436  	SKP_Silk_range_decoder_multi(tls, bp+20 /* &NLSFIndices[0] */, psRC, (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FStartPtr, (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FMiddleIx, (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages)
  4437  
  4438  	/* From the NLSF path, decode an NLSF vector */
  4439  	SKP_Silk_NLSF_MSVQ_decode(tls, bp+60 /* &pNLSF_Q15[0] */, psNLSF_CB, bp+20 /* &NLSFIndices[0] */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order)
  4440  
  4441  	/************************************/
  4442  	/* Decode NLSF interpolation factor */
  4443  	/************************************/
  4444  	SKP_Silk_range_decoder(tls, (psDecCtrl + 160 /* &.NLSFInterpCoef_Q2 */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_NLSF_interpolation_factor_CDF)),
  4445  		SKP_Silk_NLSF_interpolation_factor_offset)
  4446  
  4447  	/* If just reset, e.g., because internal Fs changed, do not allow interpolation */
  4448  	/* improves the case of packet loss in the first frame after a switch           */
  4449  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffirst_frame_after_reset == 1 {
  4450  		(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FNLSFInterpCoef_Q2 = 4
  4451  	}
  4452  
  4453  	if fullDecoding != 0 {
  4454  		/* Convert NLSF parameters to AR prediction filter coefficients */
  4455  		SKP_Silk_NLSF2A_stable(tls, ((psDecCtrl + 36 /* &.PredCoef_Q12 */) + 1*32), bp+60 /* &pNLSF_Q15[0] */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order)
  4456  
  4457  		if (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FNLSFInterpCoef_Q2 < 4 {
  4458  			/* Calculation of the interpolated NLSF0 vector from the interpolation factor, */
  4459  			/* the previous NLSF1, and the current NLSF1                                   */
  4460  			for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order; i++ {
  4461  				*(*int32)(unsafe.Pointer(bp + 124 /* &pNLSF0_Q15[0] */ + uintptr(i)*4)) = (*(*int32)(unsafe.Pointer((psDec + 11244 /* &.prevNLSF_Q15 */) + uintptr(i)*4)) + ((((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FNLSFInterpCoef_Q2) * (*(*int32)(unsafe.Pointer(bp + 60 /* &pNLSF_Q15[0] */ + uintptr(i)*4)) - *(*int32)(unsafe.Pointer((psDec + 11244 /* &.prevNLSF_Q15 */) + uintptr(i)*4)))) >> (2)))
  4462  			}
  4463  
  4464  			/* Convert NLSF parameters to AR prediction filter coefficients */
  4465  			SKP_Silk_NLSF2A_stable(tls, (psDecCtrl + 36 /* &.PredCoef_Q12 */), bp+124 /* &pNLSF0_Q15[0] */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order)
  4466  		} else {
  4467  			/* Copy LPC coefficients for first half from second half */
  4468  			libc.Xmemcpy(tls, (psDecCtrl + 36 /* &.PredCoef_Q12 */), ((psDecCtrl + 36 /* &.PredCoef_Q12 */) + 1*32), (uint32((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) * uint32(unsafe.Sizeof(int16(0)))))
  4469  		}
  4470  	}
  4471  
  4472  	libc.Xmemcpy(tls, psDec+11244 /* &.prevNLSF_Q15 */, bp+60 /* &pNLSF_Q15[0] */, (uint32((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) * uint32(unsafe.Sizeof(int32(0)))))
  4473  
  4474  	/* After a packet loss do BWE of LPC coefs */
  4475  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt != 0 {
  4476  		SKP_Silk_bwexpander(tls, (psDecCtrl + 36 /* &.PredCoef_Q12 */), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order, 63570)
  4477  		SKP_Silk_bwexpander(tls, ((psDecCtrl + 36 /* &.PredCoef_Q12 */) + 1*32), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order, 63570)
  4478  	}
  4479  
  4480  	if (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype == 0 {
  4481  		/*********************/
  4482  		/* Decode pitch lags */
  4483  		/*********************/
  4484  		/* Get lag index */
  4485  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz == 8 {
  4486  			SKP_Silk_range_decoder(tls, (bp + 188 /* &Ixs */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_NB_CDF)), SKP_Silk_pitch_lag_NB_CDF_offset)
  4487  		} else if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz == 12 {
  4488  			SKP_Silk_range_decoder(tls, (bp + 188 /* &Ixs */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_MB_CDF)), SKP_Silk_pitch_lag_MB_CDF_offset)
  4489  		} else if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz == 16 {
  4490  			SKP_Silk_range_decoder(tls, (bp + 188 /* &Ixs */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_WB_CDF)), SKP_Silk_pitch_lag_WB_CDF_offset)
  4491  		} else {
  4492  			SKP_Silk_range_decoder(tls, (bp + 188 /* &Ixs */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_SWB_CDF)), SKP_Silk_pitch_lag_SWB_CDF_offset)
  4493  		}
  4494  
  4495  		/* Get contour index */
  4496  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz == 8 {
  4497  			/* Fewer codevectors used in 8 kHz mode */
  4498  			SKP_Silk_range_decoder(tls, (bp + 188 /* &Ixs */ + 1*4), psRC, uintptr(unsafe.Pointer(&SKP_Silk_pitch_contour_NB_CDF)), SKP_Silk_pitch_contour_NB_CDF_offset)
  4499  		} else {
  4500  			/* Joint for 12, 16, and 24 kHz */
  4501  			SKP_Silk_range_decoder(tls, (bp + 188 /* &Ixs */ + 1*4), psRC, uintptr(unsafe.Pointer(&SKP_Silk_pitch_contour_CDF)), SKP_Silk_pitch_contour_CDF_offset)
  4502  		}
  4503  
  4504  		/* Decode pitch values */
  4505  		SKP_Silk_decode_pitch(tls, *(*int32)(unsafe.Pointer(bp + 188 /* &Ixs[0] */)), *(*int32)(unsafe.Pointer(bp + 188 /* &Ixs[0] */ + 1*4)), psDecCtrl /* &.pitchL */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz)
  4506  
  4507  		/********************/
  4508  		/* Decode LTP gains */
  4509  		/********************/
  4510  		/* Decode PERIndex value */
  4511  		SKP_Silk_range_decoder(tls, (psDecCtrl + 144 /* &.PERIndex */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_LTP_per_index_CDF)),
  4512  			SKP_Silk_LTP_per_index_CDF_offset)
  4513  
  4514  		/* Decode Codebook Index */
  4515  		cbk_ptr_Q14 = SKP_Silk_LTP_vq_ptrs_Q14[(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FPERIndex] /* set pointer to start of codebook */
  4516  
  4517  		for k = 0; k < 4; k++ {
  4518  			SKP_Silk_range_decoder(tls, bp /* &Ix */, psRC, SKP_Silk_LTP_gain_CDF_ptrs[(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FPERIndex],
  4519  				SKP_Silk_LTP_gain_CDF_offsets[(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FPERIndex])
  4520  
  4521  			for i = 0; i < 5; i++ {
  4522  				*(*int16)(unsafe.Pointer((psDecCtrl + 100 /* &.LTPCoef_Q14 */) + uintptr(((k*5)+i))*2)) = *(*int16)(unsafe.Pointer(cbk_ptr_Q14 + uintptr(((*(*int32)(unsafe.Pointer(bp /* Ix */))*5)+i))*2))
  4523  			}
  4524  		}
  4525  
  4526  		/**********************/
  4527  		/* Decode LTP scaling */
  4528  		/**********************/
  4529  		SKP_Silk_range_decoder(tls, bp /* &Ix */, psRC, uintptr(unsafe.Pointer(&SKP_Silk_LTPscale_CDF)), SKP_Silk_LTPscale_offset)
  4530  		(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FLTP_scale_Q14 = int32(SKP_Silk_LTPScales_table_Q14[*(*int32)(unsafe.Pointer(bp /* Ix */))])
  4531  	} else {
  4532  
  4533  		libc.Xmemset(tls, psDecCtrl /* &.pitchL */, 0, (uint32(4) * uint32(unsafe.Sizeof(int32(0)))))
  4534  		libc.Xmemset(tls, psDecCtrl+100 /* &.LTPCoef_Q14 */, 0, ((uint32(5 * 4)) * uint32(unsafe.Sizeof(int16(0)))))
  4535  		(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FPERIndex = 0
  4536  		(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FLTP_scale_Q14 = 0
  4537  	}
  4538  
  4539  	/***************/
  4540  	/* Decode seed */
  4541  	/***************/
  4542  	SKP_Silk_range_decoder(tls, bp /* &Ix */, psRC, uintptr(unsafe.Pointer(&SKP_Silk_Seed_CDF)), SKP_Silk_Seed_offset)
  4543  	(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FSeed = *(*int32)(unsafe.Pointer(bp /* Ix */))
  4544  	/*********************************************/
  4545  	/* Decode quantization indices of excitation */
  4546  	/*********************************************/
  4547  	SKP_Silk_decode_pulses(tls, psRC, psDecCtrl, q, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length)
  4548  
  4549  	/*********************************************/
  4550  	/* Decode VAD flag                           */
  4551  	/*********************************************/
  4552  	SKP_Silk_range_decoder(tls, (psDec + 11508 /* &.vadFlag */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_vadflag_CDF)), SKP_Silk_vadflag_offset)
  4553  
  4554  	/**************************************/
  4555  	/* Decode Frame termination indicator */
  4556  	/**************************************/
  4557  	SKP_Silk_range_decoder(tls, (psDec + 11328 /* &.FrameTermination */), psRC, uintptr(unsafe.Pointer(&SKP_Silk_FrameTermination_CDF)), SKP_Silk_FrameTermination_offset)
  4558  
  4559  	/****************************************/
  4560  	/* get number of bytes used so far      */
  4561  	/****************************************/
  4562  	SKP_Silk_range_coder_get_length(tls, psRC, bp+204 /* &nBytesUsed */)
  4563  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnBytesLeft = ((*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength - *(*int32)(unsafe.Pointer(bp + 204 /* nBytesUsed */)))
  4564  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnBytesLeft < 0 {
  4565  		(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -6
  4566  	}
  4567  
  4568  	/****************************************/
  4569  	/* check remaining bits in last byte    */
  4570  	/****************************************/
  4571  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnBytesLeft == 0 {
  4572  		SKP_Silk_range_coder_check_after_decoding(tls, psRC)
  4573  	}
  4574  }
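
        // Summary of the section comments above: a SILK frame is decoded in this
        // fixed order: sampling rate (first frame of a packet only), signal type
        // and quantizer offset, four subframe gains, NLSF codebook indices plus the
        // NLSF interpolation factor, then for voiced frames the pitch lags, LTP
        // codebook gains and LTP scaling, followed by the seed, the excitation
        // pulses, the VAD flag and the frame termination indicator.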
  4575  
  4576  func SKP_Silk_decode_pitch(tls *libc.TLS, lagIndex int32, contourIndex int32, pitch_lags uintptr, Fs_kHz int32) { /* SKP_Silk_decode_pitch.c:34:6: */
  4577  	var lag int32
  4578  	var i int32
  4579  	var min_lag int32
  4580  
  4581  	min_lag = ((int32(int16(2))) * (int32(int16(Fs_kHz))))
  4582  
  4583  	/* Only for 24 / 16 kHz version for now */
  4584  	lag = (min_lag + lagIndex)
  4585  	if Fs_kHz == 8 {
  4586  		/* Only a small codebook for 8 khz */
  4587  		for i = 0; i < 4; i++ {
  4588  			*(*int32)(unsafe.Pointer(pitch_lags + uintptr(i)*4)) = (lag + int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage2)) + uintptr(i)*22) + uintptr(contourIndex)*2))))
  4589  		}
  4590  	} else {
  4591  		for i = 0; i < 4; i++ {
  4592  			*(*int32)(unsafe.Pointer(pitch_lags + uintptr(i)*4)) = (lag + int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage3)) + uintptr(i)*68) + uintptr(contourIndex)*2))))
  4593  		}
  4594  	}
  4595  }
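
        // In other words: the decoded lag is min_lag + lagIndex with
        // min_lag = 2 * Fs_kHz (for example 32 samples at 16 kHz), and the four
        // per-subframe lags are that value plus the contour offsets taken from
        // SKP_Silk_CB_lags_stage2 (8 kHz) or SKP_Silk_CB_lags_stage3.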
  4596  
  4597  /*********************************************/
  4598  /* Decode quantization indices of excitation */
  4599  /*********************************************/
  4600  func SKP_Silk_decode_pulses(tls *libc.TLS, psRC uintptr, psDecCtrl uintptr, q uintptr, frame_length int32) { /* SKP_Silk_decode_pulses.c:33:6: */
  4601  	bp := tls.Alloc(244)
  4602  	defer tls.Free(244)
  4603  
  4604  	var i int32
  4605  	var j int32
  4606  	var k int32
  4607  	var iter int32
  4608  	var abs_q int32
  4609  	var nLS int32
  4610  	// var bit int32 at bp+240, 4
  4611  
  4612  	// var sum_pulses [30]int32 at bp+120, 120
  4613  
  4614  	// var nLshifts [30]int32 at bp, 120
  4615  
  4616  	var pulses_ptr uintptr
  4617  	var cdf_ptr uintptr
  4618  
  4619  	/*********************/
  4620  	/* Decode rate level */
  4621  	/*********************/
  4622  	SKP_Silk_range_decoder(tls, (psDecCtrl + 148 /* &.RateLevelIndex */), psRC,
  4623  		(uintptr(unsafe.Pointer(&SKP_Silk_rate_levels_CDF)) + uintptr((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype)*20), SKP_Silk_rate_levels_CDF_offset)
  4624  
  4625  	/* Calculate number of shell blocks */
  4626  	iter = (frame_length / 16)
  4627  
  4628  	/***************************************************/
  4629  	/* Sum-Weighted-Pulses Decoding                    */
  4630  	/***************************************************/
  4631  	cdf_ptr = (uintptr(unsafe.Pointer(&SKP_Silk_pulses_per_block_CDF)) + uintptr((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FRateLevelIndex)*42)
  4632  	for i = 0; i < iter; i++ {
  4633  		*(*int32)(unsafe.Pointer(bp /* &nLshifts[0] */ + uintptr(i)*4)) = 0
  4634  		SKP_Silk_range_decoder(tls, (bp + 120 /* &sum_pulses */ + uintptr(i)*4), psRC, cdf_ptr, SKP_Silk_pulses_per_block_CDF_offset)
  4635  
  4636  		/* LSB indication */
  4637  		for *(*int32)(unsafe.Pointer(bp + 120 /* &sum_pulses[0] */ + uintptr(i)*4)) == (18 + 1) {
  4638  			*(*int32)(unsafe.Pointer(bp /* &nLshifts[0] */ + uintptr(i)*4))++
  4639  			SKP_Silk_range_decoder(tls, (bp + 120 /* &sum_pulses */ + uintptr(i)*4), psRC,
  4640  				(uintptr(unsafe.Pointer(&SKP_Silk_pulses_per_block_CDF)) + 9*42), SKP_Silk_pulses_per_block_CDF_offset)
  4641  		}
  4642  	}
  4643  
  4644  	/***************************************************/
  4645  	/* Shell decoding                                  */
  4646  	/***************************************************/
  4647  	for i = 0; i < iter; i++ {
  4648  		if *(*int32)(unsafe.Pointer(bp + 120 /* &sum_pulses[0] */ + uintptr(i)*4)) > 0 {
  4649  			SKP_Silk_shell_decoder(tls, (q + uintptr(((int32(int16(i)))*(int32(int16(16)))))*4), psRC, *(*int32)(unsafe.Pointer(bp + 120 /* &sum_pulses[0] */ + uintptr(i)*4)))
  4650  		} else {
  4651  			libc.Xmemset(tls, (q + uintptr(((int32(int16(i)))*(int32(int16(16)))))*4), 0, (uint32(16) * uint32(unsafe.Sizeof(int32(0)))))
  4652  		}
  4653  	}
  4654  
  4655  	/***************************************************/
  4656  	/* LSB Decoding                                    */
  4657  	/***************************************************/
  4658  	for i = 0; i < iter; i++ {
  4659  		if *(*int32)(unsafe.Pointer(bp /* &nLshifts[0] */ + uintptr(i)*4)) > 0 {
  4660  			nLS = *(*int32)(unsafe.Pointer(bp /* &nLshifts[0] */ + uintptr(i)*4))
  4661  			pulses_ptr = (q + uintptr(((int32(int16(i)))*(int32(int16(16)))))*4)
  4662  			for k = 0; k < 16; k++ {
  4663  				abs_q = *(*int32)(unsafe.Pointer(pulses_ptr + uintptr(k)*4))
  4664  				for j = 0; j < nLS; j++ {
  4665  					abs_q = ((abs_q) << (1))
  4666  					SKP_Silk_range_decoder(tls, bp+240 /* &bit */, psRC, uintptr(unsafe.Pointer(&SKP_Silk_lsb_CDF)), 1)
  4667  					abs_q = abs_q + (*(*int32)(unsafe.Pointer(bp + 240 /* bit */)))
  4668  				}
  4669  				*(*int32)(unsafe.Pointer(pulses_ptr + uintptr(k)*4)) = abs_q
  4670  			}
  4671  		}
  4672  	}
  4673  
  4674  	/****************************************/
  4675  	/* Decode and add signs to pulse signal */
  4676  	/****************************************/
  4677  	SKP_Silk_decode_signs(tls, psRC, q, frame_length, (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype,
  4678  		(*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FQuantOffsetType, (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FRateLevelIndex)
  4679  }
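
        // Structure of the decoding above: the excitation is shell-coded in blocks
        // of 16 samples, so iter = frame_length/16 blocks are decoded, at most 30
        // to match the sum_pulses/nLshifts arrays (for example 480/16 = 30 for a
        // 20 ms frame at 24 kHz). Per block the total pulse count is decoded first,
        // the shell decoder then splits it down to individual samples, extra LSB
        // rounds are decoded whenever the count hit its maximum, and finally the
        // signs are decoded and applied for the whole frame.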
  4680  
  4681  /***********************************************************************
  4682  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  4683  Redistribution and use in source and binary forms, with or without
  4684  modification, (subject to the limitations in the disclaimer below)
  4685  are permitted provided that the following conditions are met:
  4686  - Redistributions of source code must retain the above copyright notice,
  4687  this list of conditions and the following disclaimer.
  4688  - Redistributions in binary form must reproduce the above copyright
  4689  notice, this list of conditions and the following disclaimer in the
  4690  documentation and/or other materials provided with the distribution.
  4691  - Neither the name of Skype Limited, nor the names of specific
  4692  contributors, may be used to endorse or promote products derived from
  4693  this software without specific prior written permission.
  4694  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  4695  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  4696  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  4697  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  4698  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  4699  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  4700  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  4701  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  4702  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  4703  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  4704  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  4705  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  4706  ***********************************************************************/
  4734  
  4735  /******************/
  4736  /* Error messages */
  4737  /******************/
  4738  
  4739  /**************************/
  4740  /* Encoder error messages */
  4741  /**************************/
  4742  
  4743  /* Input length is not a multiple of 10 ms, or length is longer than the packet length */
  4744  
  4745  /* Sampling frequency not 8000, 12000, 16000 or 24000 Hertz */
  4746  
  4747  /* Packet size not 20, 40, 60, 80 or 100 ms */
  4748  
  4749  /* Allocated payload buffer too short */
  4750  
  4751  /* Loss rate not between 0 and 100 percent */
  4752  
  4753  /* Complexity setting not valid, use 0, 1 or 2 */
  4754  
  4755  /* Inband FEC setting not valid, use 0 or 1 */
  4756  
  4757  /* DTX setting not valid, use 0 or 1 */
  4758  
  4759  /* Internal encoder error */
  4760  
  4761  /**************************/
  4762  /* Decoder error messages */
  4763  /**************************/
  4764  
  4765  /* Output sampling frequency lower than internal decoded sampling frequency */
  4766  
  4767  /* Payload size exceeded the maximum allowed 1024 bytes */
  4768  
  4769  /* Payload has bit errors */
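
        // Note: ccgo dropped the numeric SKP_SILK_* error constants these comments
        // documented; this translation returns the raw values directly, e.g.
        // -11/-12 from SKP_Silk_decode_frame on range-coder errors and -11/-10 from
        // SKP_Silk_SDK_Decode for an oversized payload or an unsupported API
        // sample rate.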
  4770  
  4771  /* Struct for TOC (Table of Contents) */
  4772  type SKP_Silk_TOC_struct = struct {
  4773  	FframesInPacket int32
  4774  	Ffs_kHz         int32
  4775  	FinbandLBRR     int32
  4776  	Fcorrupt        int32
  4777  	FvadFlags       [5]int32
  4778  	FsigtypeFlags   [5]int32
  4779  } /* SKP_Silk_SDK_API.h:50:3 */
  4780  
  4781  /*********************/
  4782  /* Decoder functions */
  4783  /*********************/
  4784  
  4785  func SKP_Silk_SDK_Get_Decoder_Size(tls *libc.TLS, decSizeBytes uintptr) int32 { /* SKP_Silk_dec_API.c:35:9: */
  4786  	var ret int32 = 0
  4787  
  4788  	*(*int32)(unsafe.Pointer(decSizeBytes)) = int32(unsafe.Sizeof(SKP_Silk_decoder_state{}))
  4789  
  4790  	return ret
  4791  }
  4792  
  4793  /* Reset decoder state */
  4794  func SKP_Silk_SDK_InitDecoder(tls *libc.TLS, decState uintptr) int32 { /* SKP_Silk_dec_API.c:45:9: */
  4795  	var ret int32 = 0
  4796  	var struc uintptr
  4797  
  4798  	struc = decState
  4799  
  4800  	ret = SKP_Silk_init_decoder(tls, struc)
  4801  
  4802  	return ret
  4803  }
  4804  
  4805  /* Decode a frame */
  4806  func SKP_Silk_SDK_Decode(tls *libc.TLS, decState uintptr, decControl uintptr, lostFlag int32, inData uintptr, nBytesIn int32, samplesOut uintptr, nSamplesOut uintptr) int32 { /* SKP_Silk_dec_API.c:60:9: */
  4807  	bp := tls.Alloc(3844)
  4808  	defer tls.Free(3844)
  4809  
  4810  	var ret int32 = 0
  4811  	// var used_bytes int32 at bp+1920, 4
  4812  
  4813  	var prev_fs_kHz int32
  4814  	var psDec uintptr
  4815  	// var samplesOutInternal [960]int16 at bp, 1920
  4816  
  4817  	var pSamplesOutInternal uintptr
  4818  
  4819  	psDec = decState
  4820  
  4821  	/* We need this buffer to have room for an internal frame */
  4822  	pSamplesOutInternal = samplesOut
  4823  	if ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz * 1000) > (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate {
  4824  		pSamplesOutInternal = bp /* &samplesOutInternal[0] */
  4825  	}
  4826  
  4827  	/**********************************/
  4828  	/* Test if first frame in payload */
  4829  	/**********************************/
  4830  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FmoreInternalDecoderFrames == 0 {
  4831  		/* First Frame in Payload */
  4832  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded = 0 /* Used to count frames in packet */
  4833  	}
  4834  
  4835  	if (((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FmoreInternalDecoderFrames == 0) && (lostFlag == 0)) && (nBytesIn > 1024) { /* Too long payload         */
  4836  		/* Avoid trying to decode a too large packet */
  4837  		lostFlag = 1
  4838  		ret = -11
  4839  	}
  4840  
  4841  	/* Save previous sample frequency */
  4842  	prev_fs_kHz = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz
  4843  
  4844  	/* Call decoder for one frame */
  4845  	ret = ret + (SKP_Silk_decode_frame(tls, psDec, pSamplesOutInternal, nSamplesOut, inData, nBytesIn,
  4846  		lostFlag, bp+1920 /* &used_bytes */))
  4847  
  4848  	if *(*int32)(unsafe.Pointer(bp + 1920 /* used_bytes */)) != 0 { /* Only Call if not a packet loss */
  4849  		if (((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnBytesLeft > 0) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FFrameTermination == 1)) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded < 5) {
  4850  			/* We have more frames in the Payload */
  4851  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FmoreInternalDecoderFrames = 1
  4852  		} else {
  4853  			/* Last frame in Payload */
  4854  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FmoreInternalDecoderFrames = 0
  4855  			(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesInPacket = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesDecoded
  4856  
  4857  			/* Track inband FEC usage */
  4858  			if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FvadFlag == 1 {
  4859  				if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FFrameTermination == 0 {
  4860  					(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fno_FEC_counter++
  4861  					if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fno_FEC_counter > 10 {
  4862  						(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Finband_FEC_offset = 0
  4863  					}
  4864  				} else if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FFrameTermination == 2 {
  4865  					(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Finband_FEC_offset = 1 /* FEC info with 1 packet delay */
  4866  					(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fno_FEC_counter = 0
  4867  				} else if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FFrameTermination == 3 {
  4868  					(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Finband_FEC_offset = 2 /* FEC info with 2 packets delay */
  4869  					(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fno_FEC_counter = 0
  4870  				}
  4871  			}
  4872  		}
  4873  	}
  4874  
  4875  	if ((48 * 1000) < (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate) || (8000 > (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate) {
  4876  		ret = -10
  4877  		return ret
  4878  	}
  4879  
  4880  	/* Resample if needed */
  4881  	if ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz * 1000) != (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate {
  4882  		// var samplesOut_tmp [960]int16 at bp+1924, 1920
  4883  
  4884  		/* Copy to a tmp buffer as the resampling writes to samplesOut */
  4885  		libc.Xmemcpy(tls, bp+1924 /* &samplesOut_tmp[0] */, pSamplesOutInternal, (uint32(*(*int16)(unsafe.Pointer(nSamplesOut))) * uint32(unsafe.Sizeof(int16(0)))))
  4886  
  4887  		/* (Re-)initialize resampler state when switching internal sampling frequency */
  4888  		if (prev_fs_kHz != (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz) || ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_API_sampleRate != (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate) {
  4889  			ret = SKP_Silk_resampler_init(tls, (psDec + 11332 /* &.resampler_state */), ((int32(int16((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz))) * (int32(int16(1000)))), (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate)
  4890  		}
  4891  
  4892  		/* Resample the output to API_sampleRate */
  4893  		ret = ret + (SKP_Silk_resampler(tls, (psDec + 11332 /* &.resampler_state */), samplesOut, bp+1924 /* &samplesOut_tmp[0] */, int32(*(*int16)(unsafe.Pointer(nSamplesOut)))))
  4894  
  4895  		/* Update the number of output samples */
  4896  		*(*int16)(unsafe.Pointer(nSamplesOut)) = int16(((int32(*(*int16)(unsafe.Pointer(nSamplesOut))) * (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate) / ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz * 1000)))
  4897  	} else if (prev_fs_kHz * 1000) > (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate {
  4898  		libc.Xmemcpy(tls, samplesOut, pSamplesOutInternal, (uint32(*(*int16)(unsafe.Pointer(nSamplesOut))) * uint32(unsafe.Sizeof(int16(0)))))
  4899  	}
  4900  
  4901  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_API_sampleRate = (*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate
  4902  
  4903  	/* Copy all parameters that are needed out of the internal structure to the control structure */
  4904  	(*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FframeSize = int32((uint16((*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FAPI_sampleRate / 50)))
  4905  	(*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FframesPerPacket = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FnFramesInPacket
  4906  	(*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FinBandFECOffset = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Finband_FEC_offset
  4907  	(*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(decControl)).FmoreInternalDecoderFrames = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FmoreInternalDecoderFrames
  4908  
  4909  	return ret
  4910  }
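
        // Hypothetical usage sketch (not from the original sources): decoding a
        // SILK payload held in a Go byte slice. Only names that appear in this file
        // are used; buffer sizes and error handling are simplified assumptions, not
        // the canonical API usage.
        //
        //	tls := libc.NewTLS()
        //	szPtr := tls.Alloc(4)
        //	SKP_Silk_SDK_Get_Decoder_Size(tls, szPtr)
        //	dec := tls.Alloc(int(*(*int32)(unsafe.Pointer(szPtr))))
        //	SKP_Silk_SDK_InitDecoder(tls, dec)
        //
        //	ctl := tls.Alloc(int(unsafe.Sizeof(SKP_SILK_SDK_DecControlStruct{})))
        //	(*SKP_SILK_SDK_DecControlStruct)(unsafe.Pointer(ctl)).FAPI_sampleRate = 24000
        //
        //	in := tls.Alloc(len(payload))
        //	copy(unsafe.Slice((*byte)(unsafe.Pointer(in)), len(payload)), payload)
        //	out := tls.Alloc(960 * 2) // room for one 20 ms frame at 48 kHz
        //	nOut := tls.Alloc(2)
        //	ret := SKP_Silk_SDK_Decode(tls, dec, ctl, 0, in, int32(len(payload)), out, nOut)
        //	// *(*int16)(unsafe.Pointer(nOut)) samples were written to out; repeat the
        //	// call while the control struct's FmoreInternalDecoderFrames is non-zero.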
  4911  
  4912  /* Function to find LBRR information in a packet */
  4913  func SKP_Silk_SDK_search_for_LBRR(tls *libc.TLS, inData uintptr, nBytesIn int32, lost_offset int32, LBRRData uintptr, nLBRRBytes uintptr) { /* SKP_Silk_dec_API.c:173:6: */
  4914  	bp := tls.Alloc(15764)
  4915  	defer tls.Free(15764)
  4916  
  4917  	// var sDec SKP_Silk_decoder_state at bp, 13680
  4918  	// Local decoder state to avoid interfering with running decoder
  4919  	// var sDecCtrl SKP_Silk_decoder_control at bp+13680, 164
  4920  
  4921  	// var TempQ [480]int32 at bp+13844, 1920
  4922  
  4923  	if (lost_offset < 1) || (lost_offset > 2) {
  4924  		/* No useful FEC in this packet */
  4925  		*(*int16)(unsafe.Pointer(nLBRRBytes)) = int16(0)
  4926  		return
  4927  	}
  4928  
  4929  	(*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesDecoded = 0
  4930  	(*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).Ffs_kHz = 0  /* Force update parameters LPC_order etc */
  4931  	(*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FlossCnt = 0 /* Avoid running bw expansion of the LPC parameters when searching for LBRR data */
  4932  	libc.Xmemset(tls, bp /* &sDec */ +11244 /* &.prevNLSF_Q15 */, 0, (uint32(16) * uint32(unsafe.Sizeof(int32(0)))))
  4933  	SKP_Silk_range_dec_init(tls, (bp /* &sDec */ /* &.sRC */), inData, nBytesIn)
  4934  
  4935  	for 1 != 0 {
  4936  		SKP_Silk_decode_parameters(tls, bp /* &sDec */, bp+13680 /* &sDecCtrl */, bp+13844 /* &TempQ[0] */, 0)
  4937  
  4938  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FsRC.Ferror != 0 {
  4939  			/* Corrupt stream */
  4940  			*(*int16)(unsafe.Pointer(nLBRRBytes)) = int16(0)
  4941  			return
  4942  		}
  4943  
  4944  		if (((((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination - 1) & lost_offset) != 0) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination > 0)) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnBytesLeft >= 0) {
  4945  			/* The wanted FEC is present in the packet */
  4946  			*(*int16)(unsafe.Pointer(nLBRRBytes)) = int16((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnBytesLeft)
  4947  			libc.Xmemcpy(tls, LBRRData, (inData + uintptr((nBytesIn - (*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnBytesLeft))), (uint32((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnBytesLeft) * uint32(unsafe.Sizeof(uint8(0)))))
  4948  			break
  4949  		}
  4950  		if ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnBytesLeft > 0) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination == 1) {
  4951  			(*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesDecoded++
  4952  		} else {
  4953  			LBRRData = uintptr(0)
  4954  			*(*int16)(unsafe.Pointer(nLBRRBytes)) = int16(0)
  4955  			break
  4956  		}
  4957  	}
  4958  }
  4959  
  4960  /* Getting type of content for a packet */
  4961  func SKP_Silk_SDK_get_TOC(tls *libc.TLS, inData uintptr, nBytesIn int32, Silk_TOC uintptr) { /* SKP_Silk_dec_API.c:222:6: */
  4962  	bp := tls.Alloc(15764)
  4963  	defer tls.Free(15764)
  4964  
  4965  	// var sDec SKP_Silk_decoder_state at bp, 13680
  4966  	// Local decoder state to avoid interfering with running decoder
  4967  	// var sDecCtrl SKP_Silk_decoder_control at bp+13680, 164
  4968  
  4969  	// var TempQ [480]int32 at bp+13844, 1920
  4970  
  4971  	(*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesDecoded = 0
  4972  	(*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).Ffs_kHz = 0 /* Force update parameters LPC_order etc */
  4973  	SKP_Silk_range_dec_init(tls, (bp /* &sDec */ /* &.sRC */), inData, nBytesIn)
  4974  
  4975  	(*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).Fcorrupt = 0
  4976  	for 1 != 0 {
  4977  		SKP_Silk_decode_parameters(tls, bp /* &sDec */, bp+13680 /* &sDecCtrl */, bp+13844 /* &TempQ[0] */, 0)
  4978  
  4979  		*(*int32)(unsafe.Pointer((Silk_TOC + 16 /* &.vadFlags */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesDecoded)*4)) = (*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FvadFlag
  4980  		*(*int32)(unsafe.Pointer((Silk_TOC + 36 /* &.sigtypeFlags */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesDecoded)*4)) = (*SKP_Silk_decoder_control)(unsafe.Pointer(bp + 13680 /* &sDecCtrl */)).Fsigtype
  4981  
  4982  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FsRC.Ferror != 0 {
  4983  			/* Corrupt stream */
  4984  			(*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).Fcorrupt = 1
  4985  			break
  4986  		}
  4987  
  4988  		if ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnBytesLeft > 0) && ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination == 1) {
  4989  			(*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesDecoded++
  4990  		} else {
  4991  			break
  4992  		}
  4993  	}
  4994  	if (((*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).Fcorrupt != 0) || ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination == 1)) || ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesInPacket > 5) {
  4995  		/* Corrupt packet */
  4996  		libc.Xmemset(tls, Silk_TOC, 0, uint32(unsafe.Sizeof(SKP_Silk_TOC_struct{})))
  4997  		(*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).Fcorrupt = 1
  4998  	} else {
  4999  		(*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).FframesInPacket = ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FnFramesDecoded + 1)
  5000  		(*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).Ffs_kHz = (*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).Ffs_kHz
  5001  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination == 0 {
  5002  			(*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).FinbandLBRR = (*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination
  5003  		} else {
  5004  			(*SKP_Silk_TOC_struct)(unsafe.Pointer(Silk_TOC)).FinbandLBRR = ((*SKP_Silk_decoder_state)(unsafe.Pointer(bp /* &sDec */)).FFrameTermination - 1)
  5005  		}
  5006  	}
  5007  }
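
        // A similar hypothetical sketch, reusing tls, in and payload from the
        // decode sketch above, for inspecting a payload before decoding it:
        //
        //	toc := tls.Alloc(int(unsafe.Sizeof(SKP_Silk_TOC_struct{})))
        //	SKP_Silk_SDK_get_TOC(tls, in, int32(len(payload)), toc)
        //	t := (*SKP_Silk_TOC_struct)(unsafe.Pointer(toc))
        //	_ = t.FframesInPacket // also t.Ffs_kHz, t.Fcorrupt, t.FvadFlags, t.FsigtypeFlags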
  5008  
  5009  /**************************/
  5010  /* Get the version number */
  5011  /**************************/
  5012  /* Return a pointer to string specifying the version */
  5013  func SKP_Silk_SDK_get_version(tls *libc.TLS) uintptr { /* SKP_Silk_dec_API.c:275:12: */
  5014  	return uintptr(unsafe.Pointer(&version))
  5015  }
  5016  
  5017  var version = *(*[6]int8)(unsafe.Pointer(ts /* "1.0.9" */)) /* SKP_Silk_dec_API.c:277:23 */
  5018  
  5019  // 7.18.2  Limits of specified-width integer types
  5020  
  5021  // 7.18.2.1  Limits of exact-width integer types
  5022  
  5023  // 7.18.2.2  Limits of minimum-width integer types
  5024  
  5025  // 7.18.2.3  Limits of fastest minimum-width integer types
  5026  
  5027  // 7.18.2.4  Limits of integer types capable of holding
  5028  //     object pointers
  5029  
  5030  // 7.18.2.5  Limits of greatest-width integer types
  5031  
  5032  // 7.18.3  Limits of other integer types
  5033  
  5034  // wint_t is unsigned short for compatibility with MS runtime
  5035  
  5036  // 7.18.4  Macros for integer constants
  5037  
  5038  // 7.18.4.1  Macros for minimum-width integer constants
  5039  //
  5040  //     According to Douglas Gwyn <gwyn@arl.mil>:
  5041  // 	"This spec was changed in ISO/IEC 9899:1999 TC1; in ISO/IEC
  5042  // 	9899:1999 as initially published, the expansion was required
  5043  // 	to be an integer constant of precisely matching type, which
  5044  // 	is impossible to accomplish for the shorter types on most
  5045  // 	platforms, because C99 provides no standard way to designate
  5046  // 	an integer constant with width less than that of type int.
  5047  // 	TC1 changed this to require just an integer constant
  5048  // 	*expression* with *promoted* type."
  5049  //
  5050  // 	The trick used here is from Clive D W Feather.
  5051  
  5052  //  The 'trick' doesn't work in C89 for long long because, without
  5053  //     suffix, (val) will be evaluated as int, not intmax_t
  5054  
  5055  // 7.18.4.2  Macros for greatest-width integer constants
  5056  
  5057  /* assertions */
  5058  
  5059  /* Limits on bitrate */
  5060  
  5061  /* Transition bitrates between modes */
  5062  
  5063  /* Integration/hysteresis threshold for lowering internal sample frequency */
  5064  /* 30000000 -> 6 sec if bitrate is 5000 bps below limit; 3 sec if bitrate is 10000 bps below limit */
  5065  
  5066  /* DTX settings                                 */
  5067  
  5068  /* Number of consecutive packets without FEC before telling the JB (jitter buffer) */
  5069  
  5070  /* Maximum delay between real packet and LBRR packet */
  5071  
  5072  /* LBRR usage defines */
  5073  
  5074  /* Frame termination indicator defines */
  5075  
  5076  /* Number of Second order Sections for SWB detection HP filter */
  5077  
  5078  /* Low complexity setting */
  5079  
  5080  /* Activate bandwidth transition filtering for mode switching */
  5081  
  5082  /* Decoder Parameters */
  5083  
  5084  /* Maximum sampling frequency, should be 16 for some embedded platforms */
  5085  
  5086  /* Signal Types used by silk */
  5087  
  5088  /* VAD Types used by silk */
  5089  
  5090  /* Number of samples per frame */
  5091  
  5092  /* Milliseconds of lookahead for pitch analysis */
  5093  
  5094  /* Length of LPC window used in find pitch */
  5095  
  5096  /* Order of LPC used in find pitch */
  5097  
  5098  /* Milliseconds of lookahead for noise shape analysis */
  5099  
  5100  /* Max length of LPC window used in noise shape analysis */
  5101  
  5102  /* Max number of bytes in payload output buffer (may contain multiple frames) */
  5103  
  5104  /* dB level of lowest gain quantization level */
  5105  /* dB level of highest gain quantization level */
  5106  /* Number of gain quantization levels */
  5107  /* Max increase in gain quantization index */
  5108  /* Max decrease in gain quantization index */
  5109  
  5110  /* Quantization offsets (multiples of 4) */
  5111  
  5112  /* Maximum numbers of iterations used to stabilize a LPC vector */
  5113  
  5114  /* Find Pred Coef defines */
  5115  
  5116  /* LTP quantization settings */
  5117  
  5118  /* Number of subframes */
  5119  
  5120  /* Flag to use harmonic noise shaping */
  5121  
  5122  /* Max LPC order of noise shaping filters */
  5123  
  5124  /* Maximum number of delayed decision states */
  5125  
  5126  /* number of subframes for excitation entropy coding */
  5127  
  5128  /* number of rate levels, for entropy coding of excitation */
  5129  
  5130  /* maximum sum of pulses per shell coding frame */
  5131  
  5132  /***********************/
  5133  /* High pass filtering */
  5134  /***********************/
  5135  
  5136  /***************************/
  5137  /* Voice activity detector */
  5138  /***************************/
  5139  
  5140  /* Sigmoid settings */
  5141  
  5142  /* smoothing for SNR measurement */
  5143  
  5144  /******************/
  5145  /* NLSF quantizer */
  5146  /******************/
  5147  
  5148  /* Based on above defines, calculate how much memory is necessary to allocate */
  5149  
  5150  /* Transition filtering for mode switching */
  5151  
  5152  /* Row based */
  5153  
  5154  /* Column based */
  5155  
  5156  /* BWE factors to apply after packet loss */
  5157  
  5158  /* Defines for CN generation */
  5159  
  5160  func SKP_Silk_detect_SWB_input(tls *libc.TLS, psSWBdetect uintptr, samplesIn uintptr, nSamplesIn int32) { /* SKP_Silk_detect_SWB_input.c:34:6: */
  5161  	bp := tls.Alloc(968)
  5162  	defer tls.Free(968)
  5163  
  5164  	var HP_8_kHz_len int32
  5165  	var i int32
  5166  	// var shift int32 at bp+964, 4
  5167  
  5168  	// var in_HP_8_kHz [480]int16 at bp, 960
  5169  
  5170  	// var energy_32 int32 at bp+960, 4
  5171  
  5172  	/* High-pass filter with cutoff at 8 kHz */
  5173  	HP_8_kHz_len = SKP_min_int(tls, nSamplesIn, (20 * 24))
  5174  	HP_8_kHz_len = SKP_max_int(tls, HP_8_kHz_len, 0)
  5175  
  5176  	/* Cutoff around 9 kHz */
  5177  	/* A = conv(conv([8192,14613, 6868], [8192,12883, 7337]), [8192,11586, 7911]); */
  5178  	/* B = conv(conv([575, -948, 575], [575, -221, 575]), [575, 104, 575]); */
  5179  	SKP_Silk_biquad(tls, samplesIn, (uintptr(unsafe.Pointer(&SKP_Silk_SWB_detect_B_HP_Q13))), (uintptr(unsafe.Pointer(&SKP_Silk_SWB_detect_A_HP_Q13))),
  5180  		(psSWBdetect /* &.S_HP_8_kHz */), bp /* &in_HP_8_kHz[0] */, HP_8_kHz_len)
  5181  	for i = 1; i < 3; i++ {
  5182  		SKP_Silk_biquad(tls, bp /* &in_HP_8_kHz[0] */, (uintptr(unsafe.Pointer(&SKP_Silk_SWB_detect_B_HP_Q13)) + uintptr(i)*6), (uintptr(unsafe.Pointer(&SKP_Silk_SWB_detect_A_HP_Q13)) + uintptr(i)*4),
  5183  			((psSWBdetect /* &.S_HP_8_kHz */) + uintptr(i)*8), bp /* &in_HP_8_kHz[0] */, HP_8_kHz_len)
  5184  	}
  5185  
  5186  	/* Calculate energy in HP signal */
  5187  	SKP_Silk_sum_sqr_shift(tls, bp+960 /* &energy_32 */, bp+964 /* &shift */, bp /* &in_HP_8_kHz[0] */, HP_8_kHz_len)
  5188  
  5189  	/* Count consecutive samples above threshold, after adjusting the threshold for the number of input samples and the shift */
  5190  	if *(*int32)(unsafe.Pointer(bp + 960 /* energy_32 */)) > (((int32(int16(10))) * (int32(int16(HP_8_kHz_len)))) >> (*(*int32)(unsafe.Pointer(bp + 964 /* shift */)))) {
  5191  		*(*int32)(unsafe.Pointer(psSWBdetect + 24 /* &.ConsecSmplsAboveThres */)) += (nSamplesIn)
  5192  		if (*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FConsecSmplsAboveThres > (480 * 15) {
  5193  			(*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FSWB_detected = 1
  5194  		}
  5195  	} else {
  5196  		*(*int32)(unsafe.Pointer(psSWBdetect + 24 /* &.ConsecSmplsAboveThres */)) -= (nSamplesIn)
  5197  		(*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FConsecSmplsAboveThres = func() int32 {
  5198  			if ((*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FConsecSmplsAboveThres) > (0) {
  5199  				return (*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FConsecSmplsAboveThres
  5200  			}
  5201  			return 0
  5202  		}()
  5203  	}
  5204  
  5205  	/* If sufficient speech activity and no SWB detected, we detect the signal as being WB */
  5206  	if ((*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FActiveSpeech_ms > 15000) && ((*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FSWB_detected == 0) {
  5207  		(*SKP_Silk_detect_SWB_state)(unsafe.Pointer(psSWBdetect)).FWB_detected = 1
  5208  	}
  5209  }
  5210  
  5211  /***********************************************************************
  5212  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  5213  Redistribution and use in source and binary forms, with or without
  5214  modification, (subject to the limitations in the disclaimer below)
  5215  are permitted provided that the following conditions are met:
  5216  - Redistributions of source code must retain the above copyright notice,
  5217  this list of conditions and the following disclaimer.
  5218  - Redistributions in binary form must reproduce the above copyright
  5219  notice, this list of conditions and the following disclaimer in the
  5220  documentation and/or other materials provided with the distribution.
  5221  - Neither the name of Skype Limited, nor the names of specific
  5222  contributors, may be used to endorse or promote products derived from
  5223  this software without specific prior written permission.
  5224  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  5225  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  5226  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  5227  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  5228  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  5229  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  5230  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  5231  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  5232  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  5233  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  5234  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  5235  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  5236  ***********************************************************************/
  5237  
  5238  /*******************/
  5239  /* Pitch estimator */
  5240  /*******************/
  5241  
  5242  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
  5243  
  5244  /* Bandwidth expansion for whitening filter in pitch analysis */
  5245  
  5246  /* Threshold used by pitch estimator for early escape */
  5247  
  5248  /*********************/
  5249  /* Linear prediction */
  5250  /*********************/
  5251  
  5252  /* LPC analysis defines: regularization and bandwidth expansion */
  5253  
  5254  /* LTP analysis defines */
  5255  
  5256  /* LTP quantization settings */
  5257  
  5258  /***********************/
  5259  /* High pass filtering */
  5260  /***********************/
  5261  
  5262  /* Smoothing parameters for low end of pitch frequency range estimation */
  5263  
  5264  /* Min and max values for low end of pitch frequency range estimation */
  5265  
  5266  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
  5267  
  5268  /***********/
  5269  /* Various */
  5270  /***********/
  5271  
  5272  /* Required speech activity for counting frame as active */
  5273  
  5274  /* Speech Activity LBRR enable threshold (needs tuning) */
  5275  
  5276  /*************************/
  5277  /* Perceptual parameters */
  5278  /*************************/
  5279  
  5280  /* reduction in coding SNR during low speech activity */
  5281  
  5282  /* factor for reducing quantization noise during voiced speech */
  5283  
  5284  /* factor for reducing quantization noise for unvoiced sparse signals */
  5285  
  5286  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
  5287  
  5288  /* warping control */
  5289  
  5290  /* fraction added to first autocorrelation value */
  5291  
  5292  /* noise shaping filter chirp factor */
  5293  
  5294  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
  5295  
  5296  /* gain reduction for fricatives */
  5297  
  5298  /* extra harmonic boosting (signal shaping) at low bitrates */
  5299  
  5300  /* extra harmonic boosting (signal shaping) for noisy input signals */
  5301  
  5302  /* harmonic noise shaping */
  5303  
  5304  /* extra harmonic noise shaping for high bitrates or noisy input */
  5305  
  5306  /* parameter for shaping noise towards higher frequencies */
  5307  
  5308  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
  5309  
  5310  /* parameter for applying a high-pass tilt to the input signal */
  5311  
  5312  /* parameter for extra high-pass tilt to the input signal at high rates */
  5313  
  5314  /* parameter for reducing noise at the very low frequencies */
  5315  
  5316  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
  5317  
  5318  /* noise floor to put a lower limit on the quantization step size */
  5319  
  5320  /* noise floor relative to active speech gain level */
  5321  
  5322  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
  5323  
  5324  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
  5325  
  5326  /* parameters defining the R/D tradeoff in the residual quantizer */
  5327  
  5328  /****************/
  5329  /* Encode frame */
  5330  /****************/
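        // SKP_Silk_encode_frame_FIX runs the full fixed-point encoder pipeline for one
        // frame: voice activity detection, variable high-pass filtering with smooth
        // bandwidth transitions, pitch analysis, noise-shape analysis and prefiltering,
        // LPC/LTP prediction, gain processing, optional low-bitrate-redundancy (LBRR)
        // encoding, noise-shaping quantization (plain or delayed-decision), range coding
        // of the parameters, and payload assembly once enough 20 ms frames have
        // accumulated to fill one packet.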
  5331  func SKP_Silk_encode_frame_FIX(tls *libc.TLS, psEnc uintptr, pCode uintptr, pnBytesOut uintptr, pIn uintptr) int32 { /* SKP_Silk_encode_frame_FIX.c:34:9: */
  5332  	bp := tls.Alloc(5644)
  5333  	defer tls.Free(5644)
  5334  
  5335  	// var sEncCtrl SKP_Silk_encoder_control_FIX at bp+2020, 672
  5336  
  5337  	// var nBytes int32 at bp+5640, 4
  5338  
  5339  	var ret int32 = 0
  5340  	var x_frame uintptr
  5341  	var res_pitch_frame uintptr
  5342  	// var xfw [480]int16 at bp+3652, 960
  5343  
  5344  	// var pIn_HP [480]int16 at bp+2692, 960
  5345  
  5346  	// var res_pitch [1008]int16 at bp, 2016
  5347  
  5348  	var LBRR_idx int32
  5349  	var frame_terminator int32
  5350  	// var SNR_dB_Q7 int32 at bp+2016, 4
  5351  
  5352  	var FrameTermination_CDF uintptr
  5353  	/* Low bitrate redundancy parameters */
  5354  	// var LBRRpayload [1024]uint8 at bp+4612, 1024
  5355  
  5356  	// var nBytesLBRR int16 at bp+5636, 2
  5357  
  5358  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp + 2020 /* &sEncCtrl */)).FsCmn.FSeed = (libc.PostIncInt32(&(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FframeCounter, 1) & 3)
  5359  	/**************************************************************/
  5360  	/* Set up input pointers, and insert frame in input buffer   */
  5361  	/*************************************************************/
  5362  	x_frame = ((psEnc + 20748 /* &.x_buf */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)*2)  /* start of frame to encode */
  5363  	res_pitch_frame = (bp /* &res_pitch[0] */ + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)*2) /* start of pitch LPC residual frame */
  5364  
  5365  	/****************************/
  5366  	/* Voice Activity Detection */
  5367  	/****************************/
  5368  	ret = SKP_Silk_VAD_GetSA_Q8(tls, (psEnc /* &.sCmn */ + 15032 /* &.sVAD */), (psEnc + 22932 /* &.speech_activity_Q8 */), bp+2016, /* &SNR_dB_Q7 */
  5369  		bp+2020 /* &sEncCtrl */ +620 /* &.input_quality_bands_Q15 */, (bp + 2020 /* &sEncCtrl */ + 636 /* &.input_tilt_Q15 */),
  5370  		pIn, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)
  5371  
  5372  	/*******************************************/
  5373  	/* High-pass filtering of the input signal */
  5374  	/*******************************************/
  5375  	/* Variable high-pass filter */
  5376  	SKP_Silk_HP_variable_cutoff_FIX(tls, psEnc, bp+2020 /* &sEncCtrl */, bp+2692 /* &pIn_HP[0] */, pIn)
  5377  
  5378  	/* Ensure smooth bandwidth transitions */
  5379  	SKP_Silk_LP_variable_cutoff(tls, (psEnc /* &.sCmn */ + 15016 /* &.sLP */), (x_frame + uintptr((5*(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz))*2), bp+2692 /* &pIn_HP[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)
  5380  
  5381  	/*****************************************/
  5382  	/* Find pitch lags, initial LPC analysis */
  5383  	/*****************************************/
  5384  	SKP_Silk_find_pitch_lags_FIX(tls, psEnc, bp+2020 /* &sEncCtrl */, bp /* &res_pitch[0] */, x_frame)
  5385  
  5386  	/************************/
  5387  	/* Noise shape analysis */
  5388  	/************************/
  5389  	SKP_Silk_noise_shape_analysis_FIX(tls, psEnc, bp+2020 /* &sEncCtrl */, res_pitch_frame, x_frame)
  5390  
  5391  	/*****************************************/
  5392  	/* Prefiltering for noise shaper         */
  5393  	/*****************************************/
  5394  	SKP_Silk_prefilter_FIX(tls, psEnc, bp+2020 /* &sEncCtrl */, bp+3652 /* &xfw[0] */, x_frame)
  5395  
  5396  	/***************************************************/
  5397  	/* Find linear prediction coefficients (LPC + LTP) */
  5398  	/***************************************************/
  5399  	SKP_Silk_find_pred_coefs_FIX(tls, psEnc, bp+2020 /* &sEncCtrl */, bp /* &res_pitch[0] */)
  5400  
  5401  	/****************************************/
  5402  	/* Process gains                        */
  5403  	/****************************************/
  5404  	SKP_Silk_process_gains_FIX(tls, psEnc, bp+2020 /* &sEncCtrl */)
  5405  
  5406  	/****************************************/
  5407  	/* Low Bitrate Redundant Encoding       */
  5408  	/****************************************/
  5409  	*(*int16)(unsafe.Pointer(bp + 5636 /* nBytesLBRR */)) = int16(1024)
  5410  	SKP_Silk_LBRR_encode_FIX(tls, psEnc, bp+2020 /* &sEncCtrl */, bp+4612 /* &LBRRpayload[0] */, bp+5636 /* &nBytesLBRR */, bp+3652 /* &xfw[0] */)
  5411  
  5412  	/*****************************************/
  5413  	/* Noise shaping quantization            */
  5414  	/*****************************************/
  5415  	if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnStatesDelayedDecision > 1) || ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fwarping_Q16 > 0) {
  5416  		SKP_Silk_NSQ_del_dec(tls, (psEnc /* &.sCmn */), (bp + 2020 /* &sEncCtrl */ /* &.sCmn */), (psEnc /* &.sCmn */ + 2088 /* &.sNSQ */), bp+3652, /* &xfw[0] */
  5417  			psEnc /* &.sCmn */ +18572 /* &.q */, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp+2020 /* &sEncCtrl */)).FsCmn.FNLSFInterpCoef_Q2,
  5418  			(bp + 2020 /* &sEncCtrl */ + 144 /* &.PredCoef_Q12 */), bp+2020 /* &sEncCtrl */ +208 /* &.LTPCoef_Q14 */, bp+2020 /* &sEncCtrl */ +380 /* &.AR2_Q13 */, bp+2020 /* &sEncCtrl */ +572, /* &.HarmShapeGain_Q14 */
  5419  			bp+2020 /* &sEncCtrl */ +556 /* &.Tilt_Q14 */, bp+2020 /* &sEncCtrl */ +508 /* &.LF_shp_Q14 */, bp+2020 /* &sEncCtrl */ +128 /* &.Gains_Q16 */, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp+2020 /* &sEncCtrl */)).FLambda_Q10,
  5420  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp+2020 /* &sEncCtrl */)).FLTP_scale_Q14)
  5421  	} else {
  5422  		SKP_Silk_NSQ(tls, (psEnc /* &.sCmn */), (bp + 2020 /* &sEncCtrl */ /* &.sCmn */), (psEnc /* &.sCmn */ + 2088 /* &.sNSQ */), bp+3652, /* &xfw[0] */
  5423  			psEnc /* &.sCmn */ +18572 /* &.q */, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp+2020 /* &sEncCtrl */)).FsCmn.FNLSFInterpCoef_Q2,
  5424  			(bp + 2020 /* &sEncCtrl */ + 144 /* &.PredCoef_Q12 */), bp+2020 /* &sEncCtrl */ +208 /* &.LTPCoef_Q14 */, bp+2020 /* &sEncCtrl */ +380 /* &.AR2_Q13 */, bp+2020 /* &sEncCtrl */ +572, /* &.HarmShapeGain_Q14 */
  5425  			bp+2020 /* &sEncCtrl */ +556 /* &.Tilt_Q14 */, bp+2020 /* &sEncCtrl */ +508 /* &.LF_shp_Q14 */, bp+2020 /* &sEncCtrl */ +128 /* &.Gains_Q16 */, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp+2020 /* &sEncCtrl */)).FLambda_Q10,
  5426  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp+2020 /* &sEncCtrl */)).FLTP_scale_Q14)
  5427  	}
  5428  
  5429  	/**************************************************/
  5430  	/* Convert speech activity into VAD and DTX flags */
  5431  	/**************************************************/
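        	// Speech activity below 0.1 (Q8) marks the frame as no-speech; after more than
        	// 5 such frames the encoder enters DTX, and once the counter exceeds 25 it is
        	// reset to 5 with inDTX cleared, so a refresh frame is still sent periodically.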
  5432  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8 < SKP_FIX_CONST(tls, 0.1, 8) {
  5433  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FvadFlag = 0
  5434  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnoSpeechCounter++
  5435  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnoSpeechCounter > 5 {
  5436  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinDTX = 1
  5437  		}
  5438  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnoSpeechCounter > (20 + 5) {
  5439  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnoSpeechCounter = 5
  5440  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinDTX = 0
  5441  		}
  5442  	} else {
  5443  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnoSpeechCounter = 0
  5444  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinDTX = 0
  5445  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FvadFlag = 1
  5446  	}
  5447  
  5448  	/****************************************/
  5449  	/* Initialize range coder               */
  5450  	/****************************************/
  5451  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf == 0 {
  5452  		SKP_Silk_range_enc_init(tls, (psEnc /* &.sCmn */ /* &.sRC */))
  5453  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnBytesInPayloadBuf = 0
  5454  	}
  5455  
  5456  	/****************************************/
  5457  	/* Encode Parameters                    */
  5458  	/****************************************/
  5459  	SKP_Silk_encode_parameters(tls, (psEnc /* &.sCmn */), (bp + 2020 /* &sEncCtrl */ /* &.sCmn */), (psEnc /* &.sCmn */ /* &.sRC */), psEnc /* &.sCmn */ +18572 /* &.q */)
  5460  	FrameTermination_CDF = uintptr(unsafe.Pointer(&SKP_Silk_FrameTermination_CDF))
  5461  
  5462  	/****************************************/
  5463  	/* Update Buffers and State             */
  5464  	/****************************************/
  5465  	/* Update input buffer */
  5466  	libc.Xmemmove(tls, psEnc+20748 /* &.x_buf */, ((psEnc + 20748 /* &.x_buf */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)*2), ((uint32((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length + (5 * (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz))) * uint32(unsafe.Sizeof(int16(0)))))
  5467  
  5468  	/* Parameters needed for next frame */
  5469  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fprev_sigtype = (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp + 2020 /* &sEncCtrl */)).FsCmn.Fsigtype
  5470  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FprevLag = *(*int32)(unsafe.Pointer((bp + 2020 /* &sEncCtrl */ /* &.sCmn */ + 108 /* &.pitchL */) + 3*4))
  5471  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffirst_frame_after_reset = 0
  5472  
  5473  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsRC.Ferror != 0 {
  5474  		/* Encoder returned error: clear payload buffer */
  5475  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf = 0
  5476  	} else {
  5477  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf++
  5478  	}
  5479  
  5480  	/****************************************/
  5481  	/* Finalize payload and copy to output  */
  5482  	/****************************************/
  5483  	if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf * 20) >= (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketSize_ms {
  5484  
  5485  		LBRR_idx = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx + 1) & 1)
  5486  
  5487  		/* Check if FEC information should be added */
  5488  		frame_terminator = 0
  5489  		if (*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEnc /* &.sCmn */ +16256 /* &.LBRR_buffer */)+uintptr(LBRR_idx)*1032)).Fusage == 1 {
  5490  			frame_terminator = 2
  5491  		}
  5492  		if (*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEnc /* &.sCmn */ +16256 /* &.LBRR_buffer */)+uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx)*1032)).Fusage == 2 {
  5493  			frame_terminator = 3
  5494  			LBRR_idx = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx
  5495  		}
  5496  
  5497  		/* Add the frame termination info to stream */
  5498  		SKP_Silk_range_encoder(tls, (psEnc /* &.sCmn */ /* &.sRC */), frame_terminator, FrameTermination_CDF)
  5499  
  5500  		/* Payload length so far */
  5501  		SKP_Silk_range_coder_get_length(tls, (psEnc /* &.sCmn */ /* &.sRC */), bp+5640 /* &nBytes */)
  5502  
  5503  		/* Check that there is enough space in external output buffer, and move data */
  5504  		if int32(*(*int16)(unsafe.Pointer(pnBytesOut))) >= *(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */)) {
  5505  			SKP_Silk_range_enc_wrap_up(tls, (psEnc /* &.sCmn */ /* &.sRC */))
  5506  			libc.Xmemcpy(tls, pCode, psEnc /* &.sCmn */ /* &.sRC */ +20 /* &.buffer */, (uint32(*(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */))) * uint32(unsafe.Sizeof(uint8(0)))))
  5507  
  5508  			if (frame_terminator > 1) && (int32(*(*int16)(unsafe.Pointer(pnBytesOut))) >= (*(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */)) + (*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEnc /* &.sCmn */ +16256 /* &.LBRR_buffer */)+uintptr(LBRR_idx)*1032)).FnBytes)) {
  5509  				/* Get old packet and add to payload. */
  5510  				libc.Xmemcpy(tls, (pCode + uintptr(*(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */)))), (psEnc /* &.sCmn */ +16256 /* &.LBRR_buffer */)+uintptr(LBRR_idx)*1032 /* &.payload */, (uint32((*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEnc /* &.sCmn */ +16256 /* &.LBRR_buffer */)+uintptr(LBRR_idx)*1032)).FnBytes) * uint32(unsafe.Sizeof(uint8(0)))))
  5511  				*(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */)) += (*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEnc /* &.sCmn */ + 16256 /* &.LBRR_buffer */) + uintptr(LBRR_idx)*1032)).FnBytes
  5512  			}
  5513  
  5514  			*(*int16)(unsafe.Pointer(pnBytesOut)) = int16(*(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */)))
  5515  
  5516  			/* Update FEC buffer */
  5517  			libc.Xmemcpy(tls, (psEnc /* &.sCmn */ +16256 /* &.LBRR_buffer */)+uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx)*1032 /* &.payload */, bp+4612 /* &LBRRpayload[0] */, (uint32(*(*int16)(unsafe.Pointer(bp + 5636 /* nBytesLBRR */))) * uint32(unsafe.Sizeof(uint8(0)))))
  5518  			(*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEnc /* &.sCmn */ + 16256 /* &.LBRR_buffer */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx)*1032)).FnBytes = int32(*(*int16)(unsafe.Pointer(bp + 5636 /* nBytesLBRR */)))
  5519  			/* The line below describes how FEC should be used */
  5520  			(*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEnc /* &.sCmn */ + 16256 /* &.LBRR_buffer */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx)*1032)).Fusage = (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(bp + 2020 /* &sEncCtrl */)).FsCmn.FLBRR_usage
  5521  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Foldest_LBRR_idx + 1) & 1)
  5522  
  5523  		} else {
  5524  			/* Not enough space: Payload will be discarded */
  5525  			*(*int16)(unsafe.Pointer(pnBytesOut)) = int16(0)
  5526  			*(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */)) = 0
  5527  			ret = -4
  5528  		}
  5529  
  5530  		/* Reset the number of frames in payload buffer */
  5531  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf = 0
  5532  	} else {
  5533  		/* No payload this time */
  5534  		*(*int16)(unsafe.Pointer(pnBytesOut)) = int16(0)
  5535  
  5536  		/* Encode that more frames follow */
  5537  		frame_terminator = 1
  5538  		SKP_Silk_range_encoder(tls, (psEnc /* &.sCmn */ /* &.sRC */), frame_terminator, FrameTermination_CDF)
  5539  
  5540  		/* Payload length so far */
  5541  		SKP_Silk_range_coder_get_length(tls, (psEnc /* &.sCmn */ /* &.sRC */), bp+5640 /* &nBytes */)
  5542  
  5543  	}
  5544  
  5545  	/* Check for arithmetic coder errors */
  5546  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsRC.Ferror != 0 {
  5547  		ret = -9
  5548  	}
  5549  
  5550  	/* Simulate number of ms buffered in channel because of exceeding TargetRate */
  5551  
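        	// The bytes added this frame occupy 8*1000*(nBytes - nBytesInPayloadBuf) /
        	// TargetRate_bps ms of channel time, while each 20 ms frame drains 20 ms; the
        	// nested closures below are an expanded clamp (presumably the SKP_LIMIT macro
        	// in the C source) keeping BufferedInChannel_ms within [0, 100] ms.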
  5552  	*(*int32)(unsafe.Pointer(psEnc + 22928 /* &.BufferedInChannel_ms */)) += (((8 * 1000) * (*(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnBytesInPayloadBuf)) / ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FTargetRate_bps))
  5553  	*(*int32)(unsafe.Pointer(psEnc + 22928 /* &.BufferedInChannel_ms */)) -= (20)
  5554  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms = func() int32 {
  5555  		if (0) > (100) {
  5556  			return func() int32 {
  5557  				if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms) > (0) {
  5558  					return 0
  5559  				}
  5560  				return func() int32 {
  5561  					if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms) < (100) {
  5562  						return 100
  5563  					}
  5564  					return (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms
  5565  				}()
  5566  			}()
  5567  		}
  5568  		return func() int32 {
  5569  			if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms) > (100) {
  5570  				return 100
  5571  			}
  5572  			return func() int32 {
  5573  				if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms) < (0) {
  5574  					return 0
  5575  				}
  5576  				return (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms
  5577  			}()
  5578  		}()
  5579  	}()
  5580  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnBytesInPayloadBuf = *(*int32)(unsafe.Pointer(bp + 5640 /* nBytes */))
  5581  
  5582  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8 > SKP_FIX_CONST(tls, 0.7, 8) {
  5583  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsSWBdetect.FActiveSpeech_ms = func() int32 {
  5584  			if ((uint32(((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsSWBdetect.FActiveSpeech_ms) + (20))) & 0x80000000) != 0 {
  5585  				return 0x7FFFFFFF
  5586  			}
  5587  			return (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsSWBdetect.FActiveSpeech_ms) + (20))
  5588  		}()
  5589  	}
  5590  
  5591  	return ret
  5592  }
  5593  
  5594  /* Low BitRate Redundancy encoding functionality. Reuse all parameters but encode residual with lower bitrate */
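        // The LBRR copy reuses the parameters already analysed for the main frame: the
        // first gain index is raised by LBRR_GainIncreases and re-dequantized to stay in
        // sync with the decoder, the noise-shaping quantizer is re-run on the separate
        // sNSQ_LBRR state into q_LBRR, and the result is range-coded with its own coder
        // (sRC_LBRR); the original gains, LTP scale index and typeOffset are restored
        // afterwards.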
  5595  func SKP_Silk_LBRR_encode_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr, pCode uintptr, pnBytesOut uintptr, xfw uintptr) { /* SKP_Silk_encode_frame_FIX.c:279:6: */
  5596  	bp := tls.Alloc(36)
  5597  	defer tls.Free(36)
  5598  
  5599  	// var TempGainsIndices [4]int32 at bp, 16
  5600  
  5601  	var frame_terminator int32
  5602  	// var nBytes int32 at bp+32, 4
  5603  
  5604  	var nFramesInPayloadBuf int32
  5605  	// var TempGains_Q16 [4]int32 at bp+16, 16
  5606  
  5607  	var typeOffset int32
  5608  	var LTP_scaleIndex int32
  5609  	var Rate_only_parameters int32 = 0
  5610  	/*******************************************/
  5611  	/* Control use of inband LBRR              */
  5612  	/*******************************************/
  5613  	SKP_Silk_LBRR_ctrl_FIX(tls, psEnc, (psEncCtrl /* &.sCmn */))
  5614  
  5615  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_enabled != 0 {
  5616  		/* Save original gains */
  5617  		libc.Xmemcpy(tls, bp /* &TempGainsIndices[0] */, psEncCtrl /* &.sCmn */ +72 /* &.GainsIndices */, (uint32(4) * uint32(unsafe.Sizeof(int32(0)))))
  5618  		libc.Xmemcpy(tls, bp+16 /* &TempGains_Q16[0] */, psEncCtrl+128 /* &.Gains_Q16 */, (uint32(4) * uint32(unsafe.Sizeof(int32(0)))))
  5619  
  5620  		typeOffset = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FtypeOffsetPrev // Save temporarily, as it must not be overwritten
  5621  		LTP_scaleIndex = (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FLTP_scaleIndex
  5622  
  5623  		/* Set max rate where quant signal is encoded */
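        		// When complexity is 0 or the target rate is at or below these per-bandwidth
        		// thresholds, the LBRR copy carries parameters only: q_LBRR is zeroed and LTP
        		// scaling is disabled in the else branch below.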
  5624  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 8 {
  5625  			Rate_only_parameters = 13500
  5626  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 12 {
  5627  			Rate_only_parameters = 15500
  5628  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 16 {
  5629  			Rate_only_parameters = 17500
  5630  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 24 {
  5631  			Rate_only_parameters = 19500
  5632  		} else {
  5633  
  5634  		}
  5635  
  5636  		if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FComplexity > 0) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FTargetRate_bps > Rate_only_parameters) {
  5637  			if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf == 0 {
  5638  				/* First frame in packet; copy everything */
  5639  				libc.Xmemcpy(tls, (psEnc /* &.sCmn */ + 8548 /* &.sNSQ_LBRR */), (psEnc /* &.sCmn */ + 2088 /* &.sNSQ */), uint32(unsafe.Sizeof(SKP_Silk_nsq_state{})))
  5640  
  5641  				(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRRprevLastGainIndex = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsShape.FLastGainIndex
  5642  				/* Increase Gains to get target LBRR rate */
  5643  				*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */))) = (*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */))) + (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLBRR_GainIncreases)
  5644  				*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */))) = func() int32 {
  5645  					if (0) > (64 - 1) {
  5646  						return func() int32 {
  5647  							if (*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */)))) > (0) {
  5648  								return 0
  5649  							}
  5650  							return func() int32 {
  5651  								if (*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */)))) < (64 - 1) {
  5652  									return (64 - 1)
  5653  								}
  5654  								return *(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */)))
  5655  							}()
  5656  						}()
  5657  					}
  5658  					return func() int32 {
  5659  						if (*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */)))) > (64 - 1) {
  5660  							return (64 - 1)
  5661  						}
  5662  						return func() int32 {
  5663  							if (*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */)))) < (0) {
  5664  								return 0
  5665  							}
  5666  							return *(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 72 /* &.GainsIndices */)))
  5667  						}()
  5668  					}()
  5669  				}()
  5670  			}
  5671  			/* Decode to get gains in sync with decoder         */
  5672  			/* Overwrite unquantized gains with quantized gains */
  5673  			SKP_Silk_gains_dequant(tls, psEncCtrl+128 /* &.Gains_Q16 */, psEncCtrl /* &.sCmn */ +72, /* &.GainsIndices */
  5674  				(psEnc /* &.sCmn */ + 15144 /* &.LBRRprevLastGainIndex */), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf)
  5675  
  5676  			/*****************************************/
  5677  			/* Noise shaping quantization            */
  5678  			/*****************************************/
  5679  			if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnStatesDelayedDecision > 1) || ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fwarping_Q16 > 0) {
  5680  				SKP_Silk_NSQ_del_dec(tls, (psEnc /* &.sCmn */), (psEncCtrl /* &.sCmn */), (psEnc /* &.sCmn */ + 8548 /* &.sNSQ_LBRR */), xfw, psEnc /* &.sCmn */ +19052, /* &.q_LBRR */
  5681  					(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FNLSFInterpCoef_Q2, (psEncCtrl + 144 /* &.PredCoef_Q12 */), psEncCtrl+208, /* &.LTPCoef_Q14 */
  5682  					psEncCtrl+380 /* &.AR2_Q13 */, psEncCtrl+572 /* &.HarmShapeGain_Q14 */, psEncCtrl+556 /* &.Tilt_Q14 */, psEncCtrl+508, /* &.LF_shp_Q14 */
  5683  					psEncCtrl+128 /* &.Gains_Q16 */, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLambda_Q10, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTP_scale_Q14)
  5684  			} else {
  5685  				SKP_Silk_NSQ(tls, (psEnc /* &.sCmn */), (psEncCtrl /* &.sCmn */), (psEnc /* &.sCmn */ + 8548 /* &.sNSQ_LBRR */), xfw, psEnc /* &.sCmn */ +19052, /* &.q_LBRR */
  5686  					(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FNLSFInterpCoef_Q2, (psEncCtrl + 144 /* &.PredCoef_Q12 */), psEncCtrl+208, /* &.LTPCoef_Q14 */
  5687  					psEncCtrl+380 /* &.AR2_Q13 */, psEncCtrl+572 /* &.HarmShapeGain_Q14 */, psEncCtrl+556 /* &.Tilt_Q14 */, psEncCtrl+508, /* &.LF_shp_Q14 */
  5688  					psEncCtrl+128 /* &.Gains_Q16 */, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLambda_Q10, (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTP_scale_Q14)
  5689  			}
  5690  		} else {
  5691  			libc.Xmemset(tls, psEnc /* &.sCmn */ +19052 /* &.q_LBRR */, 0, (uint32((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length) * uint32(unsafe.Sizeof(int8(0)))))
  5692  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FLTP_scaleIndex = 0
  5693  		}
  5694  		/****************************************/
  5695  		/* Initialize arithmetic coder          */
  5696  		/****************************************/
  5697  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf == 0 {
  5698  			SKP_Silk_range_enc_init(tls, (psEnc /* &.sCmn */ + 1044 /* &.sRC_LBRR */))
  5699  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnBytesInPayloadBuf = 0
  5700  		}
  5701  
  5702  		/****************************************/
  5703  		/* Encode Parameters                    */
  5704  		/****************************************/
  5705  		SKP_Silk_encode_parameters(tls, (psEnc /* &.sCmn */), (psEncCtrl /* &.sCmn */),
  5706  			(psEnc /* &.sCmn */ + 1044 /* &.sRC_LBRR */), psEnc /* &.sCmn */ +19052 /* &.q_LBRR */)
  5707  
  5708  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsRC_LBRR.Ferror != 0 {
  5709  			/* Encoder returned error: clear payload buffer */
  5710  			nFramesInPayloadBuf = 0
  5711  		} else {
  5712  			nFramesInPayloadBuf = ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf + 1)
  5713  		}
  5714  
  5715  		/****************************************/
  5716  		/* Finalize payload and copy to output  */
  5717  		/****************************************/
  5718  		if ((int32(int16(nFramesInPayloadBuf))) * (int32(int16(20)))) >= (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketSize_ms {
  5719  
  5720  			/* Check if FEC information should be added */
  5721  			frame_terminator = 0
  5722  
  5723  			/* Add the frame termination info to stream */
  5724  			SKP_Silk_range_encoder(tls, (psEnc /* &.sCmn */ + 1044 /* &.sRC_LBRR */), frame_terminator, uintptr(unsafe.Pointer(&SKP_Silk_FrameTermination_CDF)))
  5725  
  5726  			/* Payload length so far */
  5727  			SKP_Silk_range_coder_get_length(tls, (psEnc /* &.sCmn */ + 1044 /* &.sRC_LBRR */), bp+32 /* &nBytes */)
  5728  
  5729  			/* Check that there is enough space in external output buffer and move data */
  5730  			if int32(*(*int16)(unsafe.Pointer(pnBytesOut))) >= *(*int32)(unsafe.Pointer(bp + 32 /* nBytes */)) {
  5731  				SKP_Silk_range_enc_wrap_up(tls, (psEnc /* &.sCmn */ + 1044 /* &.sRC_LBRR */))
  5732  				libc.Xmemcpy(tls, pCode, psEnc /* &.sCmn */ +1044 /* &.sRC_LBRR */ +20 /* &.buffer */, (uint32(*(*int32)(unsafe.Pointer(bp + 32 /* nBytes */))) * uint32(unsafe.Sizeof(uint8(0)))))
  5733  
  5734  				*(*int16)(unsafe.Pointer(pnBytesOut)) = int16(*(*int32)(unsafe.Pointer(bp + 32 /* nBytes */)))
  5735  			} else {
  5736  				/* Not enough space: payload will be discarded */
  5737  				*(*int16)(unsafe.Pointer(pnBytesOut)) = int16(0)
  5738  
  5739  			}
  5740  		} else {
  5741  			/* No payload this time */
  5742  			*(*int16)(unsafe.Pointer(pnBytesOut)) = int16(0)
  5743  
  5744  			/* Encode that more frames follow */
  5745  			frame_terminator = 1
  5746  			SKP_Silk_range_encoder(tls, (psEnc /* &.sCmn */ + 1044 /* &.sRC_LBRR */), frame_terminator, uintptr(unsafe.Pointer(&SKP_Silk_FrameTermination_CDF)))
  5747  		}
  5748  
  5749  		/* Restore original Gains */
  5750  		libc.Xmemcpy(tls, psEncCtrl /* &.sCmn */ +72 /* &.GainsIndices */, bp /* &TempGainsIndices[0] */, (uint32(4) * uint32(unsafe.Sizeof(int32(0)))))
  5751  		libc.Xmemcpy(tls, psEncCtrl+128 /* &.Gains_Q16 */, bp+16 /* &TempGains_Q16[0] */, (uint32(4) * uint32(unsafe.Sizeof(int32(0)))))
  5752  
  5753  		/* Restore LTP scale index and typeOffset */
  5754  		(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FLTP_scaleIndex = LTP_scaleIndex
  5755  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FtypeOffsetPrev = typeOffset
  5756  	}
  5757  }
  5758  
  5759  /*******************************************/
  5760  /* Encode parameters to create the payload */
  5761  /*******************************************/
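        // Parameters are range-encoded in a fixed order: sampling-rate index (first frame
        // of a packet only), joint signal-type/quantizer-offset symbol, gain indices (the
        // first subframe of a packet coded absolutely, the rest delta-coded), the
        // multi-stage NLSF codebook path and NLSF interpolation factor, then for voiced
        // frames the pitch lag, pitch contour, per-subframe LTP codebook indices and LTP
        // scaling, and finally the seed, the excitation pulses and the VAD flag.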
  5762  func SKP_Silk_encode_parameters(tls *libc.TLS, psEncC uintptr, psEncCtrlC uintptr, psRC uintptr, q uintptr) { /* SKP_Silk_encode_parameters.c:33:6: */
  5763  	var i int32
  5764  	var k int32
  5765  	var typeOffset int32
  5766  	var psNLSF_CB uintptr
  5767  
  5768  	/************************/
  5769  	/* Encode sampling rate */
  5770  	/************************/
  5771  	/* only done for first frame in packet */
  5772  	if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnFramesInPayloadBuf == 0 {
  5773  		/* get sampling rate index */
  5774  		for i = 0; i < 3; i++ {
  5775  			if SKP_Silk_SamplingRates_table[i] == (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz {
  5776  				break
  5777  			}
  5778  		}
  5779  		SKP_Silk_range_encoder(tls, psRC, i, uintptr(unsafe.Pointer(&SKP_Silk_SamplingRates_CDF)))
  5780  	}
  5781  
  5782  	/*******************************************/
  5783  	/* Encode signal type and quantizer offset */
  5784  	/*******************************************/
  5785  	typeOffset = ((2 * (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype) + (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FQuantOffsetType)
  5786  	if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnFramesInPayloadBuf == 0 {
  5787  		/* first frame in packet: independent coding */
  5788  		SKP_Silk_range_encoder(tls, psRC, typeOffset, uintptr(unsafe.Pointer(&SKP_Silk_type_offset_CDF)))
  5789  	} else {
  5790  		/* conditional coding */
  5791  		SKP_Silk_range_encoder(tls, psRC, typeOffset, (uintptr(unsafe.Pointer(&SKP_Silk_type_offset_joint_CDF)) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FtypeOffsetPrev)*10))
  5792  	}
  5793  	(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FtypeOffsetPrev = typeOffset
  5794  
  5795  	/****************/
  5796  	/* Encode gains */
  5797  	/****************/
  5798  	/* first subframe */
  5799  	if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnFramesInPayloadBuf == 0 {
  5800  		/* first frame in packet: independent coding */
  5801  		SKP_Silk_range_encoder(tls, psRC, *(*int32)(unsafe.Pointer((psEncCtrlC + 72 /* &.GainsIndices */))), (uintptr(unsafe.Pointer(&SKP_Silk_gain_CDF)) + uintptr((*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype)*130))
  5802  	} else {
  5803  		/* conditional coding */
  5804  		SKP_Silk_range_encoder(tls, psRC, *(*int32)(unsafe.Pointer((psEncCtrlC + 72 /* &.GainsIndices */))), uintptr(unsafe.Pointer(&SKP_Silk_delta_gain_CDF)))
  5805  	}
  5806  
  5807  	/* remaining subframes */
  5808  	for i = 1; i < 4; i++ {
  5809  		SKP_Silk_range_encoder(tls, psRC, *(*int32)(unsafe.Pointer((psEncCtrlC + 72 /* &.GainsIndices */) + uintptr(i)*4)), uintptr(unsafe.Pointer(&SKP_Silk_delta_gain_CDF)))
  5810  	}
  5811  
  5812  	/****************/
  5813  	/* Encode NLSFs */
  5814  	/****************/
  5815  	/* Range encoding of the NLSF path */
  5816  	psNLSF_CB = *(*uintptr)(unsafe.Pointer((psEncC + 16248 /* &.psNLSF_CB */) + uintptr((*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype)*4))
  5817  	SKP_Silk_range_encoder_multi(tls, psRC, psEncCtrlC+28 /* &.NLSFIndices */, (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FStartPtr, (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages)
  5818  
  5819  	/* Encode NLSF interpolation factor */
  5820  
  5821  	SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FNLSFInterpCoef_Q2, uintptr(unsafe.Pointer(&SKP_Silk_NLSF_interpolation_factor_CDF)))
  5822  
  5823  	if (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype == 0 {
  5824  		/*********************/
  5825  		/* Encode pitch lags */
  5826  		/*********************/
  5827  
  5828  		/* lag index */
  5829  		if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 8 {
  5830  			SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FlagIndex, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_NB_CDF)))
  5831  		} else if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 12 {
  5832  			SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FlagIndex, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_MB_CDF)))
  5833  		} else if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 16 {
  5834  			SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FlagIndex, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_WB_CDF)))
  5835  		} else {
  5836  			SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FlagIndex, uintptr(unsafe.Pointer(&SKP_Silk_pitch_lag_SWB_CDF)))
  5837  		}
  5838  
  5839  		/* contour index */
  5840  		if (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Ffs_kHz == 8 {
  5841  			/* Fewer codevectors used in 8 kHz mode */
  5842  			SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FcontourIndex, uintptr(unsafe.Pointer(&SKP_Silk_pitch_contour_NB_CDF)))
  5843  		} else {
  5844  			/* Joint for 12, 16, 24 kHz */
  5845  			SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FcontourIndex, uintptr(unsafe.Pointer(&SKP_Silk_pitch_contour_CDF)))
  5846  		}
  5847  
  5848  		/********************/
  5849  		/* Encode LTP gains */
  5850  		/********************/
  5851  
  5852  		/* PERIndex value */
  5853  		SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FPERIndex, uintptr(unsafe.Pointer(&SKP_Silk_LTP_per_index_CDF)))
  5854  
  5855  		/* Codebook Indices */
  5856  		for k = 0; k < 4; k++ {
  5857  			SKP_Silk_range_encoder(tls, psRC, *(*int32)(unsafe.Pointer((psEncCtrlC + 12 /* &.LTPIndex */) + uintptr(k)*4)), SKP_Silk_LTP_gain_CDF_ptrs[(*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FPERIndex])
  5858  		}
  5859  
  5860  		/**********************/
  5861  		/* Encode LTP scaling */
  5862  		/**********************/
  5863  		SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FLTP_scaleIndex, uintptr(unsafe.Pointer(&SKP_Silk_LTPscale_CDF)))
  5864  	}
  5865  
  5866  	/***************/
  5867  	/* Encode seed */
  5868  	/***************/
  5869  	SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FSeed, uintptr(unsafe.Pointer(&SKP_Silk_Seed_CDF)))
  5870  
  5871  	/*********************************************/
  5872  	/* Encode quantization indices of excitation */
  5873  	/*********************************************/
  5874  	SKP_Silk_encode_pulses(tls, psRC, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FQuantOffsetType, q, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length)
  5875  
  5876  	/*********************************************/
  5877  	/* Encode VAD flag                           */
  5878  	/*********************************************/
  5879  	SKP_Silk_range_encoder(tls, psRC, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FvadFlag, uintptr(unsafe.Pointer(&SKP_Silk_vadflag_CDF)))
  5880  }
  5881  
  5882  /*********************************************/
  5883  /* Encode quantization indices of excitation */
  5884  /*********************************************/
  5885  
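        // combine_and_check sums adjacent pairs of pulse magnitudes (one shell-coder
        // split level) into pulses_comb and returns 1 as soon as any pair sum exceeds
        // max_pulses, signalling that the caller must down-shift the excitation and retry.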
  5886  func combine_and_check(tls *libc.TLS, pulses_comb uintptr, pulses_in uintptr, max_pulses int32, len int32) int32 { /* SKP_Silk_encode_pulses.c:34:20: */
  5887  	var k int32
  5888  	var sum int32
  5889  
  5890  	for k = 0; k < len; k++ {
  5891  		sum = (*(*int32)(unsafe.Pointer(pulses_in + uintptr((2*k))*4)) + *(*int32)(unsafe.Pointer(pulses_in + uintptr(((2*k)+1))*4)))
  5892  		if sum > max_pulses {
  5893  			return 1
  5894  		}
  5895  		*(*int32)(unsafe.Pointer(pulses_comb + uintptr(k)*4)) = sum
  5896  	}
  5897  
  5898  	return 0
  5899  }
  5900  
  5901  /* Encode quantization indices of excitation */
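        // SKP_Silk_encode_pulses entropy-codes the excitation in shell-coding blocks of
        // 16 samples: pulse magnitudes are summed pairwise up the shell tree and, if any
        // level overflows its maximum, the whole block is right-shifted and the dropped
        // LSBs are coded separately; a rate level is chosen to minimize the tabulated bit
        // count, after which the per-block pulse sums (with an escape symbol for shifted
        // blocks), the shell positions, the LSBs and the signs are range-encoded.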
  5902  func SKP_Silk_encode_pulses(tls *libc.TLS, psRC uintptr, sigtype int32, QuantOffsetType int32, q uintptr, frame_length int32) { /* SKP_Silk_encode_pulses.c:55:6: */
  5903  	bp := tls.Alloc(2192)
  5904  	defer tls.Free(2192)
  5905  
  5906  	var i int32
  5907  	var k int32
  5908  	var j int32
  5909  	var iter int32
  5910  	var bit int32
  5911  	var nLS int32
  5912  	var scale_down int32
  5913  	var RateLevelIndex int32 = 0
  5914  	var abs_q int32
  5915  	var minSumBits_Q6 int32
  5916  	var sumBits_Q6 int32
  5917  	// var abs_pulses [480]int32 at bp+32, 1920
  5918  
  5919  	// var sum_pulses [30]int32 at bp+2072, 120
  5920  
  5921  	// var nRshifts [30]int32 at bp+1952, 120
  5922  
  5923  	// var pulses_comb [8]int32 at bp, 32
  5924  
  5925  	var abs_pulses_ptr uintptr
  5926  	var pulses_ptr uintptr
  5927  	var cdf_ptr uintptr
  5928  	var nBits_ptr uintptr
  5929  
  5930  	libc.Xmemset(tls, bp /* &pulses_comb[0] */, 0, (uint32(8) * uint32(unsafe.Sizeof(int32(0))))) // Fixes a Valgrind-reported problem
  5931  
  5932  	/****************************/
  5933  	/* Prepare for shell coding */
  5934  	/****************************/
  5935  	/* Calculate number of shell blocks */
  5936  	iter = (frame_length / 16)
  5937  
  5938  	/* Take the absolute value of the pulses */
  5939  	for i = 0; i < frame_length; i = i + (4) {
  5940  		*(*int32)(unsafe.Pointer(bp + 32 /* &abs_pulses[0] */ + uintptr((i+0))*4)) = func() int32 {
  5941  			if (int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 0)))))) > 0 {
  5942  				return int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 0)))))
  5943  			}
  5944  			return -int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 0)))))
  5945  		}()
  5946  		*(*int32)(unsafe.Pointer(bp + 32 /* &abs_pulses[0] */ + uintptr((i+1))*4)) = func() int32 {
  5947  			if (int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 1)))))) > 0 {
  5948  				return int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 1)))))
  5949  			}
  5950  			return -int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 1)))))
  5951  		}()
  5952  		*(*int32)(unsafe.Pointer(bp + 32 /* &abs_pulses[0] */ + uintptr((i+2))*4)) = func() int32 {
  5953  			if (int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 2)))))) > 0 {
  5954  				return int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 2)))))
  5955  			}
  5956  			return -int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 2)))))
  5957  		}()
  5958  		*(*int32)(unsafe.Pointer(bp + 32 /* &abs_pulses[0] */ + uintptr((i+3))*4)) = func() int32 {
  5959  			if (int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 3)))))) > 0 {
  5960  				return int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 3)))))
  5961  			}
  5962  			return -int32(*(*int8)(unsafe.Pointer(q + uintptr((i + 3)))))
  5963  		}()
  5964  	}
  5965  
  5966  	/* Calc sum pulses per shell code frame */
  5967  	abs_pulses_ptr = bp + 32 /* &abs_pulses[0] */
  5968  	for i = 0; i < iter; i++ {
  5969  		*(*int32)(unsafe.Pointer(bp + 1952 /* &nRshifts[0] */ + uintptr(i)*4)) = 0
  5970  
  5971  		for 1 != 0 {
  5972  			/* 1+1 -> 2 */
  5973  			scale_down = combine_and_check(tls, bp /* &pulses_comb[0] */, abs_pulses_ptr, SKP_Silk_max_pulses_table[0], 8)
  5974  
  5975  			/* 2+2 -> 4 */
  5976  			scale_down = scale_down + (combine_and_check(tls, bp /* &pulses_comb[0] */, bp /* &pulses_comb[0] */, SKP_Silk_max_pulses_table[1], 4))
  5977  
  5978  			/* 4+4 -> 8 */
  5979  			scale_down = scale_down + (combine_and_check(tls, bp /* &pulses_comb[0] */, bp /* &pulses_comb[0] */, SKP_Silk_max_pulses_table[2], 2))
  5980  
  5981  			/* 8+8 -> 16 */
  5982  			*(*int32)(unsafe.Pointer(bp + 2072 /* &sum_pulses[0] */ + uintptr(i)*4)) = (*(*int32)(unsafe.Pointer(bp /* &pulses_comb[0] */)) + *(*int32)(unsafe.Pointer(bp /* &pulses_comb[0] */ + 1*4)))
  5983  			if *(*int32)(unsafe.Pointer(bp + 2072 /* &sum_pulses[0] */ + uintptr(i)*4)) > SKP_Silk_max_pulses_table[3] {
  5984  				scale_down++
  5985  			}
  5986  
  5987  			if scale_down != 0 {
  5988  				/* We need to down scale the quantization signal */
  5989  				*(*int32)(unsafe.Pointer(bp + 1952 /* &nRshifts[0] */ + uintptr(i)*4))++
  5990  				for k = 0; k < 16; k++ {
  5991  					*(*int32)(unsafe.Pointer(abs_pulses_ptr + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(abs_pulses_ptr + uintptr(k)*4))) >> (1))
  5992  				}
  5993  			} else {
  5994  				/* Jump out of while(1) loop and go to next shell coding frame */
  5995  				break
  5996  			}
  5997  		}
  5998  		abs_pulses_ptr += 4 * (uintptr(16))
  5999  	}
  6000  
  6001  	/**************/
  6002  	/* Rate level */
  6003  	/**************/
  6004  	/* find rate level that leads to fewest bits for coding of pulses per block info */
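        	// For each candidate rate level the cost is its own tabulated Q6 bit count plus,
        	// per shell block, either the bit count of that block's pulse sum or the escape
        	// entry (index 19) when the block needed down-shifting.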
  6005  	minSumBits_Q6 = 0x7FFFFFFF
  6006  	for k = 0; k < (10 - 1); k++ {
  6007  		nBits_ptr = (uintptr(unsafe.Pointer(&SKP_Silk_pulses_per_block_BITS_Q6)) + uintptr(k)*40)
  6008  		sumBits_Q6 = int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_rate_levels_BITS_Q6)) + uintptr(sigtype)*18) + uintptr(k)*2)))
  6009  		for i = 0; i < iter; i++ {
  6010  			if *(*int32)(unsafe.Pointer(bp + 1952 /* &nRshifts[0] */ + uintptr(i)*4)) > 0 {
  6011  				sumBits_Q6 = sumBits_Q6 + (int32(*(*int16)(unsafe.Pointer(nBits_ptr + 19*2))))
  6012  			} else {
  6013  				sumBits_Q6 = sumBits_Q6 + (int32(*(*int16)(unsafe.Pointer(nBits_ptr + uintptr(*(*int32)(unsafe.Pointer(bp + 2072 /* &sum_pulses[0] */ + uintptr(i)*4)))*2))))
  6014  			}
  6015  		}
  6016  		if sumBits_Q6 < minSumBits_Q6 {
  6017  			minSumBits_Q6 = sumBits_Q6
  6018  			RateLevelIndex = k
  6019  		}
  6020  	}
  6021  	SKP_Silk_range_encoder(tls, psRC, RateLevelIndex, (uintptr(unsafe.Pointer(&SKP_Silk_rate_levels_CDF)) + uintptr(sigtype)*20))
  6022  
  6023  	/***************************************************/
  6024  	/* Sum-Weighted-Pulses Encoding                    */
  6025  	/***************************************************/
  6026  	cdf_ptr = (uintptr(unsafe.Pointer(&SKP_Silk_pulses_per_block_CDF)) + uintptr(RateLevelIndex)*42)
  6027  	for i = 0; i < iter; i++ {
  6028  		if *(*int32)(unsafe.Pointer(bp + 1952 /* &nRshifts[0] */ + uintptr(i)*4)) == 0 {
  6029  			SKP_Silk_range_encoder(tls, psRC, *(*int32)(unsafe.Pointer(bp + 2072 /* &sum_pulses[0] */ + uintptr(i)*4)), cdf_ptr)
  6030  		} else {
  6031  			SKP_Silk_range_encoder(tls, psRC, (18 + 1), cdf_ptr)
  6032  			for k = 0; k < (*(*int32)(unsafe.Pointer(bp + 1952 /* &nRshifts[0] */ + uintptr(i)*4)) - 1); k++ {
  6033  				SKP_Silk_range_encoder(tls, psRC, (18 + 1), (uintptr(unsafe.Pointer(&SKP_Silk_pulses_per_block_CDF)) + 9*42))
  6034  			}
  6035  			SKP_Silk_range_encoder(tls, psRC, *(*int32)(unsafe.Pointer(bp + 2072 /* &sum_pulses[0] */ + uintptr(i)*4)), (uintptr(unsafe.Pointer(&SKP_Silk_pulses_per_block_CDF)) + 9*42))
  6036  		}
  6037  	}
  6038  
  6039  	/******************/
  6040  	/* Shell Encoding */
  6041  	/******************/
  6042  	for i = 0; i < iter; i++ {
  6043  		if *(*int32)(unsafe.Pointer(bp + 2072 /* &sum_pulses[0] */ + uintptr(i)*4)) > 0 {
  6044  			SKP_Silk_shell_encoder(tls, psRC, (bp + 32 /* &abs_pulses */ + uintptr((i*16))*4))
  6045  		}
  6046  	}
  6047  
  6048  	/****************/
  6049  	/* LSB Encoding */
  6050  	/****************/
  6051  	for i = 0; i < iter; i++ {
  6052  		if *(*int32)(unsafe.Pointer(bp + 1952 /* &nRshifts[0] */ + uintptr(i)*4)) > 0 {
  6053  			pulses_ptr = (q + uintptr((i * 16)))
  6054  			nLS = (*(*int32)(unsafe.Pointer(bp + 1952 /* &nRshifts[0] */ + uintptr(i)*4)) - 1)
  6055  			for k = 0; k < 16; k++ {
  6056  				abs_q = int32(func() int8 {
  6057  					if (int32(*(*int8)(unsafe.Pointer(pulses_ptr + uintptr(k))))) > 0 {
  6058  						return *(*int8)(unsafe.Pointer(pulses_ptr + uintptr(k)))
  6059  					}
  6060  					return int8(-int32(*(*int8)(unsafe.Pointer(pulses_ptr + uintptr(k)))))
  6061  				}())
  6062  				for j = nLS; j > 0; j-- {
  6063  					bit = (((abs_q) >> (j)) & 1)
  6064  					SKP_Silk_range_encoder(tls, psRC, bit, uintptr(unsafe.Pointer(&SKP_Silk_lsb_CDF)))
  6065  				}
  6066  				bit = (abs_q & 1)
  6067  				SKP_Silk_range_encoder(tls, psRC, bit, uintptr(unsafe.Pointer(&SKP_Silk_lsb_CDF)))
  6068  			}
  6069  		}
  6070  	}
  6071  
  6072  	/****************/
  6073  	/* Encode signs */
  6074  	/****************/
  6075  	SKP_Silk_encode_signs(tls, psRC, q, frame_length, sigtype, QuantOffsetType, RateLevelIndex)
  6076  }
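
// Editorial sketch (not part of the generated translation): the loop above keeps
// halving the absolute pulse amplitudes of each 16-sample shell frame until the
// combined 2-, 4-, 8- and 16-sample sums fit the limits in SKP_Silk_max_pulses_table,
// counting the shifts in nRshifts for later LSB coding. examplePulseDownscale is a
// hypothetical plain-slice rendering of that idea, checking only the final
// 16-sample sum against a single limit.
func examplePulseDownscale(absPulses []int32, maxSum int32) int32 {
	var nRshifts int32
	for {
		var sum int32
		for _, p := range absPulses {
			sum += p
		}
		if sum <= maxSum {
			return nRshifts // amplitudes now fit; nRshifts tells the LSB coder how much was dropped
		}
		for i := range absPulses {
			absPulses[i] >>= 1 // halve every amplitude and try again
		}
		nRshifts++
	}
}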
  6077  
  6078  /***********************************************************************
  6079  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  6080  Redistribution and use in source and binary forms, with or without
  6081  modification, (subject to the limitations in the disclaimer below)
  6082  are permitted provided that the following conditions are met:
  6083  - Redistributions of source code must retain the above copyright notice,
  6084  this list of conditions and the following disclaimer.
  6085  - Redistributions in binary form must reproduce the above copyright
  6086  notice, this list of conditions and the following disclaimer in the
  6087  documentation and/or other materials provided with the distribution.
  6088  - Neither the name of Skype Limited, nor the names of specific
  6089  contributors, may be used to endorse or promote products derived from
  6090  this software without specific prior written permission.
  6091  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  6092  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  6093  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  6094  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  6095  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  6096  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  6097  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  6098  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  6099  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  6100  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  6101  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  6102  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  6103  ***********************************************************************/
  6158  
  6159  /****************************************/
  6160  /* Encoder functions                    */
  6161  /****************************************/
  6162  
  6163  func SKP_Silk_SDK_Get_Encoder_Size(tls *libc.TLS, encSizeBytes uintptr) int32 { /* SKP_Silk_enc_API.c:41:9: */
  6164  	var ret int32 = 0
  6165  
  6166  	*(*int32)(unsafe.Pointer(encSizeBytes)) = int32(unsafe.Sizeof(SKP_Silk_encoder_state_FIX{}))
  6167  
  6168  	return ret
  6169  }
  6170  
  6171  /***************************************/
  6172  /* Read control structure from encoder */
  6173  /***************************************/
  6174  func SKP_Silk_SDK_QueryEncoder(tls *libc.TLS, encState uintptr, encStatus uintptr) int32 { /* SKP_Silk_enc_API.c:54:9: */
  6175  	var psEnc uintptr
  6176  	var ret int32 = 0
  6177  
  6178  	psEnc = encState
  6179  
  6180  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).FAPI_sampleRate = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz
  6181  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).FmaxInternalSampleRate = ((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FmaxInternal_fs_kHz))) * (int32(int16(1000))))
  6182  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).FpacketSize = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz * (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketSize_ms) / (1000)) /* convert ms -> samples */
  6183  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).FbitRate = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FTargetRate_bps
  6184  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).FpacketLossPercentage = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketLoss_perc
  6185  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).Fcomplexity = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FComplexity
  6186  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).FuseInBandFEC = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseInBandFEC
  6187  	(*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encStatus)).FuseDTX = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseDTX
  6188  	return ret
  6189  }
  6190  
  6191  /*************************/
  6192  /* Init or Reset encoder */
  6193  /*************************/
  6194  func SKP_Silk_SDK_InitEncoder(tls *libc.TLS, encState uintptr, encStatus uintptr) int32 { /* SKP_Silk_enc_API.c:78:9: */
  6195  	var psEnc uintptr
  6196  	var ret int32 = 0
  6197  
  6198  	psEnc = encState
  6199  
  6200  	/* Reset Encoder */
  6201  	if libc.AssignAddInt32(&ret, SKP_Silk_init_encoder_FIX(tls, psEnc)) != 0 {
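		// the C body here was likely just an assertion, dropped in translation; ret still accumulates the return code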
  6202  
  6203  	}
  6204  
  6205  	/* Read control structure */
  6206  	if libc.AssignAddInt32(&ret, SKP_Silk_SDK_QueryEncoder(tls, encState, encStatus)) != 0 {
  6207  
  6208  	}
  6209  
  6210  	return ret
  6211  }
  6212  
  6213  /**************************/
  6214  /* Encode frame with Silk */
  6215  /**************************/
  6216  func SKP_Silk_SDK_Encode(tls *libc.TLS, encState uintptr, encControl uintptr, samplesIn uintptr, nSamplesIn int32, outData uintptr, nBytesOut uintptr) int32 { /* SKP_Silk_enc_API.c:106:9: */
  6217  	bp := tls.Alloc(2)
  6218  	defer tls.Free(2)
  6219  
  6220  	var max_internal_fs_kHz int32
  6221  	var PacketSize_ms int32
  6222  	var PacketLoss_perc int32
  6223  	var UseInBandFEC int32
  6224  	var UseDTX int32
  6225  	var ret int32 = 0
  6226  	var nSamplesToBuffer int32
  6227  	var Complexity int32
  6228  	var input_10ms int32
  6229  	var nSamplesFromInput int32 = 0
  6230  	var TargetRate_bps int32
  6231  	var API_fs_Hz int32
  6232  	// var MaxBytesOut int16 at bp, 2
  6233  
  6234  	var psEnc uintptr = encState
  6235  
  6236  	/* Check sampling frequency first, to avoid divide by zero later */
  6237  	if ((((((((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate != 8000) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate != 12000)) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate != 16000)) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate != 24000)) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate != 32000)) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate != 44100)) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate != 48000)) || (((((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FmaxInternalSampleRate != 8000) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FmaxInternalSampleRate != 12000)) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FmaxInternalSampleRate != 16000)) && ((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FmaxInternalSampleRate != 24000)) {
  6238  		ret = -2
  6239  
  6240  		return ret
  6241  	}
  6242  
  6243  	/* Set encoder parameters from control structure */
  6244  	API_fs_Hz = (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FAPI_sampleRate
  6245  	max_internal_fs_kHz = (((*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FmaxInternalSampleRate >> 10) + 1) /* convert Hz -> kHz */
  6246  	PacketSize_ms = ((1000 * (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FpacketSize) / (API_fs_Hz))
  6247  	TargetRate_bps = (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FbitRate
  6248  	PacketLoss_perc = (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FpacketLossPercentage
  6249  	UseInBandFEC = (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FuseInBandFEC
  6250  	Complexity = (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).Fcomplexity
  6251  	UseDTX = (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(encControl)).FuseDTX
  6252  
  6253  	/* Save values in state */
  6254  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FAPI_fs_Hz = API_fs_Hz
  6255  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FmaxInternal_fs_kHz = max_internal_fs_kHz
  6256  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseInBandFEC = UseInBandFEC
  6257  
  6258  	/* Only accept input lengths that are a multiple of 10 ms */
  6259  	input_10ms = ((100 * nSamplesIn) / (API_fs_Hz))
  6260  	if ((input_10ms * API_fs_Hz) != (100 * nSamplesIn)) || (nSamplesIn < 0) {
  6261  		ret = -1
  6262  
  6263  		return ret
  6264  	}
  6265  
  6266  	TargetRate_bps = func() int32 {
  6267  		if (5000) > (100000) {
  6268  			return func() int32 {
  6269  				if (TargetRate_bps) > (5000) {
  6270  					return 5000
  6271  				}
  6272  				return func() int32 {
  6273  					if (TargetRate_bps) < (100000) {
  6274  						return 100000
  6275  					}
  6276  					return TargetRate_bps
  6277  				}()
  6278  			}()
  6279  		}
  6280  		return func() int32 {
  6281  			if (TargetRate_bps) > (100000) {
  6282  				return 100000
  6283  			}
  6284  			return func() int32 {
  6285  				if (TargetRate_bps) < (5000) {
  6286  					return 5000
  6287  				}
  6288  				return TargetRate_bps
  6289  			}()
  6290  		}()
  6291  	}()
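	// the nested expression above is a saturating clamp of TargetRate_bps to the 5000..100000 bps range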
  6292  	if (libc.AssignInt32(&ret, SKP_Silk_control_encoder_FIX(tls, psEnc, PacketSize_ms, TargetRate_bps,
  6293  		PacketLoss_perc, UseDTX, Complexity))) != 0 {
  6294  
  6295  		return ret
  6296  	}
  6297  
  6298  	/* Make sure no more than one packet can be produced */
  6299  	if (1000 * nSamplesIn) > ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketSize_ms * API_fs_Hz) {
  6300  		ret = -1
  6301  
  6302  		return ret
  6303  	}
  6304  
  6305  	/* Detect energy above 8 kHz */
  6306  	if (((func() int32 {
  6307  		if (API_fs_Hz) < (1000 * max_internal_fs_kHz) {
  6308  			return API_fs_Hz
  6309  		}
  6310  		return (1000 * max_internal_fs_kHz)
  6311  	}()) == 24000) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsSWBdetect.FSWB_detected == 0)) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsSWBdetect.FWB_detected == 0) {
  6312  		SKP_Silk_detect_SWB_input(tls, (psEnc /* &.sCmn */ + 18532 /* &.sSWBdetect */), samplesIn, nSamplesIn)
  6313  	}
  6314  
  6315  	/* Input buffering/resampling and encoding */
  6316  	*(*int16)(unsafe.Pointer(bp /* MaxBytesOut */)) = int16(0) /* return 0 output bytes if no encoder called */
  6317  	for 1 != 0 {
  6318  		nSamplesToBuffer = ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinputBufIx)
  6319  		if API_fs_Hz == ((int32(int16(1000))) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz)))) {
  6320  			nSamplesToBuffer = SKP_min_int(tls, nSamplesToBuffer, nSamplesIn)
  6321  			nSamplesFromInput = nSamplesToBuffer
  6322  			/* Copy to buffer */
  6323  			libc.Xmemcpy(tls, ((psEnc /* &.sCmn */ + 15272 /* &.inputBuf */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinputBufIx)*2), samplesIn, (uint32(nSamplesFromInput) * uint32(unsafe.Sizeof(int16(0)))))
  6324  		} else {
  6325  			nSamplesToBuffer = func() int32 {
  6326  				if (nSamplesToBuffer) < ((10 * input_10ms) * (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz) {
  6327  					return nSamplesToBuffer
  6328  				}
  6329  				return ((10 * input_10ms) * (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz)
  6330  			}()
  6331  			nSamplesFromInput = ((nSamplesToBuffer * API_fs_Hz) / ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz * 1000))
  6332  			/* Resample and write to buffer */
  6333  			ret = ret + (SKP_Silk_resampler(tls, (psEnc /* &.sCmn */ + 18348 /* &.resampler_state */),
  6334  				((psEnc /* &.sCmn */ + 15272 /* &.inputBuf */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinputBufIx)*2), samplesIn, nSamplesFromInput))
  6335  		}
  6336  		samplesIn += 2 * (uintptr(nSamplesFromInput))
  6337  		nSamplesIn = nSamplesIn - (nSamplesFromInput)
  6338  		*(*int32)(unsafe.Pointer(psEnc /* &.sCmn */ + 16232 /* &.inputBufIx */)) += (nSamplesToBuffer)
  6339  
  6340  		/* Silk encoder */
  6341  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinputBufIx >= (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length {
  6342  
  6343  			/* Enough data in input buffer, so encode */
  6344  			if int32(*(*int16)(unsafe.Pointer(bp /* MaxBytesOut */))) == 0 {
  6345  				/* No payload obtained so far */
  6346  				*(*int16)(unsafe.Pointer(bp /* MaxBytesOut */)) = *(*int16)(unsafe.Pointer(nBytesOut))
  6347  				if (libc.AssignInt32(&ret, SKP_Silk_encode_frame_FIX(tls, psEnc, outData, bp /* &MaxBytesOut */, psEnc /* &.sCmn */ +15272 /* &.inputBuf */))) != 0 {
  6348  
  6349  				}
  6350  			} else {
  6351  				/* outData already contains a payload */
  6352  				if (libc.AssignInt32(&ret, SKP_Silk_encode_frame_FIX(tls, psEnc, outData, nBytesOut, psEnc /* &.sCmn */ +15272 /* &.inputBuf */))) != 0 {
  6353  
  6354  				}
  6355  				/* Check that no second payload was created */
  6356  
  6357  			}
  6358  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinputBufIx = 0
  6359  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fcontrolled_since_last_payload = 0
  6360  
  6361  			if nSamplesIn == 0 {
  6362  				break
  6363  			}
  6364  		} else {
  6365  			break
  6366  		}
  6367  	}
  6368  
  6369  	*(*int16)(unsafe.Pointer(nBytesOut)) = *(*int16)(unsafe.Pointer(bp /* MaxBytesOut */))
  6370  	if ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseDTX != 0) && ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FinDTX != 0) {
  6371  		/* DTX simulation */
  6372  		*(*int16)(unsafe.Pointer(nBytesOut)) = int16(0)
  6373  	}
  6374  
  6375  	return ret
  6376  }
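
// Editorial usage sketch, not part of the generated translation: how the SDK entry
// points above fit together. Assumptions are marked: the caller already holds a
// *libc.TLS (for example one obtained from the libc runtime via libc.NewTLS()),
// pcm points to 16-bit input samples, and out/nBytesOut point to a payload buffer
// and an int16 holding its capacity, as SKP_Silk_SDK_Encode expects.
func exampleEncodeSketch(tls *libc.TLS, pcm uintptr, nSamples int32, out uintptr, nBytesOut uintptr) int32 {
	// query the encoder state size and allocate state and control struct from the TLS stack
	encSizePtr := tls.Alloc(4)
	defer tls.Free(4)
	if ret := SKP_Silk_SDK_Get_Encoder_Size(tls, encSizePtr); ret != 0 {
		return ret
	}
	encSize := int(*(*int32)(unsafe.Pointer(encSizePtr)))
	enc := tls.Alloc(encSize)
	defer tls.Free(encSize)
	ctlSize := int(unsafe.Sizeof(SKP_SILK_SDK_EncControlStruct{}))
	ctl := tls.Alloc(ctlSize)
	defer tls.Free(ctlSize)

	// reset the encoder; this also fills ctl with the encoder's current defaults
	if ret := SKP_Silk_SDK_InitEncoder(tls, enc, ctl); ret != 0 {
		return ret
	}

	// caller-chosen settings (field names as generated above): 24 kHz in and out, 20 ms packets
	c := (*SKP_SILK_SDK_EncControlStruct)(unsafe.Pointer(ctl))
	c.FAPI_sampleRate = 24000
	c.FmaxInternalSampleRate = 24000
	c.FpacketSize = 480 // 20 ms at 24 kHz, expressed in samples
	c.FbitRate = 25000

	return SKP_Silk_SDK_Encode(tls, enc, ctl, pcm, nSamples, out, nBytesOut)
}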
  6377  
  6378  /***********************************************************************
  6379  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  6380  Redistribution and use in source and binary forms, with or without
  6381  modification, (subject to the limitations in the disclaimer below)
  6382  are permitted provided that the following conditions are met:
  6383  - Redistributions of source code must retain the above copyright notice,
  6384  this list of conditions and the following disclaimer.
  6385  - Redistributions in binary form must reproduce the above copyright
  6386  notice, this list of conditions and the following disclaimer in the
  6387  documentation and/or other materials provided with the distribution.
  6388  - Neither the name of Skype Limited, nor the names of specific
  6389  contributors, may be used to endorse or promote products derived from
  6390  this software without specific prior written permission.
  6391  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  6392  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  6393  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  6394  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  6395  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  6396  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  6397  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  6398  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  6399  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  6400  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  6401  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  6402  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  6403  ***********************************************************************/
  6404  
  6405  /******************/
  6406  /* Error messages */
  6407  /******************/
  6408  
  6409  /**************************/
  6410  /* Encoder error messages */
  6411  /**************************/
  6412  
  6413  /* Input length is not a multiple of 10 ms, or length is longer than the packet length */
  6414  
  6415  /* Sampling frequency not 8000, 12000, 16000 or 24000 Hertz */
  6416  
  6417  /* Packet size not 20, 40, 60, 80 or 100 ms */
  6418  
  6419  /* Allocated payload buffer too short */
  6420  
  6421  /* Loss rate not between 0 and 100 percent */
  6422  
  6423  /* Complexity setting not valid, use 0, 1 or 2 */
  6424  
  6425  /* Inband FEC setting not valid, use 0 or 1 */
  6426  
  6427  /* DTX setting not valid, use 0 or 1 */
  6428  
  6429  /* Internal encoder error */
  6430  
  6431  /**************************/
  6432  /* Decoder error messages */
  6433  /**************************/
  6434  
  6435  /* Output sampling frequency lower than internal decoded sampling frequency */
  6436  
  6437  /* Payload size exceeded the maximum allowed 1024 bytes */
  6438  
  6439  /* Payload has bit errors */
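
// Editorial note: the numeric codes behind the messages above are defined in
// SKP_Silk_errors.h and were folded into literals by the translator. Only the two
// values actually visible in SKP_Silk_SDK_Encode above are reproduced here, as a
// hypothetical constant block for readability.
const (
	exampleEncInputInvalidNoOfSamples = -1 // input length not a multiple of 10 ms, or longer than one packet
	exampleEncFsNotSupported          = -2 // API or maximum internal sampling rate not supported
)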
  6440  
  6441  /***********************************************************************
  6442  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  6443  Redistribution and use in source and binary forms, with or without
  6444  modification, (subject to the limitations in the disclaimer below)
  6445  are permitted provided that the following conditions are met:
  6446  - Redistributions of source code must retain the above copyright notice,
  6447  this list of conditions and the following disclaimer.
  6448  - Redistributions in binary form must reproduce the above copyright
  6449  notice, this list of conditions and the following disclaimer in the
  6450  documentation and/or other materials provided with the distribution.
  6451  - Neither the name of Skype Limited, nor the names of specific
  6452  contributors, may be used to endorse or promote products derived from
  6453  this software without specific prior written permission.
  6454  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  6455  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  6456  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  6457  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  6458  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  6459  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  6460  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  6461  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  6462  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  6463  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  6464  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  6465  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  6466  ***********************************************************************/
  6467  
  6468  /*******************/
  6469  /* Pitch estimator */
  6470  /*******************/
  6471  
  6472  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
  6473  
  6474  /* Bandwidth expansion for whitening filter in pitch analysis */
  6475  
  6476  /* Threshold used by pitch estimator for early escape */
  6477  
  6478  /*********************/
  6479  /* Linear prediction */
  6480  /*********************/
  6481  
  6482  /* LPC analysis defines: regularization and bandwidth expansion */
  6483  
  6484  /* LTP analysis defines */
  6485  
  6486  /* LTP quantization settings */
  6487  
  6488  /***********************/
  6489  /* High pass filtering */
  6490  /***********************/
  6491  
  6492  /* Smoothing parameters for low end of pitch frequency range estimation */
  6493  
  6494  /* Min and max values for low end of pitch frequency range estimation */
  6495  
  6496  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
  6497  
  6498  /***********/
  6499  /* Various */
  6500  /***********/
  6501  
  6502  /* Required speech activity for counting frame as active */
  6503  
  6504  /* Speech Activity LBRR enable threshold (needs tuning) */
  6505  
  6506  /*************************/
  6507  /* Perceptual parameters */
  6508  /*************************/
  6509  
  6510  /* reduction in coding SNR during low speech activity */
  6511  
  6512  /* factor for reducing quantization noise during voiced speech */
  6513  
  6514  /* factor for reducing quantization noise for unvoiced sparse signals */
  6515  
  6516  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
  6517  
  6518  /* warping control */
  6519  
  6520  /* fraction added to first autocorrelation value */
  6521  
  6522  /* noise shaping filter chirp factor */
  6523  
  6524  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
  6525  
  6526  /* gain reduction for fricatives */
  6527  
  6528  /* extra harmonic boosting (signal shaping) at low bitrates */
  6529  
  6530  /* extra harmonic boosting (signal shaping) for noisy input signals */
  6531  
  6532  /* harmonic noise shaping */
  6533  
  6534  /* extra harmonic noise shaping for high bitrates or noisy input */
  6535  
  6536  /* parameter for shaping noise towards higher frequencies */
  6537  
  6538  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
  6539  
  6540  /* parameter for applying a high-pass tilt to the input signal */
  6541  
  6542  /* parameter for extra high-pass tilt to the input signal at high rates */
  6543  
  6544  /* parameter for reducing noise at the very low frequencies */
  6545  
  6546  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
  6547  
  6548  /* noise floor to put a lower limit on the quantization step size */
  6549  
  6550  /* noise floor relative to active speech gain level */
  6551  
  6552  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
  6553  
  6554  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
  6555  
  6556  /* parameters defining the R/D tradeoff in the residual quantizer */
  6557  
  6558  /* Finds LPC vector from correlations, and converts to NLSF */
  6559  func SKP_Silk_find_LPC_FIX(tls *libc.TLS, NLSF_Q15 uintptr, interpIndex uintptr, prev_NLSFq_Q15 uintptr, useInterpolatedNLSFs int32, LPC_order int32, x uintptr, subfr_length int32) { /* SKP_Silk_find_LPC_FIX.c:32:6: */
  6560  	bp := tls.Alloc(832)
  6561  	defer tls.Free(832)
  6562  
  6563  	var k int32
  6564  	// var a_Q16 [16]int32 at bp+8, 64
  6565  
  6566  	var isInterpLower int32
  6567  	var shift int32
  6568  	// var S [16]int16 at bp+240, 32
  6569  
  6570  	// var res_nrg0 int32 at bp+816, 4
  6571  
  6572  	// var res_nrg1 int32 at bp+824, 4
  6573  
  6574  	// var rshift0 int32 at bp+820, 4
  6575  
  6576  	// var rshift1 int32 at bp+828, 4
  6577  
  6578  	/* Used only for LSF interpolation */
  6579  	// var a_tmp_Q16 [16]int32 at bp+80, 64
  6580  
  6581  	var res_nrg_interp int32
  6582  	// var res_nrg int32 at bp, 4
  6583  
  6584  	// var res_tmp_nrg int32 at bp+72, 4
  6585  
  6586  	var res_nrg_interp_Q int32
  6587  	// var res_nrg_Q int32 at bp+4, 4
  6588  
  6589  	// var res_tmp_nrg_Q int32 at bp+76, 4
  6590  
  6591  	// var a_tmp_Q12 [16]int16 at bp+208, 32
  6592  
  6593  	// var NLSF0_Q15 [16]int32 at bp+144, 64
  6594  
  6595  	// var LPC_res [272]int16 at bp+272, 544
  6596  
  6597  	/* Default: no interpolation */
  6598  	*(*int32)(unsafe.Pointer(interpIndex)) = 4
  6599  
  6600  	/* Burg AR analysis for the full frame */
  6601  	SKP_Silk_burg_modified(tls, bp /* &res_nrg */, bp+4 /* &res_nrg_Q */, bp+8 /* &a_Q16[0] */, x, subfr_length, 4, SKP_FIX_CONST(tls, 2.5e-5, 32), LPC_order)
  6602  
  6603  	SKP_Silk_bwexpander_32(tls, bp+8 /* &a_Q16[0] */, LPC_order, SKP_FIX_CONST(tls, 0.99995, 16))
  6604  
  6605  	if useInterpolatedNLSFs == 1 {
  6606  
  6607  		/* Optimal solution for last 10 ms */
  6608  		SKP_Silk_burg_modified(tls, bp+72 /* &res_tmp_nrg */, bp+76 /* &res_tmp_nrg_Q */, bp+80 /* &a_tmp_Q16[0] */, (x + uintptr(((int32(4)>>1)*subfr_length))*2),
  6609  			subfr_length, (int32(4) >> 1), SKP_FIX_CONST(tls, 2.5e-5, 32), LPC_order)
  6610  
  6611  		SKP_Silk_bwexpander_32(tls, bp+80 /* &a_tmp_Q16[0] */, LPC_order, SKP_FIX_CONST(tls, 0.99995, 16))
  6612  
  6613  		/* subtract residual energy here, as that's easier than adding it to the    */
  6614  		/* residual energy of the first 10 ms in each iteration of the search below */
  6615  		shift = (*(*int32)(unsafe.Pointer(bp + 76 /* res_tmp_nrg_Q */)) - *(*int32)(unsafe.Pointer(bp + 4 /* res_nrg_Q */)))
  6616  		if shift >= 0 {
  6617  			if shift < 32 {
  6618  				*(*int32)(unsafe.Pointer(bp /* res_nrg */)) = (*(*int32)(unsafe.Pointer(bp /* res_nrg */)) - ((*(*int32)(unsafe.Pointer(bp + 72 /* res_tmp_nrg */))) >> (shift)))
  6619  			}
  6620  		} else {
  6621  
  6622  			*(*int32)(unsafe.Pointer(bp /* res_nrg */)) = (((*(*int32)(unsafe.Pointer(bp /* res_nrg */))) >> (-shift)) - *(*int32)(unsafe.Pointer(bp + 72 /* res_tmp_nrg */)))
  6623  			*(*int32)(unsafe.Pointer(bp + 4 /* res_nrg_Q */)) = *(*int32)(unsafe.Pointer(bp + 76 /* res_tmp_nrg_Q */))
  6624  		}
  6625  
  6626  		/* Convert to NLSFs */
  6627  		SKP_Silk_A2NLSF(tls, NLSF_Q15, bp+80 /* &a_tmp_Q16[0] */, LPC_order)
  6628  
  6629  		/* Search over interpolation indices to find the one with lowest residual energy */
  6630  		for k = 3; k >= 0; k-- {
  6631  			/* Interpolate NLSFs for first half */
  6632  			SKP_Silk_interpolate(tls, bp+144 /* &NLSF0_Q15[0] */, prev_NLSFq_Q15, NLSF_Q15, k, LPC_order)
  6633  
  6634  			/* Convert to LPC for residual energy evaluation */
  6635  			SKP_Silk_NLSF2A_stable(tls, bp+208 /* &a_tmp_Q12[0] */, bp+144 /* &NLSF0_Q15[0] */, LPC_order)
  6636  
  6637  			/* Calculate residual energy with NLSF interpolation */
  6638  			libc.Xmemset(tls, bp+240 /* &S[0] */, 0, (uint32(LPC_order) * uint32(unsafe.Sizeof(int16(0)))))
  6639  			SKP_Silk_LPC_analysis_filter(tls, x, bp+208 /* &a_tmp_Q12[0] */, bp+240 /* &S[0] */, bp+272 /* &LPC_res[0] */, (2 * subfr_length), LPC_order)
  6640  
  6641  			SKP_Silk_sum_sqr_shift(tls, bp+816 /* &res_nrg0 */, bp+820 /* &rshift0 */, (bp + 272 /* &LPC_res[0] */ + uintptr(LPC_order)*2), (subfr_length - LPC_order))
  6642  			SKP_Silk_sum_sqr_shift(tls, bp+824 /* &res_nrg1 */, bp+828 /* &rshift1 */, ((bp + 272 /* &LPC_res[0] */ + uintptr(LPC_order)*2) + uintptr(subfr_length)*2), (subfr_length - LPC_order))
  6643  
  6644  			/* Add subframe energies from first half frame */
  6645  			shift = (*(*int32)(unsafe.Pointer(bp + 820 /* rshift0 */)) - *(*int32)(unsafe.Pointer(bp + 828 /* rshift1 */)))
  6646  			if shift >= 0 {
  6647  				*(*int32)(unsafe.Pointer(bp + 824 /* res_nrg1 */)) = ((*(*int32)(unsafe.Pointer(bp + 824 /* res_nrg1 */))) >> (shift))
  6648  				res_nrg_interp_Q = -*(*int32)(unsafe.Pointer(bp + 820 /* rshift0 */))
  6649  			} else {
  6650  				*(*int32)(unsafe.Pointer(bp + 816 /* res_nrg0 */)) = ((*(*int32)(unsafe.Pointer(bp + 816 /* res_nrg0 */))) >> (-shift))
  6651  				res_nrg_interp_Q = -*(*int32)(unsafe.Pointer(bp + 828 /* rshift1 */))
  6652  			}
  6653  			res_nrg_interp = ((*(*int32)(unsafe.Pointer(bp + 816 /* res_nrg0 */))) + (*(*int32)(unsafe.Pointer(bp + 824 /* res_nrg1 */))))
  6654  
  6655  			/* Compare with first half energy without NLSF interpolation, or best interpolated value so far */
  6656  			shift = (res_nrg_interp_Q - *(*int32)(unsafe.Pointer(bp + 4 /* res_nrg_Q */)))
  6657  			if shift >= 0 {
  6658  				if ((res_nrg_interp) >> (shift)) < *(*int32)(unsafe.Pointer(bp /* res_nrg */)) {
  6659  					isInterpLower = 1
  6660  				} else {
  6661  					isInterpLower = 0
  6662  				}
  6663  			} else {
  6664  				if -shift < 32 {
  6665  					if res_nrg_interp < ((*(*int32)(unsafe.Pointer(bp /* res_nrg */))) >> (-shift)) {
  6666  						isInterpLower = 1
  6667  					} else {
  6668  						isInterpLower = 0
  6669  					}
  6670  				} else {
  6671  					isInterpLower = 0
  6672  				}
  6673  			}
  6674  
  6675  			/* Determine whether current interpolated NLSFs are best so far */
  6676  			if isInterpLower == 1 {
  6677  				/* Interpolation has lower residual energy */
  6678  				*(*int32)(unsafe.Pointer(bp /* res_nrg */)) = res_nrg_interp
  6679  				*(*int32)(unsafe.Pointer(bp + 4 /* res_nrg_Q */)) = res_nrg_interp_Q
  6680  				*(*int32)(unsafe.Pointer(interpIndex)) = k
  6681  			}
  6682  		}
  6683  	}
  6684  
  6685  	if *(*int32)(unsafe.Pointer(interpIndex)) == 4 {
  6686  		/* NLSF interpolation is currently inactive, calculate NLSFs from full frame AR coefficients */
  6687  		SKP_Silk_A2NLSF(tls, NLSF_Q15, bp+8 /* &a_Q16[0] */, LPC_order)
  6688  	}
  6689  }
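
// Editorial sketch of the fixed-point comparison used in the interpolation search
// above: residual energies are carried as a mantissa plus a Q exponent
// (res_nrg/res_nrg_Q, res_nrg_interp/res_nrg_interp_Q), so comparing two of them
// means shifting the one with the larger exponent down to the other's scale first.
// exampleNrgLess is a hypothetical helper, not part of the generated code.
func exampleNrgLess(nrgA, qA, nrgB, qB int32) bool {
	shift := qA - qB
	if shift >= 0 {
		// bring A down to B's scale before comparing
		return (nrgA >> shift) < nrgB
	}
	if -shift >= 32 {
		// exponents too far apart to compare; the generated code treats this case as "not lower"
		return false
	}
	return nrgA < (nrgB >> (-shift))
}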
  6690  
  6691  func SKP_Silk_find_LTP_FIX(tls *libc.TLS, b_Q14 uintptr, WLTP uintptr, LTPredCodGain_Q7 uintptr, r_first uintptr, r_last uintptr, lag uintptr, Wght_Q15 uintptr, subfr_length int32, mem_offset int32, corr_rshifts uintptr) { /* SKP_Silk_find_LTP_FIX.c:39:6: */
  6692  	bp := tls.Alloc(128)
  6693  	defer tls.Free(128)
  6694  
  6695  	var i int32
  6696  	var k int32
  6697  	var lshift int32
  6698  	var r_ptr uintptr
  6699  	var lag_ptr uintptr
  6700  	var b_Q14_ptr uintptr
  6701  	var regu int32
  6702  	var WLTP_ptr uintptr
  6703  	// var b_Q16 [5]int32 at bp+40, 20
  6704  
  6705  	// var delta_b_Q14 [5]int32 at bp+108, 20
  6706  
  6707  	// var d_Q14 [4]int32 at bp+92, 16
  6708  
  6709  	// var nrg [4]int32 at bp+60, 16
  6710  
  6711  	var g_Q26 int32
  6712  	// var w [4]int32 at bp+76, 16
  6713  
  6714  	var WLTP_max int32
  6715  	var max_abs_d_Q14 int32
  6716  	var max_w_bits int32
  6717  	var temp32 int32
  6718  	var denom32 int32
  6719  	var extra_shifts int32
  6720  	// var rr_shifts int32 at bp+16, 4
  6721  
  6722  	var maxRshifts int32
  6723  	var maxRshifts_wxtra int32
  6724  	var LZs int32
  6725  	var LPC_res_nrg int32
  6726  	var LPC_LTP_res_nrg int32
  6727  	var div_Q16 int32
  6728  	// var Rr [5]int32 at bp+20, 20
  6729  
  6730  	// var rr [4]int32 at bp, 16
  6731  
  6732  	var wd int32
  6733  	var m_Q12 int32
  6734  
  6735  	b_Q14_ptr = b_Q14
  6736  	WLTP_ptr = WLTP
  6737  	r_ptr = (r_first + uintptr(mem_offset)*2)
  6738  	for k = 0; k < 4; k++ {
  6739  		if k == (int32(4) >> 1) { /* shift residual for last 10 ms */
  6740  			r_ptr = (r_last + uintptr(mem_offset)*2)
  6741  		}
  6742  		lag_ptr = (r_ptr - uintptr((*(*int32)(unsafe.Pointer(lag + uintptr(k)*4))+(5/2)))*2)
  6743  
  6744  		SKP_Silk_sum_sqr_shift(tls, (bp /* &rr */ + uintptr(k)*4), bp+16 /* &rr_shifts */, r_ptr, subfr_length) /* rr[ k ] in Q( -rr_shifts ) */
  6745  
  6746  		/* Assure headroom */
  6747  		LZs = SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4)))
  6748  		if LZs < 2 {
  6749  			*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4)) = func() int32 {
  6750  				if (2 - LZs) == 1 {
  6751  					return (((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) & 1))
  6752  				}
  6753  				return ((((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) >> ((2 - LZs) - 1)) + 1) >> 1)
  6754  			}()
  6755  			*(*int32)(unsafe.Pointer(bp + 16 /* rr_shifts */)) += (2 - LZs)
  6756  		}
  6757  		*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) = *(*int32)(unsafe.Pointer(bp + 16 /* rr_shifts */))
  6758  		SKP_Silk_corrMatrix_FIX(tls, lag_ptr, subfr_length, 5, 2, WLTP_ptr, (corr_rshifts + uintptr(k)*4)) /* WLTP_fix_ptr in Q( -corr_rshifts[ k ] ) */
  6759  
  6760  		/* The correlation vector always has lower max abs value than rr and/or RR so head room is assured */
  6761  		SKP_Silk_corrVector_FIX(tls, lag_ptr, r_ptr, subfr_length, 5, bp+20 /* &Rr[0] */, *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4))) /* Rr_fix_ptr   in Q( -corr_rshifts[ k ] ) */
  6762  		if *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) > *(*int32)(unsafe.Pointer(bp + 16 /* rr_shifts */)) {
  6763  			*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(bp + 16 /* rr_shifts */)))) /* rr[ k ] in Q( -corr_rshifts[ k ] ) */
  6764  		}
  6765  
  6766  		regu = 1
  6767  		regu = ((regu) + ((((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, (float64(float32(0.01) / float32(3))), 16))))) + ((((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, (float64(float32(0.01) / float32(3))), 16))))) >> 16)))
  6768  		regu = ((regu) + ((((*(*int32)(unsafe.Pointer((WLTP_ptr + uintptr((((0)*(5))+(0)))*4)))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, (float64(float32(0.01) / float32(3))), 16))))) + ((((*(*int32)(unsafe.Pointer((WLTP_ptr + uintptr((((0)*(5))+(0)))*4)))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, (float64(float32(0.01) / float32(3))), 16))))) >> 16)))
  6769  		regu = ((regu) + ((((*(*int32)(unsafe.Pointer((WLTP_ptr + uintptr((((5-1)*(5))+(5-1)))*4)))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, (float64(float32(0.01) / float32(3))), 16))))) + ((((*(*int32)(unsafe.Pointer((WLTP_ptr + uintptr((((5-1)*(5))+(5-1)))*4)))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, (float64(float32(0.01) / float32(3))), 16))))) >> 16)))
  6770  		SKP_Silk_regularize_correlations_FIX(tls, WLTP_ptr, (bp /* &rr */ + uintptr(k)*4), regu, 5)
  6771  
  6772  		SKP_Silk_solve_LDL_FIX(tls, WLTP_ptr, 5, bp+20 /* &Rr[0] */, bp+40 /* &b_Q16[0] */) /* WLTP_fix_ptr and Rr_fix_ptr both in Q(-corr_rshifts[k]) */
  6773  
  6774  		/* Limit and store in Q14 */
  6775  		SKP_Silk_fit_LTP(tls, bp+40 /* &b_Q16[0] */, b_Q14_ptr)
  6776  
  6777  		/* Calculate residual energy */
  6778  		*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4)) = SKP_Silk_residual_energy16_covar_FIX(tls, b_Q14_ptr, WLTP_ptr, bp+20 /* &Rr[0] */, *(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4)), 5, 14) /* nrg_fix in Q( -corr_rshifts[ k ] ) */
  6779  
  6780  		/* temp = Wght[ k ] / ( nrg[ k ] * Wght[ k ] + 0.01f * subfr_length ); */
  6781  		extra_shifts = SKP_min_int(tls, *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)), 2)
  6782  		denom32 = (((func() int32 {
  6783  			if (int32((libc.Int32FromUint32(0x80000000))) >> (1 + extra_shifts)) > (int32((0x7FFFFFFF)) >> (1 + extra_shifts)) {
  6784  				return func() int32 {
  6785  					if ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16)) > (int32((libc.Int32FromUint32(0x80000000))) >> (1 + extra_shifts)) {
  6786  						return (int32((libc.Int32FromUint32(0x80000000))) >> (1 + extra_shifts))
  6787  					}
  6788  					return func() int32 {
  6789  						if ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16)) < (int32((0x7FFFFFFF)) >> (1 + extra_shifts)) {
  6790  							return (int32((0x7FFFFFFF)) >> (1 + extra_shifts))
  6791  						}
  6792  						return ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16))
  6793  					}()
  6794  				}()
  6795  			}
  6796  			return func() int32 {
  6797  				if ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16)) > (int32((0x7FFFFFFF)) >> (1 + extra_shifts)) {
  6798  					return (int32((0x7FFFFFFF)) >> (1 + extra_shifts))
  6799  				}
  6800  				return func() int32 {
  6801  					if ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16)) < (int32((libc.Int32FromUint32(0x80000000))) >> (1 + extra_shifts)) {
  6802  						return (int32((libc.Int32FromUint32(0x80000000))) >> (1 + extra_shifts))
  6803  					}
  6804  					return ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16))
  6805  				}()
  6806  			}()
  6807  		}()) << (1 + extra_shifts)) + (((((subfr_length) >> 16) * (int32(int16(655)))) + ((((subfr_length) & 0x0000FFFF) * (int32(int16(655)))) >> 16)) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - extra_shifts))) /* Q( -corr_rshifts[ k ] + extra_shifts ) */
  6808  		denom32 = func() int32 {
  6809  			if (denom32) > (1) {
  6810  				return denom32
  6811  			}
  6812  			return 1
  6813  		}()
  6814  		/* Wght always < 0.5 in Q0 */
  6815  		temp32 = (((*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4))) << (16)) / (denom32))                        /* Q( 15 + 16 + corr_rshifts[k] - extra_shifts ) */
  6816  		temp32 = ((temp32) >> (((31 + *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4))) - extra_shifts) - 26)) /* Q26 */
  6817  
  6818  		/* Limit temp such that the below scaling never wraps around */
  6819  		WLTP_max = 0
  6820  		for i = 0; i < (5 * 5); i++ {
  6821  			WLTP_max = func() int32 {
  6822  				if (*(*int32)(unsafe.Pointer(WLTP_ptr + uintptr(i)*4))) > (WLTP_max) {
  6823  					return *(*int32)(unsafe.Pointer(WLTP_ptr + uintptr(i)*4))
  6824  				}
  6825  				return WLTP_max
  6826  			}()
  6827  		}
  6828  		lshift = ((SKP_Silk_CLZ32(tls, WLTP_max) - 1) - 3) /* keep 3 bits free for vq_nearest_neighbor_fix */
  6829  
  6830  		if ((26 - 18) + lshift) < 31 {
  6831  			temp32 = SKP_min_32(tls, temp32, (int32((1)) << ((26 - 18) + lshift)))
  6832  		}
  6833  
  6834  		SKP_Silk_scale_vector32_Q26_lshift_18(tls, WLTP_ptr, temp32, (5 * 5)) /* WLTP_ptr in Q( 18 - corr_rshifts[ k ] ) */
  6835  
  6836  		*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4)) = *(*int32)(unsafe.Pointer((WLTP_ptr + uintptr((((int32(5)>>1)*(5))+(int32(5)>>1)))*4))) /* w in Q( 18 - corr_rshifts[ k ] ) */
  6837  
  6838  		r_ptr += 2 * (uintptr(subfr_length))
  6839  		b_Q14_ptr += 2 * (uintptr(5))
  6840  		WLTP_ptr += 4 * (uintptr(5 * 5))
  6841  	}
  6842  
  6843  	maxRshifts = 0
  6844  	for k = 0; k < 4; k++ {
  6845  		maxRshifts = SKP_max_int(tls, *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)), maxRshifts)
  6846  	}
  6847  
  6848  	/* Compute LTP coding gain */
  6849  	if LTPredCodGain_Q7 != (uintptr(0)) {
  6850  		LPC_LTP_res_nrg = 0
  6851  		LPC_res_nrg = 0
  6852  		/* Check that no overflow will happen when adding */
  6853  		for k = 0; k < 4; k++ {
  6854  			LPC_res_nrg = ((LPC_res_nrg) + ((((((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp /* &rr[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16)) + (1)) >> (1 + (maxRshifts - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4))))))                     /*  Q( -maxRshifts ) */
  6855  			LPC_LTP_res_nrg = ((LPC_LTP_res_nrg) + ((((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 60 /* &nrg[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(Wght_Q15 + uintptr(k)*4)))))) >> 16)) + (1)) >> (1 + (maxRshifts - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)))))) /*  Q( -maxRshifts ) */
  6856  		}
  6857  		LPC_LTP_res_nrg = func() int32 {
  6858  			if (LPC_LTP_res_nrg) > (1) {
  6859  				return LPC_LTP_res_nrg
  6860  			}
  6861  			return 1
  6862  		}() /* avoid division by zero */
  6863  
  6864  		div_Q16 = SKP_DIV32_varQ(tls, LPC_res_nrg, LPC_LTP_res_nrg, 16)
  6865  		*(*int32)(unsafe.Pointer(LTPredCodGain_Q7)) = ((int32(int16(3))) * (int32((int16(SKP_Silk_lin2log(tls, div_Q16) - (int32(16) << 7))))))
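		// i.e. roughly 3 * log2( LPC_res_nrg / LPC_LTP_res_nrg ) in Q7, which approximates the LTP coding gain in dB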
  6866  
  6867  	}
  6868  
  6869  	/* smoothing */
  6870  	/* d = sum( B, 1 ); */
  6871  	b_Q14_ptr = b_Q14
  6872  	for k = 0; k < 4; k++ {
  6873  		*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4)) = 0
  6874  		for i = 0; i < 5; i++ {
  6875  			*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14 */ + uintptr(k)*4)) += (int32(*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2))))
  6876  		}
  6877  		b_Q14_ptr += 2 * (uintptr(5))
  6878  	}
  6879  
  6880  	/* m = ( w * d' ) / ( sum( w ) + 1e-3 ); */
  6881  
  6882  	/* Find maximum absolute value of d_Q14 and the bits used by w in Q0 */
  6883  	max_abs_d_Q14 = 0
  6884  	max_w_bits = 0
  6885  	for k = 0; k < 4; k++ {
  6886  		max_abs_d_Q14 = SKP_max_32(tls, max_abs_d_Q14, func() int32 {
  6887  			if (*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) > 0 {
  6888  				return *(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))
  6889  			}
  6890  			return -*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))
  6891  		}())
  6892  		/* w[ k ] is in Q( 18 - corr_rshifts[ k ] ) */
  6893  		/* Find bits needed in Q( 18 - maxRshifts ) */
  6894  		max_w_bits = SKP_max_32(tls, max_w_bits, (((32 - SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4)))) + *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4))) - maxRshifts))
  6895  	}
  6896  
  6897  	/* max_abs_d_Q14 = (5 << 15); worst case, i.e. LTP_ORDER * -SKP_int16_MIN */
  6898  
  6899  	/* How many bits are needed for w*d' in Q( 18 - maxRshifts ) in the worst case, i.e. all d_Q14's being equal to max_abs_d_Q14 */
  6900  	extra_shifts = (((max_w_bits + 32) - SKP_Silk_CLZ32(tls, max_abs_d_Q14)) - 14)
  6901  
  6902  	/* Subtract what we have available: bits in the output variable plus maxRshifts */
  6903  	extra_shifts = extra_shifts - (((32 - 1) - 2) + maxRshifts) /* Keep sign bit free as well as 2 bits for accumulation */
  6904  	extra_shifts = SKP_max_int(tls, extra_shifts, 0)
  6905  
  6906  	maxRshifts_wxtra = (maxRshifts + extra_shifts)
  6907  
  6908  	temp32 = ((int32((262)) >> (maxRshifts + extra_shifts)) + 1) /* 1e-3f in Q( 18 - (maxRshifts + extra_shifts) ) */
  6909  	wd = 0
  6910  	for k = 0; k < 4; k++ {
  6911  		/* w has at least 2 bits of headroom so no overflow should happen */
  6912  		temp32 = ((temp32) + ((*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) >> (maxRshifts_wxtra - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4))))) /* Q( 18 - maxRshifts_wxtra ) */
  6913  		wd = ((wd) + (((((((*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) >> (maxRshifts_wxtra - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4)))))) + (((((*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) >> (maxRshifts_wxtra - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4)))))) >> 16)) + (((*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) >> (maxRshifts_wxtra - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)))) * (func() int32 {
  6914  			if (16) == 1 {
  6915  				return (((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) & 1))
  6916  			}
  6917  			return ((((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> ((16) - 1)) + 1) >> 1)
  6918  		}()))) << (2))) /* Q( 18 - maxRshifts_wxtra ) */
  6919  	}
  6920  	m_Q12 = SKP_DIV32_varQ(tls, wd, temp32, 12)
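	// m_Q12 approximates ( w * d' ) / ( sum( w ) + 1e-3 ) from the comment above, in Q12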
  6921  
  6922  	b_Q14_ptr = b_Q14
  6923  	for k = 0; k < 4; k++ {
  6924  		/* w_fix[ k ] from Q( 18 - corr_rshifts[ k ] ) to Q( 16 ) */
  6925  		if (2 - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4))) > 0 {
  6926  			temp32 = ((*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) >> (2 - *(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4))))
  6927  		} else {
  6928  			temp32 = ((func() int32 {
  6929  				if (int32((libc.Int32FromUint32(0x80000000))) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2)) > (int32((0x7FFFFFFF)) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2)) {
  6930  					return func() int32 {
  6931  						if (*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) > (int32((libc.Int32FromUint32(0x80000000))) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2)) {
  6932  							return (int32((libc.Int32FromUint32(0x80000000))) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2))
  6933  						}
  6934  						return func() int32 {
  6935  							if (*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) < (int32((0x7FFFFFFF)) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2)) {
  6936  								return (int32((0x7FFFFFFF)) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2))
  6937  							}
  6938  							return *(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))
  6939  						}()
  6940  					}()
  6941  				}
  6942  				return func() int32 {
  6943  					if (*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) > (int32((0x7FFFFFFF)) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2)) {
  6944  						return (int32((0x7FFFFFFF)) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2))
  6945  					}
  6946  					return func() int32 {
  6947  						if (*(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))) < (int32((libc.Int32FromUint32(0x80000000))) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2)) {
  6948  							return (int32((libc.Int32FromUint32(0x80000000))) >> (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2))
  6949  						}
  6950  						return *(*int32)(unsafe.Pointer(bp + 76 /* &w[0] */ + uintptr(k)*4))
  6951  					}()
  6952  				}()
  6953  			}()) << (*(*int32)(unsafe.Pointer(corr_rshifts + uintptr(k)*4)) - 2))
  6954  		}
  6955  
  6956  		g_Q26 = (((SKP_FIX_CONST(tls, 0.1, 26)) / (((SKP_FIX_CONST(tls, 0.1, 26)) >> (10)) + temp32)) * ((func() int32 {
  6957  			if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  6958  				return func() int32 {
  6959  					if (func() int32 {
  6960  						if ((uint32((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) == uint32(0) {
  6961  							return func() int32 {
  6962  								if (((uint32(m_Q12)) & ((uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2))) ^ 0x80000000)) & 0x80000000) != 0 {
  6963  									return libc.Int32FromUint32(0x80000000)
  6964  								}
  6965  								return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  6966  							}()
  6967  						}
  6968  						return func() int32 {
  6969  							if ((((uint32(m_Q12)) ^ 0x80000000) & (uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) != 0 {
  6970  								return 0x7FFFFFFF
  6971  							}
  6972  							return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  6973  						}()
  6974  					}()) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  6975  						return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  6976  					}
  6977  					return func() int32 {
  6978  						if (func() int32 {
  6979  							if ((uint32((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) == uint32(0) {
  6980  								return func() int32 {
  6981  									if (((uint32(m_Q12)) & ((uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2))) ^ 0x80000000)) & 0x80000000) != 0 {
  6982  										return libc.Int32FromUint32(0x80000000)
  6983  									}
  6984  									return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  6985  								}()
  6986  							}
  6987  							return func() int32 {
  6988  								if ((((uint32(m_Q12)) ^ 0x80000000) & (uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) != 0 {
  6989  									return 0x7FFFFFFF
  6990  								}
  6991  								return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  6992  							}()
  6993  						}()) < (int32((0x7FFFFFFF)) >> (4)) {
  6994  							return (int32((0x7FFFFFFF)) >> (4))
  6995  						}
  6996  						return func() int32 {
  6997  							if ((uint32((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) == uint32(0) {
  6998  								return func() int32 {
  6999  									if (((uint32(m_Q12)) & ((uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2))) ^ 0x80000000)) & 0x80000000) != 0 {
  7000  										return libc.Int32FromUint32(0x80000000)
  7001  									}
  7002  									return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  7003  								}()
  7004  							}
  7005  							return func() int32 {
  7006  								if ((((uint32(m_Q12)) ^ 0x80000000) & (uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) != 0 {
  7007  									return 0x7FFFFFFF
  7008  								}
  7009  								return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  7010  							}()
  7011  						}()
  7012  					}()
  7013  				}()
  7014  			}
  7015  			return func() int32 {
  7016  				if (func() int32 {
  7017  					if ((uint32((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) == uint32(0) {
  7018  						return func() int32 {
  7019  							if (((uint32(m_Q12)) & ((uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2))) ^ 0x80000000)) & 0x80000000) != 0 {
  7020  								return libc.Int32FromUint32(0x80000000)
  7021  							}
  7022  							return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  7023  						}()
  7024  					}
  7025  					return func() int32 {
  7026  						if ((((uint32(m_Q12)) ^ 0x80000000) & (uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) != 0 {
  7027  							return 0x7FFFFFFF
  7028  						}
  7029  						return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  7030  					}()
  7031  				}()) > (int32((0x7FFFFFFF)) >> (4)) {
  7032  					return (int32((0x7FFFFFFF)) >> (4))
  7033  				}
  7034  				return func() int32 {
  7035  					if (func() int32 {
  7036  						if ((uint32((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) == uint32(0) {
  7037  							return func() int32 {
  7038  								if (((uint32(m_Q12)) & ((uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2))) ^ 0x80000000)) & 0x80000000) != 0 {
  7039  									return libc.Int32FromUint32(0x80000000)
  7040  								}
  7041  								return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  7042  							}()
  7043  						}
  7044  						return func() int32 {
  7045  							if ((((uint32(m_Q12)) ^ 0x80000000) & (uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) != 0 {
  7046  								return 0x7FFFFFFF
  7047  							}
  7048  							return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  7049  						}()
  7050  					}()) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7051  						return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7052  					}
  7053  					return func() int32 {
  7054  						if ((uint32((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) == uint32(0) {
  7055  							return func() int32 {
  7056  								if (((uint32(m_Q12)) & ((uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2))) ^ 0x80000000)) & 0x80000000) != 0 {
  7057  									return libc.Int32FromUint32(0x80000000)
  7058  								}
  7059  								return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  7060  							}()
  7061  						}
  7062  						return func() int32 {
  7063  							if ((((uint32(m_Q12)) ^ 0x80000000) & (uint32((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))) & 0x80000000) != 0 {
  7064  								return 0x7FFFFFFF
  7065  							}
  7066  							return ((m_Q12) - ((*(*int32)(unsafe.Pointer(bp + 92 /* &d_Q14[0] */ + uintptr(k)*4))) >> (2)))
  7067  						}()
  7068  					}()
  7069  				}()
  7070  			}()
  7071  		}()) << (4))) /* Q16 */
  7072  
  7073  		temp32 = 0
  7074  		for i = 0; i < 5; i++ {
  7075  			*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)) = int32(SKP_max_16(tls, *(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2)), int16(1638))) /* 1638_Q14 = 0.1_Q0 */
  7076  			temp32 = temp32 + (*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))                                                                        /* Q14 */
  7077  		}
  7078  		temp32 = ((g_Q26) / (temp32)) /* Q14->Q12 */
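		// The division above yields a common Q12 scale factor: g_Q26 is spread
		// across the five LTP taps in proportion to delta_b_Q14 (each tap floored
		// at roughly 0.1, i.e. 1638 in Q14), and the loop below adds scale*delta_b
		// to every tap, clamping each result to [-16000, 28000] in Q14.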
  7079  		for i = 0; i < 5; i++ {
  7080  			*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2)) = func() int16 {
  7081  				if (-16000) > (28000) {
  7082  					return func() int16 {
  7083  						if (int32(*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2))) + (((((func() int32 {
  7084  							if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7085  								return func() int32 {
  7086  									if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7087  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7088  									}
  7089  									return func() int32 {
  7090  										if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7091  											return (int32((0x7FFFFFFF)) >> (4))
  7092  										}
  7093  										return temp32
  7094  									}()
  7095  								}()
  7096  							}
  7097  							return func() int32 {
  7098  								if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7099  									return (int32((0x7FFFFFFF)) >> (4))
  7100  								}
  7101  								return func() int32 {
  7102  									if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7103  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7104  									}
  7105  									return temp32
  7106  								}()
  7107  							}()
  7108  						}()) << (4)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) + (((((func() int32 {
  7109  							if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7110  								return func() int32 {
  7111  									if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7112  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7113  									}
  7114  									return func() int32 {
  7115  										if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7116  											return (int32((0x7FFFFFFF)) >> (4))
  7117  										}
  7118  										return temp32
  7119  									}()
  7120  								}()
  7121  							}
  7122  							return func() int32 {
  7123  								if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7124  									return (int32((0x7FFFFFFF)) >> (4))
  7125  								}
  7126  								return func() int32 {
  7127  									if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7128  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7129  									}
  7130  									return temp32
  7131  								}()
  7132  							}()
  7133  						}()) << (4)) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) >> 16))) > (-16000) {
  7134  							return int16(-16000)
  7135  						}
  7136  						return func() int16 {
  7137  							if (int32(*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2))) + (((((func() int32 {
  7138  								if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7139  									return func() int32 {
  7140  										if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7141  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7142  										}
  7143  										return func() int32 {
  7144  											if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7145  												return (int32((0x7FFFFFFF)) >> (4))
  7146  											}
  7147  											return temp32
  7148  										}()
  7149  									}()
  7150  								}
  7151  								return func() int32 {
  7152  									if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7153  										return (int32((0x7FFFFFFF)) >> (4))
  7154  									}
  7155  									return func() int32 {
  7156  										if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7157  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7158  										}
  7159  										return temp32
  7160  									}()
  7161  								}()
  7162  							}()) << (4)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) + (((((func() int32 {
  7163  								if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7164  									return func() int32 {
  7165  										if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7166  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7167  										}
  7168  										return func() int32 {
  7169  											if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7170  												return (int32((0x7FFFFFFF)) >> (4))
  7171  											}
  7172  											return temp32
  7173  										}()
  7174  									}()
  7175  								}
  7176  								return func() int32 {
  7177  									if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7178  										return (int32((0x7FFFFFFF)) >> (4))
  7179  									}
  7180  									return func() int32 {
  7181  										if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7182  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7183  										}
  7184  										return temp32
  7185  									}()
  7186  								}()
  7187  							}()) << (4)) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) >> 16))) < (28000) {
  7188  								return int16(28000)
  7189  							}
  7190  							return (int16(int32(*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2))) + (((((func() int32 {
  7191  								if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7192  									return func() int32 {
  7193  										if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7194  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7195  										}
  7196  										return func() int32 {
  7197  											if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7198  												return (int32((0x7FFFFFFF)) >> (4))
  7199  											}
  7200  											return temp32
  7201  										}()
  7202  									}()
  7203  								}
  7204  								return func() int32 {
  7205  									if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7206  										return (int32((0x7FFFFFFF)) >> (4))
  7207  									}
  7208  									return func() int32 {
  7209  										if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7210  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7211  										}
  7212  										return temp32
  7213  									}()
  7214  								}()
  7215  							}()) << (4)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) + (((((func() int32 {
  7216  								if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7217  									return func() int32 {
  7218  										if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7219  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7220  										}
  7221  										return func() int32 {
  7222  											if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7223  												return (int32((0x7FFFFFFF)) >> (4))
  7224  											}
  7225  											return temp32
  7226  										}()
  7227  									}()
  7228  								}
  7229  								return func() int32 {
  7230  									if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7231  										return (int32((0x7FFFFFFF)) >> (4))
  7232  									}
  7233  									return func() int32 {
  7234  										if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7235  											return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7236  										}
  7237  										return temp32
  7238  									}()
  7239  								}()
  7240  							}()) << (4)) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) >> 16))))
  7241  						}()
  7242  					}()
  7243  				}
  7244  				return func() int16 {
  7245  					if (int32(*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2))) + (((((func() int32 {
  7246  						if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7247  							return func() int32 {
  7248  								if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7249  									return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7250  								}
  7251  								return func() int32 {
  7252  									if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7253  										return (int32((0x7FFFFFFF)) >> (4))
  7254  									}
  7255  									return temp32
  7256  								}()
  7257  							}()
  7258  						}
  7259  						return func() int32 {
  7260  							if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7261  								return (int32((0x7FFFFFFF)) >> (4))
  7262  							}
  7263  							return func() int32 {
  7264  								if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7265  									return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7266  								}
  7267  								return temp32
  7268  							}()
  7269  						}()
  7270  					}()) << (4)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) + (((((func() int32 {
  7271  						if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7272  							return func() int32 {
  7273  								if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7274  									return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7275  								}
  7276  								return func() int32 {
  7277  									if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7278  										return (int32((0x7FFFFFFF)) >> (4))
  7279  									}
  7280  									return temp32
  7281  								}()
  7282  							}()
  7283  						}
  7284  						return func() int32 {
  7285  							if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7286  								return (int32((0x7FFFFFFF)) >> (4))
  7287  							}
  7288  							return func() int32 {
  7289  								if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7290  									return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7291  								}
  7292  								return temp32
  7293  							}()
  7294  						}()
  7295  					}()) << (4)) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) >> 16))) > (28000) {
  7296  						return int16(28000)
  7297  					}
  7298  					return func() int16 {
  7299  						if (int32(*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2))) + (((((func() int32 {
  7300  							if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7301  								return func() int32 {
  7302  									if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7303  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7304  									}
  7305  									return func() int32 {
  7306  										if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7307  											return (int32((0x7FFFFFFF)) >> (4))
  7308  										}
  7309  										return temp32
  7310  									}()
  7311  								}()
  7312  							}
  7313  							return func() int32 {
  7314  								if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7315  									return (int32((0x7FFFFFFF)) >> (4))
  7316  								}
  7317  								return func() int32 {
  7318  									if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7319  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7320  									}
  7321  									return temp32
  7322  								}()
  7323  							}()
  7324  						}()) << (4)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) + (((((func() int32 {
  7325  							if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7326  								return func() int32 {
  7327  									if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7328  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7329  									}
  7330  									return func() int32 {
  7331  										if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7332  											return (int32((0x7FFFFFFF)) >> (4))
  7333  										}
  7334  										return temp32
  7335  									}()
  7336  								}()
  7337  							}
  7338  							return func() int32 {
  7339  								if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7340  									return (int32((0x7FFFFFFF)) >> (4))
  7341  								}
  7342  								return func() int32 {
  7343  									if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7344  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7345  									}
  7346  									return temp32
  7347  								}()
  7348  							}()
  7349  						}()) << (4)) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) >> 16))) < (-16000) {
  7350  							return int16(-16000)
  7351  						}
  7352  						return (int16(int32(*(*int16)(unsafe.Pointer(b_Q14_ptr + uintptr(i)*2))) + (((((func() int32 {
  7353  							if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7354  								return func() int32 {
  7355  									if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7356  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7357  									}
  7358  									return func() int32 {
  7359  										if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7360  											return (int32((0x7FFFFFFF)) >> (4))
  7361  										}
  7362  										return temp32
  7363  									}()
  7364  								}()
  7365  							}
  7366  							return func() int32 {
  7367  								if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7368  									return (int32((0x7FFFFFFF)) >> (4))
  7369  								}
  7370  								return func() int32 {
  7371  									if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7372  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7373  									}
  7374  									return temp32
  7375  								}()
  7376  							}()
  7377  						}()) << (4)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) + (((((func() int32 {
  7378  							if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  7379  								return func() int32 {
  7380  									if (temp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7381  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7382  									}
  7383  									return func() int32 {
  7384  										if (temp32) < (int32((0x7FFFFFFF)) >> (4)) {
  7385  											return (int32((0x7FFFFFFF)) >> (4))
  7386  										}
  7387  										return temp32
  7388  									}()
  7389  								}()
  7390  							}
  7391  							return func() int32 {
  7392  								if (temp32) > (int32((0x7FFFFFFF)) >> (4)) {
  7393  									return (int32((0x7FFFFFFF)) >> (4))
  7394  								}
  7395  								return func() int32 {
  7396  									if (temp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  7397  										return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  7398  									}
  7399  									return temp32
  7400  								}()
  7401  							}()
  7402  						}()) << (4)) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp + 108 /* &delta_b_Q14[0] */ + uintptr(i)*4)))))) >> 16))))
  7403  					}()
  7404  				}()
  7405  			}()
  7406  		}
  7407  		b_Q14_ptr += 2 * (uintptr(5))
  7408  	}
  7409  }
  7410  
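// The pattern ((a >> 16) * b) + (((a & 0x0000FFFF) * b) >> 16) that recurs in the
// expanded expressions above is ccgo's inline form of SILK's SKP_SMULWB macro: a
// 32x16-bit multiply that keeps the top 32 bits of the product, i.e. (a*b) >> 16
// computed without a 64-bit intermediate. A rough Go sketch of that arithmetic
// (the helper name is illustrative, not part of the generated API):
func smulwbSketch(a int32, b int16) int32 {
	// high half times b, plus the carry from the low half
	return (a>>16)*int32(b) + ((a&0x0000FFFF)*int32(b))>>16
}
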
  7411  func SKP_Silk_fit_LTP(tls *libc.TLS, LTP_coefs_Q16 uintptr, LTP_coefs_Q14 uintptr) { /* SKP_Silk_find_LTP_FIX.c:233:6: */
  7412  	var i int32
  7413  
  7414  	for i = 0; i < 5; i++ {
  7415  		*(*int16)(unsafe.Pointer(LTP_coefs_Q14 + uintptr(i)*2)) = func() int16 {
  7416  			if (func() int32 {
  7417  				if (2) == 1 {
  7418  					return (((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) & 1))
  7419  				}
  7420  				return ((((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) >> ((2) - 1)) + 1) >> 1)
  7421  			}()) > 0x7FFF {
  7422  				return int16(0x7FFF)
  7423  			}
  7424  			return func() int16 {
  7425  				if (func() int32 {
  7426  					if (2) == 1 {
  7427  						return (((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) & 1))
  7428  					}
  7429  					return ((((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) >> ((2) - 1)) + 1) >> 1)
  7430  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
  7431  					return libc.Int16FromInt32(0x8000)
  7432  				}
  7433  				return func() int16 {
  7434  					if (2) == 1 {
  7435  						return (int16(((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) & 1)))
  7436  					}
  7437  					return (int16((((*(*int32)(unsafe.Pointer(LTP_coefs_Q16 + uintptr(i)*4))) >> ((2) - 1)) + 1) >> 1))
  7438  				}()
  7439  			}()
  7440  		}()
  7441  	}
  7442  }
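
// SKP_Silk_fit_LTP above converts each LTP coefficient from Q16 to Q14 with a
// rounding right shift and saturates the result to the int16 range. A compact
// sketch of that per-coefficient step (the helper name is illustrative):
func fitLTPCoefSketch(coefQ16 int32) int16 {
	v := (coefQ16>>1 + 1) >> 1 // rounding shift by 2: Q16 -> Q14
	if v > 0x7FFF {
		return 0x7FFF
	}
	if v < -0x8000 {
		return -0x8000
	}
	return int16(v)
}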
  7443  
  7444  /***********************************************************************
  7445  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  7446  Redistribution and use in source and binary forms, with or without
  7447  modification, (subject to the limitations in the disclaimer below)
  7448  are permitted provided that the following conditions are met:
  7449  - Redistributions of source code must retain the above copyright notice,
  7450  this list of conditions and the following disclaimer.
  7451  - Redistributions in binary form must reproduce the above copyright
  7452  notice, this list of conditions and the following disclaimer in the
  7453  documentation and/or other materials provided with the distribution.
  7454  - Neither the name of Skype Limited, nor the names of specific
  7455  contributors, may be used to endorse or promote products derived from
  7456  this software without specific prior written permission.
  7457  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  7458  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  7459  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  7460  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  7461  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  7462  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  7463  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  7464  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  7465  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  7466  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  7467  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  7468  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  7469  ***********************************************************************/
  7470  
  7471  /*******************/
  7472  /* Pitch estimator */
  7473  /*******************/
  7474  
  7475  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
  7476  
  7477  /* Bandwidth expansion for whitening filter in pitch analysis */
  7478  
  7479  /* Threshold used by pitch estimator for early escape */
  7480  
  7481  /*********************/
  7482  /* Linear prediction */
  7483  /*********************/
  7484  
  7485  /* LPC analysis defines: regularization and bandwidth expansion */
  7486  
  7487  /* LTP analysis defines */
  7488  
  7489  /* LTP quantization settings */
  7490  
  7491  /***********************/
  7492  /* High pass filtering */
  7493  /***********************/
  7494  
  7495  /* Smoothing parameters for low end of pitch frequency range estimation */
  7496  
  7497  /* Min and max values for low end of pitch frequency range estimation */
  7498  
  7499  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
  7500  
  7501  /***********/
  7502  /* Various */
  7503  /***********/
  7504  
  7505  /* Required speech activity for counting frame as active */
  7506  
  7507  /* Speech Activity LBRR enable threshold (needs tuning) */
  7508  
  7509  /*************************/
  7510  /* Perceptual parameters */
  7511  /*************************/
  7512  
  7513  /* reduction in coding SNR during low speech activity */
  7514  
  7515  /* factor for reducing quantization noise during voiced speech */
  7516  
  7517  /* factor for reducing quantization noise for unvoiced sparse signals */
  7518  
  7519  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
  7520  
  7521  /* warping control */
  7522  
  7523  /* fraction added to first autocorrelation value */
  7524  
  7525  /* noise shaping filter chirp factor */
  7526  
  7527  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
  7528  
  7529  /* gain reduction for fricatives */
  7530  
  7531  /* extra harmonic boosting (signal shaping) at low bitrates */
  7532  
  7533  /* extra harmonic boosting (signal shaping) for noisy input signals */
  7534  
  7535  /* harmonic noise shaping */
  7536  
  7537  /* extra harmonic noise shaping for high bitrates or noisy input */
  7538  
  7539  /* parameter for shaping noise towards higher frequencies */
  7540  
  7541  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
  7542  
  7543  /* parameter for applying a high-pass tilt to the input signal */
  7544  
  7545  /* parameter for extra high-pass tilt to the input signal at high rates */
  7546  
  7547  /* parameter for reducing noise at the very low frequencies */
  7548  
  7549  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
  7550  
  7551  /* noise floor to put a lower limit on the quantization step size */
  7552  
  7553  /* noise floor relative to active speech gain level */
  7554  
  7555  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
  7556  
  7557  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
  7558  
  7559  /* parameters defining the R/D tradeoff in the residual quantizer */
  7560  
  7561  /* Find pitch lags */
  7562  func SKP_Silk_find_pitch_lags_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr, res uintptr, x uintptr) { /* SKP_Silk_find_pitch_lags_FIX.c:32:6: */
  7563  	bp := tls.Alloc(1416)
  7564  	defer tls.Free(1416)
  7565  
  7566  	var psPredSt uintptr = (psEnc + 20672 /* &.sPred */)
  7567  	var buf_len int32
  7568  	var i int32
  7569  	// var scale int32 at bp+1220, 4
  7570  
  7571  	var thrhld_Q15 int32
  7572  	var res_nrg int32
  7573  	var x_buf uintptr
  7574  	var x_buf_ptr uintptr
  7575  	// var Wsig [576]int16 at bp, 1152
  7576  
  7577  	var Wsig_ptr uintptr
  7578  	// var auto_corr [17]int32 at bp+1152, 68
  7579  
  7580  	// var rc_Q15 [16]int16 at bp+1224, 32
  7581  
  7582  	// var A_Q24 [16]int32 at bp+1256, 64
  7583  
  7584  	// var FiltState [16]int32 at bp+1352, 64
  7585  
  7586  	// var A_Q12 [16]int16 at bp+1320, 32
  7587  
  7588  	/******************************************/
  7589  	/* Setup buffer lengths etc based on Fs   */
  7590  	/******************************************/
  7591  	buf_len = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch) + (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length) << (1)))
  7592  
  7593  	/* Safety check */
  7594  
  7595  	x_buf = (x - uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)*2)
  7596  
  7597  	/*************************************/
  7598  	/* Estimate LPC AR coefficients      */
  7599  	/*************************************/
  7600  
  7601  	/* Calculate windowed signal */
  7602  
  7603  	/* First LA_LTP samples */
  7604  	x_buf_ptr = ((x_buf + uintptr(buf_len)*2) - uintptr((*SKP_Silk_predict_state_FIX)(unsafe.Pointer(psPredSt)).Fpitch_LPC_win_length)*2)
  7605  	Wsig_ptr = bp /* &Wsig[0] */
  7606  	SKP_Silk_apply_sine_window(tls, Wsig_ptr, x_buf_ptr, 1, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch)
  7607  
  7608  	/* Middle un-windowed samples */
  7609  	Wsig_ptr += 2 * (uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch))
  7610  	x_buf_ptr += 2 * (uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch))
  7611  	libc.Xmemcpy(tls, Wsig_ptr, x_buf_ptr, ((uint32((*SKP_Silk_predict_state_FIX)(unsafe.Pointer(psPredSt)).Fpitch_LPC_win_length - (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch) << (1)))) * uint32(unsafe.Sizeof(int16(0)))))
  7612  
  7613  	/* Last LA_LTP samples */
  7614  	Wsig_ptr += 2 * (uintptr((*SKP_Silk_predict_state_FIX)(unsafe.Pointer(psPredSt)).Fpitch_LPC_win_length - (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch) << (1))))
  7615  	x_buf_ptr += 2 * (uintptr((*SKP_Silk_predict_state_FIX)(unsafe.Pointer(psPredSt)).Fpitch_LPC_win_length - (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch) << (1))))
  7616  	SKP_Silk_apply_sine_window(tls, Wsig_ptr, x_buf_ptr, 2, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_pitch)
  7617  
  7618  	/* Calculate autocorrelation sequence */
  7619  	SKP_Silk_autocorr(tls, bp+1152 /* &auto_corr[0] */, bp+1220 /* &scale */, bp /* &Wsig[0] */, (*SKP_Silk_predict_state_FIX)(unsafe.Pointer(psPredSt)).Fpitch_LPC_win_length, ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder + 1))
  7620  
  7621  	/* Add white noise, as fraction of energy */
  7622  	*(*int32)(unsafe.Pointer(bp + 1152 /* &auto_corr[0] */)) = ((*(*int32)(unsafe.Pointer(bp + 1152 /* &auto_corr[0] */))) + ((((*(*int32)(unsafe.Pointer(bp + 1152 /* &auto_corr[0] */))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 16))))) + ((((*(*int32)(unsafe.Pointer(bp + 1152 /* &auto_corr[0] */))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 16))))) >> 16)))
  7623  
  7624  	/* Calculate the reflection coefficients using schur */
  7625  	res_nrg = SKP_Silk_schur(tls, bp+1224 /* &rc_Q15[0] */, bp+1152 /* &auto_corr[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder)
  7626  
  7627  	/* Prediction gain */
  7628  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FpredGain_Q16 = SKP_DIV32_varQ(tls, *(*int32)(unsafe.Pointer(bp + 1152 /* &auto_corr[0] */)), SKP_max_int(tls, res_nrg, 1), 16)
  7629  
  7630  	/* Convert reflection coefficients to prediction coefficients */
  7631  	SKP_Silk_k2a(tls, bp+1256 /* &A_Q24[0] */, bp+1224 /* &rc_Q15[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder)
  7632  
  7633  	/* Convert from 32-bit Q24 to 16-bit Q12 coefs */
  7634  	for i = 0; i < (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder; i++ {
  7635  		*(*int16)(unsafe.Pointer(bp + 1320 /* &A_Q12[0] */ + uintptr(i)*2)) = func() int16 {
  7636  			if ((*(*int32)(unsafe.Pointer(bp + 1256 /* &A_Q24[0] */ + uintptr(i)*4))) >> (12)) > 0x7FFF {
  7637  				return int16(0x7FFF)
  7638  			}
  7639  			return func() int16 {
  7640  				if ((*(*int32)(unsafe.Pointer(bp + 1256 /* &A_Q24[0] */ + uintptr(i)*4))) >> (12)) < (int32(libc.Int16FromInt32(0x8000))) {
  7641  					return libc.Int16FromInt32(0x8000)
  7642  				}
  7643  				return (int16((*(*int32)(unsafe.Pointer(bp + 1256 /* &A_Q24[0] */ + uintptr(i)*4))) >> (12)))
  7644  			}()
  7645  		}()
  7646  	}
  7647  
  7648  	/* Do BWE */
  7649  	SKP_Silk_bwexpander(tls, bp+1320 /* &A_Q12[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder, SKP_FIX_CONST(tls, 0.99, 16))
  7650  
  7651  	/*****************************************/
  7652  	/* LPC analysis filtering                */
  7653  	/*****************************************/
  7654  	libc.Xmemset(tls, bp+1352 /* &FiltState[0] */, 0, (uint32((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder) * uint32(unsafe.Sizeof(int32(0))))) /* Not really necessary, but Valgrind will complain otherwise */
  7655  	SKP_Silk_MA_Prediction(tls, x_buf, bp+1320 /* &A_Q12[0] */, bp+1352 /* &FiltState[0] */, res, buf_len, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder)
  7656  	libc.Xmemset(tls, res, 0, (uint32((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder) * uint32(unsafe.Sizeof(int16(0)))))
  7657  
  7658  	/* Threshold for pitch estimator */
  7659  	thrhld_Q15 = SKP_FIX_CONST(tls, 0.45, 15)
  7660  	thrhld_Q15 = ((thrhld_Q15) + ((int32(int16(SKP_FIX_CONST(tls, -0.004, 15)))) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationLPCOrder)))))
  7661  	thrhld_Q15 = ((thrhld_Q15) + ((int32(int16(SKP_FIX_CONST(tls, -0.1, 7)))) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))))
  7662  	thrhld_Q15 = ((thrhld_Q15) + ((int32(int16(SKP_FIX_CONST(tls, 0.15, 15)))) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fprev_sigtype)))))
  7663  	thrhld_Q15 = ((thrhld_Q15) + ((((SKP_FIX_CONST(tls, -0.1, 16)) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15)))) + ((((SKP_FIX_CONST(tls, -0.1, 16)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15)))) >> 16)))
  7664  	thrhld_Q15 = func() int32 {
  7665  		if (thrhld_Q15) > 0x7FFF {
  7666  			return 0x7FFF
  7667  		}
  7668  		return func() int32 {
  7669  			if (thrhld_Q15) < (int32(libc.Int16FromInt32(0x8000))) {
  7670  				return int32(libc.Int16FromInt32(0x8000))
  7671  			}
  7672  			return thrhld_Q15
  7673  		}()
  7674  	}()
  7675  
  7676  	/*****************************************/
  7677  	/* Call pitch estimator                  */
  7678  	/*****************************************/
  7679  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype = SKP_Silk_pitch_analysis_core(tls, res, psEncCtrl /* &.sCmn */ +108 /* &.pitchL */, (psEncCtrl /* &.sCmn */ /* &.lagIndex */),
  7680  		(psEncCtrl /* &.sCmn */ + 4 /* &.contourIndex */), (psEnc + 22908 /* &.LTPCorr_Q15 */), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FprevLag, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationThreshold_Q16,
  7681  		int32(int16(thrhld_Q15)), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpitchEstimationComplexity, 0)
  7682  }
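
// The pitch-estimator threshold built above is, in floating-point terms, roughly
//   thr = 0.45 - 0.004*lpcOrder - 0.1*speechActivity + 0.15*prevSigtype - 0.1*inputTilt
// saturated to the Q15 range; the Q15/Q7/Q16 products in the fixed-point code
// realize the same terms. A plain-float sketch with illustrative names (not the
// generated fixed-point implementation):
func pitchThresholdSketch(lpcOrder int32, speechActivity, prevSigtype, inputTilt float64) float64 {
	thr := 0.45 - 0.004*float64(lpcOrder) - 0.1*speechActivity + 0.15*prevSigtype - 0.1*inputTilt
	if thr > 1.0 { // analogue of the int16 saturation of thrhld_Q15
		thr = 1.0
	} else if thr < -1.0 {
		thr = -1.0
	}
	return thr
}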
  7683  
  7684  func SKP_Silk_find_pred_coefs_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr, res_pitch uintptr) { /* SKP_Silk_find_pred_coefs_FIX.c:31:6: */
  7685  	bp := tls.Alloc(1616)
  7686  	defer tls.Free(1616)
  7687  
  7688  	var i int32
  7689  	// var WLTP [100]int32 at bp+48, 400
  7690  
  7691  	// var invGains_Q16 [4]int32 at bp, 16
  7692  
  7693  	// var local_gains [4]int32 at bp+32, 16
  7694  
  7695  	// var Wght_Q15 [4]int32 at bp+16, 16
  7696  
  7697  	// var NLSF_Q15 [16]int32 at bp+1552, 64
  7698  
  7699  	var x_ptr uintptr
  7700  	var x_pre_ptr uintptr
  7701  	// var LPC_in_pre [544]int16 at bp+464, 1088
  7702  
  7703  	var tmp int32
  7704  	var min_gain_Q16 int32
  7705  	// var LTP_corrs_rshift [4]int32 at bp+448, 16
  7706  
  7707  	/* weighting for weighted least squares */
  7708  	min_gain_Q16 = (int32(0x7FFFFFFF) >> 6)
  7709  	for i = 0; i < 4; i++ {
  7710  		min_gain_Q16 = func() int32 {
  7711  			if (min_gain_Q16) < (*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(i)*4))) {
  7712  				return min_gain_Q16
  7713  			}
  7714  			return *(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(i)*4))
  7715  		}()
  7716  	}
  7717  	for i = 0; i < 4; i++ {
  7718  		/* Divide to Q16 */
  7719  
  7720  		/* Invert and normalize gains, and ensure that maximum invGains_Q16 is within range of a 16 bit int */
  7721  		*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4)) = SKP_DIV32_varQ(tls, min_gain_Q16, *(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(i)*4)), (16 - 2))
  7722  
  7723  		/* Ensure Wght_Q15 has a minimum value of 1 */
  7724  		*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4)) = func() int32 {
  7725  			if (*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4))) > (363) {
  7726  				return *(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4))
  7727  			}
  7728  			return 363
  7729  		}()
  7730  
  7731  		/* Square the inverted gains */
  7732  
  7733  		tmp = ((((*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4)))))) >> 16))
  7734  		*(*int32)(unsafe.Pointer(bp + 16 /* &Wght_Q15[0] */ + uintptr(i)*4)) = ((tmp) >> (1))
  7735  
  7736  		/* Invert the inverted and normalized gains */
  7737  		*(*int32)(unsafe.Pointer(bp + 32 /* &local_gains[0] */ + uintptr(i)*4)) = ((int32(1) << 16) / (*(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4))))
  7738  	}
  7739  
  7740  	if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
  7741  		/**********/
  7742  		/* VOICED */
  7743  		/**********/
  7744  
  7745  		/* LTP analysis */
  7746  		SKP_Silk_find_LTP_FIX(tls, psEncCtrl+208 /* &.LTPCoef_Q14 */, bp+48 /* &WLTP[0] */, (psEncCtrl + 616 /* &.LTPredCodGain_Q7 */), res_pitch,
  7747  			(res_pitch + uintptr((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)>>(1)))*2), psEncCtrl /* &.sCmn */ +108 /* &.pitchL */, bp+16, /* &Wght_Q15[0] */
  7748  			(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length, bp+448 /* &LTP_corrs_rshift[0] */)
  7749  
  7750  		/* Quantize LTP gain parameters */
  7751  		SKP_Silk_quant_LTP_gains_FIX(tls, psEncCtrl+208 /* &.LTPCoef_Q14 */, psEncCtrl /* &.sCmn */ +12 /* &.LTPIndex */, (psEncCtrl /* &.sCmn */ + 8 /* &.PERIndex */),
  7752  			bp+48 /* &WLTP[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fmu_LTP_Q8, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FLTPQuantLowComplexity)
  7753  
  7754  		/* Control LTP scaling */
  7755  		SKP_Silk_LTP_scale_ctrl_FIX(tls, psEnc, psEncCtrl)
  7756  
  7757  		/* Create LTP residual */
  7758  		SKP_Silk_LTP_analysis_filter_FIX(tls, bp+464 /* &LPC_in_pre[0] */, (((psEnc + 20748 /* &.x_buf */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)*2) - uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)*2),
  7759  			psEncCtrl+208 /* &.LTPCoef_Q14 */, psEncCtrl /* &.sCmn */ +108 /* &.pitchL */, bp /* &invGains_Q16[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
  7760  
  7761  	} else {
  7762  		/************/
  7763  		/* UNVOICED */
  7764  		/************/
  7765  		/* Create signal with prepended subframes, scaled by inverse gains */
  7766  		x_ptr = (((psEnc + 20748 /* &.x_buf */) + uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)*2) - uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)*2)
  7767  		x_pre_ptr = bp + 464 /* &LPC_in_pre[0] */
  7768  		for i = 0; i < 4; i++ {
  7769  			SKP_Silk_scale_copy_vector16(tls, x_pre_ptr, x_ptr, *(*int32)(unsafe.Pointer(bp /* &invGains_Q16[0] */ + uintptr(i)*4)),
  7770  				((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length + (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder))
  7771  			x_pre_ptr += 2 * (uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length + (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder))
  7772  			x_ptr += 2 * (uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length))
  7773  		}
  7774  
  7775  		libc.Xmemset(tls, psEncCtrl+208 /* &.LTPCoef_Q14 */, 0, ((uint32(4 * 5)) * uint32(unsafe.Sizeof(int16(0)))))
  7776  		(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7 = 0
  7777  	}
  7778  
  7779  	/* LPC_in_pre contains the LTP-filtered input for voiced, and the unfiltered input for unvoiced */
  7780  
  7781  	SKP_Silk_find_LPC_FIX(tls, bp+1552 /* &NLSF_Q15[0] */, (psEncCtrl /* &.sCmn */ + 68 /* &.NLSFInterpCoef_Q2 */), psEnc+20672 /* &.sPred */ +12, /* &.prev_NLSFq_Q15 */
  7782  		((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseInterpolatedNLSFs * (1 - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffirst_frame_after_reset)), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder,
  7783  		bp+464 /* &LPC_in_pre[0] */, ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length + (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder))
  7784  
  7785  	/* Quantize LSFs */
  7786  
  7787  	SKP_Silk_process_NLSFs_FIX(tls, psEnc, psEncCtrl, bp+1552 /* &NLSF_Q15[0] */)
  7788  
  7789  	/* Calculate residual energy using quantized LPC coefficients */
  7790  	SKP_Silk_residual_energy_FIX(tls, psEncCtrl+640 /* &.ResNrg */, psEncCtrl+656 /* &.ResNrgQ */, bp+464 /* &LPC_in_pre[0] */, psEncCtrl+144 /* &.PredCoef_Q12 */, bp+32, /* &local_gains[0] */
  7791  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
  7792  
  7793  	/* Copy to prediction struct for use in next frame for fluctuation reduction */
  7794  	libc.Xmemcpy(tls, psEnc+20672 /* &.sPred */ +12 /* &.prev_NLSFq_Q15 */, bp+1552 /* &NLSF_Q15[0] */, (uint32((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder) * uint32(unsafe.Sizeof(int32(0)))))
  7795  
  7796  }
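
// The weighting loop at the top of SKP_Silk_find_pred_coefs_FIX prepares the
// per-subframe quantities used by the weighted LTP/LPC fits: each subframe gain
// is inverted and normalized against the smallest gain, squared to give the
// least-squares weight, and inverted again to recover a local gain (the Q-format
// scalings and the small floor on the normalized inverse gain are omitted here).
// A float sketch with illustrative names, assuming positive gains:
func subframeWeightsSketch(gains []float64) (invGains, weights, localGains []float64) {
	minGain := gains[0] // assumes len(gains) > 0 (four subframes in the encoder)
	for _, g := range gains[1:] {
		if g < minGain {
			minGain = g
		}
	}
	for _, g := range gains {
		inv := minGain / g // normalized inverse gain, <= 1
		invGains = append(invGains, inv)
		weights = append(weights, inv*inv) // weight for the least-squares solves
		localGains = append(localGains, 1/inv)
	}
	return
}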
  7797  
  7798  /* Gain scalar quantization with hysteresis, uniform on log scale */
  7799  func SKP_Silk_gains_quant(tls *libc.TLS, ind uintptr, gain_Q16 uintptr, prev_ind uintptr, conditional int32) { /* SKP_Silk_gain_quant.c:35:6: */
  7800  	var k int32
  7801  
  7802  	for k = 0; k < 4; k++ {
  7803  		/* Add half of previous quantization error, convert to log scale, scale, floor() */
  7804  		*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) = (((int32(((65536 * (64 - 1)) / (((86 - 6) * 128) / 6))) >> 16) * (int32((int16(SKP_Silk_lin2log(tls, *(*int32)(unsafe.Pointer(gain_Q16 + uintptr(k)*4))) - (((6 * 128) / 6) + (16 * 128))))))) + (((((65536 * (64 - 1)) / (((86 - 6) * 128) / 6)) & 0x0000FFFF) * (int32((int16(SKP_Silk_lin2log(tls, *(*int32)(unsafe.Pointer(gain_Q16 + uintptr(k)*4))) - (((6 * 128) / 6) + (16 * 128))))))) >> 16))
  7805  
  7806  		/* Round towards previous quantized gain (hysteresis) */
  7807  		if *(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) < *(*int32)(unsafe.Pointer(prev_ind)) {
  7808  			*(*int32)(unsafe.Pointer(ind + uintptr(k)*4))++
  7809  		}
  7810  
  7811  		/* Compute delta indices and limit */
  7812  		if (k == 0) && (conditional == 0) {
  7813  			/* Full index */
  7814  			*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) = func() int32 {
  7815  				if (0) > (64 - 1) {
  7816  					return func() int32 {
  7817  						if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4))) > (0) {
  7818  							return 0
  7819  						}
  7820  						return func() int32 {
  7821  							if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4))) < (64 - 1) {
  7822  								return (64 - 1)
  7823  							}
  7824  							return *(*int32)(unsafe.Pointer(ind + uintptr(k)*4))
  7825  						}()
  7826  					}()
  7827  				}
  7828  				return func() int32 {
  7829  					if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4))) > (64 - 1) {
  7830  						return (64 - 1)
  7831  					}
  7832  					return func() int32 {
  7833  						if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4))) < (0) {
  7834  							return 0
  7835  						}
  7836  						return *(*int32)(unsafe.Pointer(ind + uintptr(k)*4))
  7837  					}()
  7838  				}()
  7839  			}()
  7840  			*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) = SKP_max_int(tls, *(*int32)(unsafe.Pointer(ind + uintptr(k)*4)), (*(*int32)(unsafe.Pointer(prev_ind)) + -4))
  7841  			*(*int32)(unsafe.Pointer(prev_ind)) = *(*int32)(unsafe.Pointer(ind + uintptr(k)*4))
  7842  		} else {
  7843  			/* Delta index */
  7844  			*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) = func() int32 {
  7845  				if (-4) > (40) {
  7846  					return func() int32 {
  7847  						if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(prev_ind))) > (-4) {
  7848  							return -4
  7849  						}
  7850  						return func() int32 {
  7851  							if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(prev_ind))) < (40) {
  7852  								return 40
  7853  							}
  7854  							return (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(prev_ind)))
  7855  						}()
  7856  					}()
  7857  				}
  7858  				return func() int32 {
  7859  					if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(prev_ind))) > (40) {
  7860  						return 40
  7861  					}
  7862  					return func() int32 {
  7863  						if (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(prev_ind))) < (-4) {
  7864  							return -4
  7865  						}
  7866  						return (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) - *(*int32)(unsafe.Pointer(prev_ind)))
  7867  					}()
  7868  				}()
  7869  			}()
  7870  			/* Accumulate deltas */
  7871  			*(*int32)(unsafe.Pointer(prev_ind)) += (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)))
  7872  			/* Shift to make non-negative */
  7873  			*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) -= (-4)
  7874  		}
  7875  
  7876  		/* Convert to linear scale and scale */
  7877  		*(*int32)(unsafe.Pointer(gain_Q16 + uintptr(k)*4)) = SKP_Silk_log2lin(tls, SKP_min_32(tls, ((((int32(((65536*(((86-6)*128)/6))/(64-1)))>>16)*(int32(int16(*(*int32)(unsafe.Pointer(prev_ind))))))+(((((65536*(((86-6)*128)/6))/(64-1))&0x0000FFFF)*(int32(int16(*(*int32)(unsafe.Pointer(prev_ind))))))>>16))+(((6*128)/6)+(16*128))), 3967)) /* 3968 = 31 in Q7 */
  7878  	}
  7879  }
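
// SKP_Silk_gains_quant above quantizes the four subframe gains uniformly on a
// log (dB-like) scale with hysteresis toward the previous index, then codes each
// value either as an absolute index (first subframe, when coding is not
// conditional) or as a delta limited to [-4, 40]. A simplified sketch of the
// index handling only, with illustrative names and the constants read off the
// expressions above:
func gainIndexSketch(rawIdx []int32, prevInd *int32, conditional bool) []int32 {
	const nLevels, minDelta, maxDelta = 64, -4, 40
	out := make([]int32, len(rawIdx))
	for k, idx := range rawIdx {
		if idx < *prevInd { // hysteresis: round toward the previous quantized gain
			idx++
		}
		if k == 0 && !conditional {
			// absolute index, clamped to the codebook and to a limited downward jump
			if idx < 0 {
				idx = 0
			} else if idx > nLevels-1 {
				idx = nLevels - 1
			}
			if idx < *prevInd+minDelta {
				idx = *prevInd + minDelta
			}
			*prevInd = idx
			out[k] = idx
		} else {
			// delta index, limited and shifted so the coded value is non-negative
			d := idx - *prevInd
			if d < minDelta {
				d = minDelta
			} else if d > maxDelta {
				d = maxDelta
			}
			*prevInd += d
			out[k] = d - minDelta
		}
	}
	return out
}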
  7880  
  7881  /* Gains scalar dequantization, uniform on log scale */
  7882  func SKP_Silk_gains_dequant(tls *libc.TLS, gain_Q16 uintptr, ind uintptr, prev_ind uintptr, conditional int32) { /* SKP_Silk_gain_quant.c:74:6: */
  7883  	var k int32
  7884  
  7885  	for k = 0; k < 4; k++ {
  7886  		if (k == 0) && (conditional == 0) {
  7887  			*(*int32)(unsafe.Pointer(prev_ind)) = *(*int32)(unsafe.Pointer(ind + uintptr(k)*4))
  7888  		} else {
  7889  			/* Delta index */
  7890  			*(*int32)(unsafe.Pointer(prev_ind)) += (*(*int32)(unsafe.Pointer(ind + uintptr(k)*4)) + -4)
  7891  		}
  7892  
  7893  		/* Convert to linear scale and scale */
  7894  		*(*int32)(unsafe.Pointer(gain_Q16 + uintptr(k)*4)) = SKP_Silk_log2lin(tls, SKP_min_32(tls, ((((int32(((65536*(((86-6)*128)/6))/(64-1)))>>16)*(int32(int16(*(*int32)(unsafe.Pointer(prev_ind))))))+(((((65536*(((86-6)*128)/6))/(64-1))&0x0000FFFF)*(int32(int16(*(*int32)(unsafe.Pointer(prev_ind))))))>>16))+(((6*128)/6)+(16*128))), 3967)) /* 3968 = 31 in Q7 */
  7895  	}
  7896  }
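
// SKP_Silk_gains_dequant mirrors the index handling in SKP_Silk_gains_quant: it
// re-accumulates the delta indices (undoing the +4 offset) into prev_ind and
// maps the result back to a linear Q16 gain through SKP_Silk_log2lin, capped at
// 3967 (just under 31.0 in Q7) so the antilog stays within 32 bits.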
  7897  
  7898  /***********************************************************************
  7899  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  7900  Redistribution and use in source and binary forms, with or without
  7901  modification, (subject to the limitations in the disclaimer below)
  7902  are permitted provided that the following conditions are met:
  7903  - Redistributions of source code must retain the above copyright notice,
  7904  this list of conditions and the following disclaimer.
  7905  - Redistributions in binary form must reproduce the above copyright
  7906  notice, this list of conditions and the following disclaimer in the
  7907  documentation and/or other materials provided with the distribution.
  7908  - Neither the name of Skype Limited, nor the names of specific
  7909  contributors, may be used to endorse or promote products derived from
  7910  this software without specific prior written permission.
  7911  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  7912  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  7913  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  7914  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  7915  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  7916  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  7917  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  7918  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  7919  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  7920  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  7921  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  7922  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  7923  ***********************************************************************/
  7924  
  7925  /*******************/
  7926  /* Pitch estimator */
  7927  /*******************/
  7928  
  7929  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
  7930  
  7931  /* Bandwidth expansion for whitening filter in pitch analysis */
  7932  
  7933  /* Threshold used by pitch estimator for early escape */
  7934  
  7935  /*********************/
  7936  /* Linear prediction */
  7937  /*********************/
  7938  
  7939  /* LPC analysis defines: regularization and bandwidth expansion */
  7940  
  7941  /* LTP analysis defines */
  7942  
  7943  /* LTP quantization settings */
  7944  
  7945  /***********************/
  7946  /* High pass filtering */
  7947  /***********************/
  7948  
  7949  /* Smoothing parameters for low end of pitch frequency range estimation */
  7950  
  7951  /* Min and max values for low end of pitch frequency range estimation */
  7952  
  7953  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
  7954  
  7955  /***********/
  7956  /* Various */
  7957  /***********/
  7958  
  7959  /* Required speech activity for counting frame as active */
  7960  
  7961  /* Speech Activity LBRR enable threshold (needs tuning) */
  7962  
  7963  /*************************/
  7964  /* Perceptual parameters */
  7965  /*************************/
  7966  
  7967  /* reduction in coding SNR during low speech activity */
  7968  
  7969  /* factor for reducing quantization noise during voiced speech */
  7970  
  7971  /* factor for reducing quantization noise for unvoiced sparse signals */
  7972  
  7973  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
  7974  
  7975  /* warping control */
  7976  
  7977  /* fraction added to first autocorrelation value */
  7978  
  7979  /* noise shaping filter chirp factor */
  7980  
  7981  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
  7982  
  7983  /* gain reduction for fricatives */
  7984  
  7985  /* extra harmonic boosting (signal shaping) at low bitrates */
  7986  
  7987  /* extra harmonic boosting (signal shaping) for noisy input signals */
  7988  
  7989  /* harmonic noise shaping */
  7990  
  7991  /* extra harmonic noise shaping for high bitrates or noisy input */
  7992  
  7993  /* parameter for shaping noise towards higher frequencies */
  7994  
  7995  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
  7996  
  7997  /* parameter for applying a high-pass tilt to the input signal */
  7998  
  7999  /* parameter for extra high-pass tilt to the input signal at high rates */
  8000  
  8001  /* parameter for reducing noise at the very low frequencies */
  8002  
  8003  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
  8004  
  8005  /* noise floor to put a lower limit on the quantization step size */
  8006  
  8007  /* noise floor relative to active speech gain level */
  8008  
  8009  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
  8010  
  8011  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
  8012  
  8013  /* parameters defining the R/D tradeoff in the residual quantizer */
  8014  
  8015  /* High-pass filter with cutoff frequency adaptation based on pitch lag statistics */
  8016  func SKP_Silk_HP_variable_cutoff_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr, out uintptr, in uintptr) { /* SKP_Silk_HP_variable_cutoff_FIX.c:37:6: */
  8017  	bp := tls.Alloc(20)
  8018  	defer tls.Free(20)
  8019  
  8020  	var quality_Q15 int32
  8021  	// var B_Q28 [3]int32 at bp, 12
  8022  
  8023  	// var A_Q28 [2]int32 at bp+12, 8
  8024  
  8025  	var Fc_Q19 int32
  8026  	var r_Q28 int32
  8027  	var r_Q22 int32
  8028  	var pitch_freq_Hz_Q16 int32
  8029  	var pitch_freq_log_Q7 int32
  8030  	var delta_freq_Q7 int32
  8031  
  8032  	/*********************************************/
  8033  	/* Estimate Low End of Pitch Frequency Range */
  8034  	/*********************************************/
  8035  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fprev_sigtype == 0 {
  8036  		/* difference, in log domain */
  8037  		pitch_freq_Hz_Q16 = (((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz) * (1000)) << (16)) / ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FprevLag))
  8038  		pitch_freq_log_Q7 = (SKP_Silk_lin2log(tls, pitch_freq_Hz_Q16) - (int32(16) << 7)) //0x70
  8039  
  8040  		/* adjustment based on quality */
  8041  		quality_Q15 = *(*int32)(unsafe.Pointer((psEncCtrl + 620 /* &.input_quality_bands_Q15 */)))
  8042  		pitch_freq_log_Q7 = ((pitch_freq_log_Q7) - ((((((((quality_Q15) << (2)) >> 16) * (int32(int16(quality_Q15)))) + (((((quality_Q15) << (2)) & 0x0000FFFF) * (int32(int16(quality_Q15)))) >> 16)) >> 16) * (int32((int16(pitch_freq_log_Q7 - 809))))) + ((((((((quality_Q15) << (2)) >> 16) * (int32(int16(quality_Q15)))) + (((((quality_Q15) << (2)) & 0x0000FFFF) * (int32(int16(quality_Q15)))) >> 16)) & 0x0000FFFF) * (int32((int16(pitch_freq_log_Q7 - 809))))) >> 16)))
  8043  		pitch_freq_log_Q7 = ((pitch_freq_log_Q7) + ((SKP_FIX_CONST(tls, 0.6, 15) - quality_Q15) >> (9)))
  8044  
  8045  		//delta_freq = pitch_freq_log - psEnc->variable_HP_smth1;
  8046  		delta_freq_Q7 = (pitch_freq_log_Q7 - (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth1_Q15) >> (8)))
  8047  		if delta_freq_Q7 < 0 {
  8048  			/* less smoothing for decreasing pitch frequency, to track something close to the minimum */
  8049  			delta_freq_Q7 = ((delta_freq_Q7) * (3))
  8050  		}
  8051  
  8052  		/* limit delta, to reduce impact of outliers */
  8053  		delta_freq_Q7 = func() int32 {
  8054  			if (-SKP_FIX_CONST(tls, 0.4, 7)) > (SKP_FIX_CONST(tls, 0.4, 7)) {
  8055  				return func() int32 {
  8056  					if (delta_freq_Q7) > (-SKP_FIX_CONST(tls, 0.4, 7)) {
  8057  						return -SKP_FIX_CONST(tls, 0.4, 7)
  8058  					}
  8059  					return func() int32 {
  8060  						if (delta_freq_Q7) < (SKP_FIX_CONST(tls, 0.4, 7)) {
  8061  							return SKP_FIX_CONST(tls, 0.4, 7)
  8062  						}
  8063  						return delta_freq_Q7
  8064  					}()
  8065  				}()
  8066  			}
  8067  			return func() int32 {
  8068  				if (delta_freq_Q7) > (SKP_FIX_CONST(tls, 0.4, 7)) {
  8069  					return SKP_FIX_CONST(tls, 0.4, 7)
  8070  				}
  8071  				return func() int32 {
  8072  					if (delta_freq_Q7) < (-SKP_FIX_CONST(tls, 0.4, 7)) {
  8073  						return -SKP_FIX_CONST(tls, 0.4, 7)
  8074  					}
  8075  					return delta_freq_Q7
  8076  				}()
  8077  			}()
  8078  		}()
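        		// (The nested func literals above are ccgo's expansion of the SKP_LIMIT
        		// macro: they clamp delta_freq_Q7 to +/-0.4 in Q7; the branch taken when
        		// the lower limit exceeds the upper one is unreachable here.)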
  8079  
  8080  		/* update smoother */
  8081  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth1_Q15 = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth1_Q15) + (((((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8) << (1)) * (delta_freq_Q7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) + (((((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8) << (1)) * (delta_freq_Q7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) >> 16)))
  8082  	}
  8083  	/* second smoother */
  8084  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth2_Q15 = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth2_Q15) + (((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth1_Q15 - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth2_Q15) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.015, 16))))) + (((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth1_Q15 - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth2_Q15) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.015, 16))))) >> 16)))
  8085  
  8086  	/* convert from log scale to Hertz */
  8087  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz = SKP_Silk_log2lin(tls, (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth2_Q15) >> (8)))
  8088  
  8089  	/* limit frequency range */
  8090  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz = func() int32 {
  8091  		if (SKP_FIX_CONST(tls, 80.0, 0)) > (SKP_FIX_CONST(tls, 150.0, 0)) {
  8092  			return func() int32 {
  8093  				if ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz) > (SKP_FIX_CONST(tls, 80.0, 0)) {
  8094  					return SKP_FIX_CONST(tls, 80.0, 0)
  8095  				}
  8096  				return func() int32 {
  8097  					if ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz) < (SKP_FIX_CONST(tls, 150.0, 0)) {
  8098  						return SKP_FIX_CONST(tls, 150.0, 0)
  8099  					}
  8100  					return (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz
  8101  				}()
  8102  			}()
  8103  		}
  8104  		return func() int32 {
  8105  			if ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz) > (SKP_FIX_CONST(tls, 150.0, 0)) {
  8106  				return SKP_FIX_CONST(tls, 150.0, 0)
  8107  			}
  8108  			return func() int32 {
  8109  				if ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz) < (SKP_FIX_CONST(tls, 80.0, 0)) {
  8110  					return SKP_FIX_CONST(tls, 80.0, 0)
  8111  				}
  8112  				return (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz
  8113  			}()
  8114  		}()
  8115  	}()
  8116  
  8117  	/********************************/
  8118  	/* Compute Filter Coefficients  */
  8119  	/********************************/
  8120  	/* compute cut-off frequency, in radians */
  8121  	//Fc_num   = (SKP_float)( 0.45f * 2.0f * 3.14159265359 * psEncCtrl->pitch_freq_low_Hz );
  8122  	//Fc_denom = (SKP_float)( 1e3f * psEnc->sCmn.fs_kHz );
  8123  
  8124  	Fc_Q19 = (((int32(int16(1482))) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fpitch_freq_low_Hz)))) / ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz)) // range: 3704 - 27787, 11-15 bits
  8125  
  8126  	r_Q28 = (SKP_FIX_CONST(tls, 1.0, 28) - ((SKP_FIX_CONST(tls, 0.92, 9)) * (Fc_Q19)))
  8127  
  8128  	/* b = r * [ 1; -2; 1 ]; */
  8129  	/* a = [ 1; -2 * r * ( 1 - 0.5 * Fc^2 ); r^2 ]; */
  8130  	*(*int32)(unsafe.Pointer(bp /* &B_Q28[0] */)) = r_Q28
  8131  	*(*int32)(unsafe.Pointer(bp /* &B_Q28[0] */ + 1*4)) = ((-r_Q28) << (1))
  8132  	*(*int32)(unsafe.Pointer(bp /* &B_Q28[0] */ + 2*4)) = r_Q28
  8133  
  8134  	// -r * ( 2 - Fc * Fc );
  8135  	r_Q22 = ((r_Q28) >> (6))
  8136  	*(*int32)(unsafe.Pointer(bp + 12 /* &A_Q28[0] */)) = (((((r_Q22) >> 16) * (int32((int16((((((Fc_Q19) >> 16) * (int32(int16(Fc_Q19)))) + ((((Fc_Q19) & 0x0000FFFF) * (int32(int16(Fc_Q19)))) >> 16)) + ((Fc_Q19) * (func() int32 {
  8137  		if (16) == 1 {
  8138  			return (((Fc_Q19) >> 1) + ((Fc_Q19) & 1))
  8139  		}
  8140  		return ((((Fc_Q19) >> ((16) - 1)) + 1) >> 1)
  8141  	}()))) - SKP_FIX_CONST(tls, 2.0, 22)))))) + ((((r_Q22) & 0x0000FFFF) * (int32((int16((((((Fc_Q19) >> 16) * (int32(int16(Fc_Q19)))) + ((((Fc_Q19) & 0x0000FFFF) * (int32(int16(Fc_Q19)))) >> 16)) + ((Fc_Q19) * (func() int32 {
  8142  		if (16) == 1 {
  8143  			return (((Fc_Q19) >> 1) + ((Fc_Q19) & 1))
  8144  		}
  8145  		return ((((Fc_Q19) >> ((16) - 1)) + 1) >> 1)
  8146  	}()))) - SKP_FIX_CONST(tls, 2.0, 22)))))) >> 16)) + ((r_Q22) * (func() int32 {
  8147  		if (16) == 1 {
  8148  			return ((((((((Fc_Q19) >> 16) * (int32(int16(Fc_Q19)))) + ((((Fc_Q19) & 0x0000FFFF) * (int32(int16(Fc_Q19)))) >> 16)) + ((Fc_Q19) * (func() int32 {
  8149  				if (16) == 1 {
  8150  					return (((Fc_Q19) >> 1) + ((Fc_Q19) & 1))
  8151  				}
  8152  				return ((((Fc_Q19) >> ((16) - 1)) + 1) >> 1)
  8153  			}()))) - SKP_FIX_CONST(tls, 2.0, 22)) >> 1) + (((((((Fc_Q19) >> 16) * (int32(int16(Fc_Q19)))) + ((((Fc_Q19) & 0x0000FFFF) * (int32(int16(Fc_Q19)))) >> 16)) + ((Fc_Q19) * (func() int32 {
  8154  				if (16) == 1 {
  8155  					return (((Fc_Q19) >> 1) + ((Fc_Q19) & 1))
  8156  				}
  8157  				return ((((Fc_Q19) >> ((16) - 1)) + 1) >> 1)
  8158  			}()))) - SKP_FIX_CONST(tls, 2.0, 22)) & 1))
  8159  		}
  8160  		return (((((((((Fc_Q19) >> 16) * (int32(int16(Fc_Q19)))) + ((((Fc_Q19) & 0x0000FFFF) * (int32(int16(Fc_Q19)))) >> 16)) + ((Fc_Q19) * (func() int32 {
  8161  			if (16) == 1 {
  8162  				return (((Fc_Q19) >> 1) + ((Fc_Q19) & 1))
  8163  			}
  8164  			return ((((Fc_Q19) >> ((16) - 1)) + 1) >> 1)
  8165  		}()))) - SKP_FIX_CONST(tls, 2.0, 22)) >> ((16) - 1)) + 1) >> 1)
  8166  	}())))
  8167  	*(*int32)(unsafe.Pointer(bp + 12 /* &A_Q28[0] */ + 1*4)) = (((((r_Q22) >> 16) * (int32(int16(r_Q22)))) + ((((r_Q22) & 0x0000FFFF) * (int32(int16(r_Q22)))) >> 16)) + ((r_Q22) * (func() int32 {
  8168  		if (16) == 1 {
  8169  			return (((r_Q22) >> 1) + ((r_Q22) & 1))
  8170  		}
  8171  		return ((((r_Q22) >> ((16) - 1)) + 1) >> 1)
  8172  	}())))
  8173  
  8174  	/********************************/
  8175  	/* High-Pass Filter             */
  8176  	/********************************/
  8177  	SKP_Silk_biquad_alt(tls, in, bp /* &B_Q28[0] */, bp+12 /* &A_Q28[0] */, psEnc /* &.sCmn */ +15008 /* &.In_HP_State */, out, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fframe_length)
  8178  }
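
        // Reference sketch (hand-written, not part of the ccgo output): both
        // variable_HP_smth updates above are first-order recursive smoothers of the
        // log-domain cutoff frequency. Stripped of the Q15/Q16 bookkeeping, each
        // update is simply state += coef*(target - state), where the
        // SKP_FIX_CONST(0.1, 16) and SKP_FIX_CONST(0.015, 16) factors (the former
        // additionally weighted by speech activity) set the smoothing rate.
        // smoothStepRef is a hypothetical float helper, not used by the codec.
        func smoothStepRef(state, target, coef float64) float64 {
        	return state + coef*(target-state)
        }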
  8179  
  8180  /*********************************/
  8181  /* Initialize Silk Encoder state */
  8182  /*********************************/
  8183  func SKP_Silk_init_encoder_FIX(tls *libc.TLS, psEnc uintptr) int32 { /* SKP_Silk_init_encoder_FIX.c:33:9: */
  8184  	var ret int32 = 0
  8185  	/* Clear the entire encoder state */
  8186  	libc.Xmemset(tls, psEnc, 0, uint32(unsafe.Sizeof(SKP_Silk_encoder_state_FIX{})))
  8187  
  8188  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth1_Q15 = 200844 /* = log2( 70 Hz ) in Q15 */
  8189  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fvariable_HP_smth2_Q15 = 200844 /* = log2( 70 Hz ) in Q15 */
  8190  
  8191  	/* Used to deactivate e.g. LSF interpolation and fluctuation reduction */
  8192  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffirst_frame_after_reset = 1
  8193  
  8194  	/* Initialize Silk VAD */
  8195  	ret = ret + (SKP_Silk_VAD_Init(tls, (psEnc /* &.sCmn */ + 15032 /* &.sVAD */)))
  8196  
  8197  	/* Initialize NSQ */
  8198  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsNSQ.Fprev_inv_gain_Q16 = 65536
  8199  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FsNSQ_LBRR.Fprev_inv_gain_Q16 = 65536
  8200  
  8201  	return ret
  8202  }
  8203  
  8204  /* sum = for(i=0;i<len;i++) inVec1[i]*inVec2[i];   ---   inner product           */
  8205  /* Note for ARM asm:                                                              */
  8206  /*        * inVec1 and inVec2 should be at least 2-byte aligned (or declared as short/int16) */
  8207  /*        * len should be a positive 16-bit integer                               */
  8208  /*        * only when len > 6 can the number of memory accesses be halved         */
  8209  
  8210  func SKP_Silk_inner_prod_aligned(tls *libc.TLS, inVec1 uintptr, inVec2 uintptr, len int32) int32 { /* SKP_Silk_inner_prod_aligned.c:43:11: */
  8211  	var i int32
  8212  	var sum int32 = 0
  8213  	for i = 0; i < len; i++ {
  8214  		sum = ((sum) + ((int32(*(*int16)(unsafe.Pointer(inVec1 + uintptr(i)*2)))) * (int32(*(*int16)(unsafe.Pointer(inVec2 + uintptr(i)*2))))))
  8215  	}
  8216  	return sum
  8217  }
  8218  
  8219  func SKP_Silk_inner_prod16_aligned_64(tls *libc.TLS, inVec1 uintptr, inVec2 uintptr, len int32) int64_t { /* SKP_Silk_inner_prod_aligned.c:57:11: */
  8220  	var i int32
  8221  	var sum int64_t = int64(0)
  8222  	for i = 0; i < len; i++ {
  8223  		sum = ((sum) + (int64_t((int32(*(*int16)(unsafe.Pointer(inVec1 + uintptr(i)*2)))) * (int32(*(*int16)(unsafe.Pointer(inVec2 + uintptr(i)*2)))))))
  8224  	}
  8225  	return sum
  8226  }
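
        // Reference sketch (hand-written, not part of the ccgo output): the two
        // helpers above are plain dot products over int16 vectors addressed through
        // raw pointers; the second variant merely widens the accumulator to 64 bits.
        // innerProd16Ref is a hypothetical slice-based equivalent, not used by the
        // codec; it assumes len(a) == len(b).
        func innerProd16Ref(a, b []int16) int64 {
        	var sum int64
        	for i := range a {
        		sum += int64(a[i]) * int64(b[i])
        	}
        	return sum
        }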
  8227  
  8228  /* Interpolate two vectors */
  8229  func SKP_Silk_interpolate(tls *libc.TLS, xi uintptr, x0 uintptr, x1 uintptr, ifact_Q2 int32, d int32) { /* SKP_Silk_interpolate.c:31:6: */
  8230  	var i int32
  8231  
  8232  	for i = 0; i < d; i++ {
  8233  		*(*int32)(unsafe.Pointer(xi + uintptr(i)*4)) = (*(*int32)(unsafe.Pointer(x0 + uintptr(i)*4)) + (((*(*int32)(unsafe.Pointer(x1 + uintptr(i)*4)) - *(*int32)(unsafe.Pointer(x0 + uintptr(i)*4))) * (ifact_Q2)) >> (2)))
  8234  	}
  8235  }
  8236  
  8237  /* Step-up function: converts reflection coefficients to prediction coefficients */
  8238  func SKP_Silk_k2a(tls *libc.TLS, A_Q24 uintptr, rc_Q15 uintptr, order int32) { /* SKP_Silk_k2a.c:40:6: */
  8239  	bp := tls.Alloc(64)
  8240  	defer tls.Free(64)
  8241  
  8242  	var k int32
  8243  	var n int32
  8244  	// var Atmp [16]int32 at bp, 64
  8245  
  8246  	for k = 0; k < order; k++ {
  8247  		for n = 0; n < k; n++ {
  8248  			*(*int32)(unsafe.Pointer(bp /* &Atmp[0] */ + uintptr(n)*4)) = *(*int32)(unsafe.Pointer(A_Q24 + uintptr(n)*4))
  8249  		}
  8250  		for n = 0; n < k; n++ {
  8251  			*(*int32)(unsafe.Pointer(A_Q24 + uintptr(n)*4)) = ((*(*int32)(unsafe.Pointer(A_Q24 + uintptr(n)*4))) + (((((*(*int32)(unsafe.Pointer(bp /* &Atmp[0] */ + uintptr(((k-n)-1))*4))) << (1)) >> 16) * (int32(*(*int16)(unsafe.Pointer(rc_Q15 + uintptr(k)*2))))) + (((((*(*int32)(unsafe.Pointer(bp /* &Atmp[0] */ + uintptr(((k-n)-1))*4))) << (1)) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(rc_Q15 + uintptr(k)*2))))) >> 16)))
  8252  		}
  8253  		*(*int32)(unsafe.Pointer(A_Q24 + uintptr(k)*4)) = -((int32(*(*int16)(unsafe.Pointer(rc_Q15 + uintptr(k)*2)))) << (9))
  8254  	}
  8255  }
  8256  
  8257  /* Step-up function: converts reflection coefficients to prediction coefficients */
  8258  func SKP_Silk_k2a_Q16(tls *libc.TLS, A_Q24 uintptr, rc_Q16 uintptr, order int32) { /* SKP_Silk_k2a_Q16.c:40:6: */
  8259  	bp := tls.Alloc(64)
  8260  	defer tls.Free(64)
  8261  
  8262  	var k int32
  8263  	var n int32
  8264  	// var Atmp [16]int32 at bp, 64
  8265  
  8266  	for k = 0; k < order; k++ {
  8267  		for n = 0; n < k; n++ {
  8268  			*(*int32)(unsafe.Pointer(bp /* &Atmp[0] */ + uintptr(n)*4)) = *(*int32)(unsafe.Pointer(A_Q24 + uintptr(n)*4))
  8269  		}
  8270  		for n = 0; n < k; n++ {
  8271  			*(*int32)(unsafe.Pointer(A_Q24 + uintptr(n)*4)) = (((*(*int32)(unsafe.Pointer(A_Q24 + uintptr(n)*4))) + ((((*(*int32)(unsafe.Pointer(bp /* &Atmp[0] */ + uintptr(((k-n)-1))*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(rc_Q16 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp /* &Atmp[0] */ + uintptr(((k-n)-1))*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(rc_Q16 + uintptr(k)*4)))))) >> 16))) + ((*(*int32)(unsafe.Pointer(bp /* &Atmp[0] */ + uintptr(((k-n)-1))*4))) * (func() int32 {
  8272  				if (16) == 1 {
  8273  					return (((*(*int32)(unsafe.Pointer(rc_Q16 + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(rc_Q16 + uintptr(k)*4))) & 1))
  8274  				}
  8275  				return ((((*(*int32)(unsafe.Pointer(rc_Q16 + uintptr(k)*4))) >> ((16) - 1)) + 1) >> 1)
  8276  			}())))
  8277  		}
  8278  		*(*int32)(unsafe.Pointer(A_Q24 + uintptr(k)*4)) = -((*(*int32)(unsafe.Pointer(rc_Q16 + uintptr(k)*4))) << (8))
  8279  	}
  8280  }
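
        // Reference sketch (hand-written, not part of the ccgo output): both k2a
        // variants above implement the standard "step-up" recursion that converts
        // reflection coefficients rc[k] into direct-form prediction coefficients
        // (in SILK's sign convention); they differ only in the Q15 vs. Q16 format
        // of rc. stepUpRef is a hypothetical float equivalent, not used by the codec.
        func stepUpRef(rc []float64) []float64 {
        	a := make([]float64, len(rc))
        	for k := range rc {
        		tmp := append([]float64(nil), a[:k]...) // previous-order coefficients
        		for n := 0; n < k; n++ {
        			a[n] = tmp[n] + rc[k]*tmp[k-n-1]
        		}
        		a[k] = -rc[k]
        	}
        	return a
        }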
  8281  
  8282  /* Resets LBRR buffer, used if packet size changes */
  8283  func SKP_Silk_LBRR_reset(tls *libc.TLS, psEncC uintptr) { /* SKP_Silk_LBRR_reset.c:31:6: */
  8284  	var i int32
  8285  
  8286  	for i = 0; i < 2; i++ {
  8287  		(*SKP_SILK_LBRR_struct)(unsafe.Pointer((psEncC + 16256 /* &.LBRR_buffer */) + uintptr(i)*1032)).Fusage = 0
  8288  	}
  8289  }
  8290  
  8291  /* Approximation of 128 * log2() (very close inverse of approx 2^() below) */
  8292  /* Convert input to a log scale    */
  8293  func SKP_Silk_lin2log(tls *libc.TLS, inLin int32) int32 { /* SKP_Silk_lin2log.c:40:11: */
  8294  	bp := tls.Alloc(8)
  8295  	defer tls.Free(8)
  8296  
  8297  	// var lz int32 at bp, 4
  8298  
  8299  	// var frac_Q7 int32 at bp+4, 4
  8300  
  8301  	SKP_Silk_CLZ_FRAC(tls, inLin, bp /* &lz */, bp+4 /* &frac_Q7 */)
  8302  
  8303  	/* Piece-wise parabolic approximation */
  8304  	return (((31 - *(*int32)(unsafe.Pointer(bp /* lz */))) << (7)) + ((*(*int32)(unsafe.Pointer(bp + 4 /* frac_Q7 */))) + (((((*(*int32)(unsafe.Pointer(bp + 4 /* frac_Q7 */))) * (128 - *(*int32)(unsafe.Pointer(bp + 4 /* frac_Q7 */)))) >> 16) * (int32(int16(179)))) + (((((*(*int32)(unsafe.Pointer(bp + 4 /* frac_Q7 */))) * (128 - *(*int32)(unsafe.Pointer(bp + 4 /* frac_Q7 */)))) & 0x0000FFFF) * (int32(int16(179)))) >> 16))))
  8305  }
  8306  
  8307  /* Approximation of 2^() (very close inverse of SKP_Silk_lin2log()) */
  8308  /* Convert input to a linear scale    */
  8309  func SKP_Silk_log2lin(tls *libc.TLS, inLog_Q7 int32) int32 { /* SKP_Silk_log2lin.c:40:11: */
  8310  	var out int32
  8311  	var frac_Q7 int32
  8312  
  8313  	if inLog_Q7 < 0 {
  8314  		return 0
  8315  	} else if inLog_Q7 >= (int32(31) << 7) {
  8316  		/* Saturate, and prevent wrap-around */
  8317  		return 0x7FFFFFFF
  8318  	}
  8319  
  8320  	out = (int32((1)) << ((inLog_Q7) >> (7)))
  8321  	frac_Q7 = (inLog_Q7 & 0x7F)
  8322  	if inLog_Q7 < 2048 {
  8323  		/* Piece-wise parabolic approximation */
  8324  		out = ((out) + (((out) * ((frac_Q7) + (((((frac_Q7) * (128 - frac_Q7)) >> 16) * (int32(int16(-174)))) + (((((frac_Q7) * (128 - frac_Q7)) & 0x0000FFFF) * (int32(int16(-174)))) >> 16)))) >> (7)))
  8325  	} else {
  8326  		/* Piece-wise parabolic approximation */
  8327  		out = ((out) + (((out) >> (7)) * ((frac_Q7) + (((((frac_Q7) * (128 - frac_Q7)) >> 16) * (int32(int16(-174)))) + (((((frac_Q7) * (128 - frac_Q7)) & 0x0000FFFF) * (int32(int16(-174)))) >> 16)))))
  8328  	}
  8329  	return out
  8330  }
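
        // Note: SKP_Silk_lin2log approximates 128*log2(x) (log2 in Q7) and
        // SKP_Silk_log2lin approximates 2^(x/128), so the two are near-exact
        // inverses; the frac*(128-frac) terms are a parabolic correction between
        // powers of two. For example, lin2log(65536) = 16 << 7 = 2048 exactly,
        // which is why the HP cutoff code above subtracts 16 << 7 after calling
        // lin2log on a Q16 value.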
  8331  
  8332  /* Compute inverse of LPC prediction gain, and                          */
  8333  /* test if LPC coefficients are stable (all poles within unit circle)   */
  8334  func LPC_inverse_pred_gain_QA(tls *libc.TLS, invGain_Q30 uintptr, A_QA uintptr, order int32) int32 { /* SKP_Silk_LPC_inv_pred_gain.c:43:16: */
  8335  	var k int32
  8336  	var n int32
  8337  	var headrm int32
  8338  	var rc_Q31 int32
  8339  	var rc_mult1_Q30 int32
  8340  	var rc_mult2_Q16 int32
  8341  	var tmp_QA int32
  8342  	var Aold_QA uintptr
  8343  	var Anew_QA uintptr
  8344  
  8345  	Anew_QA = A_QA + uintptr((order&1))*64
  8346  
  8347  	*(*int32)(unsafe.Pointer(invGain_Q30)) = (int32(1) << 30)
  8348  	for k = (order - 1); k > 0; k-- {
  8349  		/* Check for stability */
  8350  		if (*(*int32)(unsafe.Pointer(Anew_QA + uintptr(k)*4)) > SKP_FIX_CONST(tls, 0.99975, 16)) || (*(*int32)(unsafe.Pointer(Anew_QA + uintptr(k)*4)) < -SKP_FIX_CONST(tls, 0.99975, 16)) {
  8351  			return 1
  8352  		}
  8353  
  8354  		/* Set RC equal to negated AR coef */
  8355  		rc_Q31 = -((*(*int32)(unsafe.Pointer(Anew_QA + uintptr(k)*4))) << (31 - 16))
  8356  
  8357  		/* rc_mult1_Q30 range: [ 1 : 2^30-1 ] */
  8358  		rc_mult1_Q30 = ((int32(0x7FFFFFFF) >> 1) - (int32(((int64_t(rc_Q31)) * (int64_t(rc_Q31))) >> (32))))
  8359  		/* reduce A_LIMIT if fails */
  8360  
  8361  		/* rc_mult2_Q16 range: [ 2^16 : SKP_int32_MAX ] */
  8362  		rc_mult2_Q16 = SKP_INVERSE32_varQ(tls, rc_mult1_Q30, 46) /* 16 = 46 - 30 */
  8363  
  8364  		/* Update inverse gain */
  8365  		/* invGain_Q30 range: [ 0 : 2^30 ] */
  8366  		*(*int32)(unsafe.Pointer(invGain_Q30)) = ((int32(((int64_t(*(*int32)(unsafe.Pointer(invGain_Q30)))) * (int64_t(rc_mult1_Q30))) >> (32))) << (2))
  8367  
  8368  		/* Swap pointers */
  8369  		Aold_QA = Anew_QA
  8370  		Anew_QA = A_QA + uintptr((k&1))*64
  8371  
  8372  		/* Update AR coefficient */
  8373  		headrm = (SKP_Silk_CLZ32(tls, rc_mult2_Q16) - 1)
  8374  		rc_mult2_Q16 = ((rc_mult2_Q16) << (headrm)) /* Q: 16 + headrm */
  8375  		for n = 0; n < k; n++ {
  8376  			tmp_QA = (*(*int32)(unsafe.Pointer(Aold_QA + uintptr(n)*4)) - ((int32(((int64_t(*(*int32)(unsafe.Pointer(Aold_QA + uintptr(((k-n)-1))*4)))) * (int64_t(rc_Q31))) >> (32))) << (1)))
  8377  			*(*int32)(unsafe.Pointer(Anew_QA + uintptr(n)*4)) = ((int32(((int64_t(tmp_QA)) * (int64_t(rc_mult2_Q16))) >> (32))) << (16 - headrm))
  8378  		}
  8379  	}
  8380  
  8381  	/* Check for stability */
  8382  	if (*(*int32)(unsafe.Pointer(Anew_QA)) > SKP_FIX_CONST(tls, 0.99975, 16)) || (*(*int32)(unsafe.Pointer(Anew_QA)) < -SKP_FIX_CONST(tls, 0.99975, 16)) {
  8383  		return 1
  8384  	}
  8385  
  8386  	/* Set RC equal to negated AR coef */
  8387  	rc_Q31 = -((*(*int32)(unsafe.Pointer(Anew_QA))) << (31 - 16))
  8388  
  8389  	/* Range: [ 1 : 2^30 ] */
  8390  	rc_mult1_Q30 = ((int32(0x7FFFFFFF) >> 1) - (int32(((int64_t(rc_Q31)) * (int64_t(rc_Q31))) >> (32))))
  8391  
  8392  	/* Update inverse gain */
  8393  	/* Range: [ 0 : 2^30 ] */
  8394  	*(*int32)(unsafe.Pointer(invGain_Q30)) = ((int32(((int64_t(*(*int32)(unsafe.Pointer(invGain_Q30)))) * (int64_t(rc_mult1_Q30))) >> (32))) << (2))
  8395  
  8396  	return 0
  8397  }
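
        // Reference sketch (hand-written, not part of the ccgo output): the function
        // above runs the "step-down" recursion. At each order it recovers the
        // reflection coefficient rc = -a[k], declares the filter unstable when |rc|
        // approaches 1, and accumulates the inverse prediction gain as the product
        // of (1 - rc^2). invPredGainRef is a hypothetical float equivalent, not used
        // by the codec.
        func invPredGainRef(aIn []float64) (invGain float64, stable bool) {
        	a := append([]float64(nil), aIn...)
        	invGain = 1.0
        	for k := len(a) - 1; k >= 0; k-- {
        		rc := -a[k]
        		if rc >= 0.99975 || rc <= -0.99975 {
        			return 0, false
        		}
        		den := 1 - rc*rc
        		invGain *= den
        		// step down to the next lower filter order
        		next := make([]float64, k)
        		for n := 0; n < k; n++ {
        			next[n] = (a[n] - rc*a[k-1-n]) / den
        		}
        		a = next
        	}
        	return invGain, true
        }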
  8398  
  8399  /* For input in Q12 domain */
  8400  func SKP_Silk_LPC_inverse_pred_gain(tls *libc.TLS, invGain_Q30 uintptr, A_Q12 uintptr, order int32) int32 { /* SKP_Silk_LPC_inv_pred_gain.c:113:9: */
  8401  	bp := tls.Alloc(128)
  8402  	defer tls.Free(128)
  8403  
  8404  	var k int32
  8405  	// var Atmp_QA [2][16]int32 at bp, 128
  8406  
  8407  	var Anew_QA uintptr
  8408  
  8409  	Anew_QA = (bp /* &Atmp_QA[0] */ + uintptr((order&1))*64)
  8410  
  8411  	/* Increase Q domain of the AR coefficients */
  8412  	for k = 0; k < order; k++ {
  8413  		*(*int32)(unsafe.Pointer(Anew_QA + uintptr(k)*4)) = ((int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr(k)*2)))) << (16 - 12))
  8414  	}
  8415  
  8416  	return LPC_inverse_pred_gain_QA(tls, invGain_Q30, bp /* &Atmp_QA[0] */, order)
  8417  }
  8418  
  8419  /* For input in Q24 domain */
  8420  func SKP_Silk_LPC_inverse_pred_gain_Q24(tls *libc.TLS, invGain_Q30 uintptr, A_Q24 uintptr, order int32) int32 { /* SKP_Silk_LPC_inv_pred_gain.c:134:9: */
  8421  	bp := tls.Alloc(128)
  8422  	defer tls.Free(128)
  8423  
  8424  	var k int32
  8425  	// var Atmp_QA [2][16]int32 at bp, 128
  8426  
  8427  	var Anew_QA uintptr
  8428  
  8429  	Anew_QA = (bp /* &Atmp_QA[0] */ + uintptr((order&1))*64)
  8430  
  8431  	/* Increase Q domain of the AR coefficients */
  8432  	for k = 0; k < order; k++ {
  8433  		*(*int32)(unsafe.Pointer(Anew_QA + uintptr(k)*4)) = func() int32 {
  8434  			if (24 - 16) == 1 {
  8435  				return (((*(*int32)(unsafe.Pointer(A_Q24 + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(A_Q24 + uintptr(k)*4))) & 1))
  8436  			}
  8437  			return ((((*(*int32)(unsafe.Pointer(A_Q24 + uintptr(k)*4))) >> ((24 - 16) - 1)) + 1) >> 1)
  8438  		}()
  8439  	}
  8440  
  8441  	return LPC_inverse_pred_gain_QA(tls, invGain_Q30, bp /* &Atmp_QA[0] */, order)
  8442  }
  8443  
  8444  /* even order AR filter */
  8445  func SKP_Silk_LPC_synthesis_filter(tls *libc.TLS, in uintptr, A_Q12 uintptr, Gain_Q26 int32, S uintptr, out uintptr, len int32, Order int32) { /* SKP_Silk_LPC_synthesis_filter.c:37:6: */
  8446  	var k int32
  8447  	var j int32
  8448  	var idx int32
  8449  	var Order_half int32 = ((Order) >> (1))
  8450  	var SA int32
  8451  	var SB int32
  8452  	var out32_Q10 int32
  8453  	var out32 int32
  8454  
  8455  	/* Order must be even */
  8456  
  8457  	/* S[] values are in Q14 */
  8458  	for k = 0; k < len; k++ {
  8459  		SA = *(*int32)(unsafe.Pointer(S + uintptr((Order-1))*4))
  8460  		out32_Q10 = 0
  8461  		for j = 0; j < (Order_half - 1); j++ {
  8462  			idx = (((int32(int16(2))) * (int32(int16(j)))) + 1)
  8463  			SB = *(*int32)(unsafe.Pointer(S + uintptr(((Order-1)-idx))*4))
  8464  			*(*int32)(unsafe.Pointer(S + uintptr(((Order-1)-idx))*4)) = SA
  8465  			out32_Q10 = ((out32_Q10) + ((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr((j<<1))*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr((j<<1))*2))))) >> 16)))
  8466  			out32_Q10 = ((out32_Q10) + ((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr(((j<<1)+1))*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr(((j<<1)+1))*2))))) >> 16)))
  8467  			SA = *(*int32)(unsafe.Pointer(S + uintptr(((Order-2)-idx))*4))
  8468  			*(*int32)(unsafe.Pointer(S + uintptr(((Order-2)-idx))*4)) = SB
  8469  		}
  8470  
  8471  		/* unrolled loop: epilog */
  8472  		SB = *(*int32)(unsafe.Pointer(S))
  8473  		*(*int32)(unsafe.Pointer(S)) = SA
  8474  		out32_Q10 = ((out32_Q10) + ((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr((Order-2))*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr((Order-2))*2))))) >> 16)))
  8475  		out32_Q10 = ((out32_Q10) + ((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr((Order-1))*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + uintptr((Order-1))*2))))) >> 16)))
  8476  		/* apply gain to excitation signal and add to prediction */
  8477  		out32_Q10 = func() int32 {
  8478  			if ((uint32((out32_Q10) + ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))) & 0x80000000) == uint32(0) {
  8479  				return func() int32 {
  8480  					if ((uint32((out32_Q10) & ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))) & 0x80000000) != uint32(0) {
  8481  						return libc.Int32FromUint32(0x80000000)
  8482  					}
  8483  					return ((out32_Q10) + ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))
  8484  				}()
  8485  			}
  8486  			return func() int32 {
  8487  				if ((uint32((out32_Q10) | ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))) & 0x80000000) == uint32(0) {
  8488  					return 0x7FFFFFFF
  8489  				}
  8490  				return ((out32_Q10) + ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))
  8491  			}()
  8492  		}()
  8493  
  8494  		/* scale to Q0 */
  8495  		out32 = func() int32 {
  8496  			if (10) == 1 {
  8497  				return (((out32_Q10) >> 1) + ((out32_Q10) & 1))
  8498  			}
  8499  			return ((((out32_Q10) >> ((10) - 1)) + 1) >> 1)
  8500  		}()
  8501  
  8502  		/* saturate output */
  8503  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
  8504  			if (out32) > 0x7FFF {
  8505  				return int16(0x7FFF)
  8506  			}
  8507  			return func() int16 {
  8508  				if (out32) < (int32(libc.Int16FromInt32(0x8000))) {
  8509  					return libc.Int16FromInt32(0x8000)
  8510  				}
  8511  				return int16(out32)
  8512  			}()
  8513  		}()
  8514  
  8515  		/* move result into delay line */
  8516  		*(*int32)(unsafe.Pointer(S + uintptr((Order-1))*4)) = ((func() int32 {
  8517  			if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  8518  				return func() int32 {
  8519  					if (out32_Q10) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  8520  						return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  8521  					}
  8522  					return func() int32 {
  8523  						if (out32_Q10) < (int32((0x7FFFFFFF)) >> (4)) {
  8524  							return (int32((0x7FFFFFFF)) >> (4))
  8525  						}
  8526  						return out32_Q10
  8527  					}()
  8528  				}()
  8529  			}
  8530  			return func() int32 {
  8531  				if (out32_Q10) > (int32((0x7FFFFFFF)) >> (4)) {
  8532  					return (int32((0x7FFFFFFF)) >> (4))
  8533  				}
  8534  				return func() int32 {
  8535  					if (out32_Q10) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  8536  						return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  8537  					}
  8538  					return out32_Q10
  8539  				}()
  8540  			}()
  8541  		}()) << (4))
  8542  	}
  8543  }
  8544  
  8545  /* 16th order AR filter */
  8546  func SKP_Silk_LPC_synthesis_order16(tls *libc.TLS, in uintptr, A_Q12 uintptr, Gain_Q26 int32, S uintptr, out uintptr, len int32) { /* SKP_Silk_LPC_synthesis_order16.c:37:6: */
  8547  	var k int32
  8548  	var SA int32
  8549  	var SB int32
  8550  	var out32_Q10 int32
  8551  	var out32 int32
  8552  	for k = 0; k < len; k++ {
  8553  		/* unrolled loop: prolog */
  8554  		/* multiply-add two prediction coefficients per iteration */
  8555  		SA = *(*int32)(unsafe.Pointer(S + 15*4))
  8556  		SB = *(*int32)(unsafe.Pointer(S + 14*4))
  8557  		*(*int32)(unsafe.Pointer(S + 14*4)) = SA
  8558  		out32_Q10 = ((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12))))) >> 16))
  8559  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 1*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 1*2))))) >> 16)))))
  8560  		SA = *(*int32)(unsafe.Pointer(S + 13*4))
  8561  		*(*int32)(unsafe.Pointer(S + 13*4)) = SB
  8562  
  8563  		/* unrolled loop: main loop */
  8564  		SB = *(*int32)(unsafe.Pointer(S + 12*4))
  8565  		*(*int32)(unsafe.Pointer(S + 12*4)) = SA
  8566  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 2*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 2*2))))) >> 16)))))
  8567  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 3*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 3*2))))) >> 16)))))
  8568  		SA = *(*int32)(unsafe.Pointer(S + 11*4))
  8569  		*(*int32)(unsafe.Pointer(S + 11*4)) = SB
  8570  
  8571  		SB = *(*int32)(unsafe.Pointer(S + 10*4))
  8572  		*(*int32)(unsafe.Pointer(S + 10*4)) = SA
  8573  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 4*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 4*2))))) >> 16)))))
  8574  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 5*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 5*2))))) >> 16)))))
  8575  		SA = *(*int32)(unsafe.Pointer(S + 9*4))
  8576  		*(*int32)(unsafe.Pointer(S + 9*4)) = SB
  8577  
  8578  		SB = *(*int32)(unsafe.Pointer(S + 8*4))
  8579  		*(*int32)(unsafe.Pointer(S + 8*4)) = SA
  8580  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 6*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 6*2))))) >> 16)))))
  8581  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 7*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 7*2))))) >> 16)))))
  8582  		SA = *(*int32)(unsafe.Pointer(S + 7*4))
  8583  		*(*int32)(unsafe.Pointer(S + 7*4)) = SB
  8584  
  8585  		SB = *(*int32)(unsafe.Pointer(S + 6*4))
  8586  		*(*int32)(unsafe.Pointer(S + 6*4)) = SA
  8587  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 8*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 8*2))))) >> 16)))))
  8588  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 9*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 9*2))))) >> 16)))))
  8589  		SA = *(*int32)(unsafe.Pointer(S + 5*4))
  8590  		*(*int32)(unsafe.Pointer(S + 5*4)) = SB
  8591  
  8592  		SB = *(*int32)(unsafe.Pointer(S + 4*4))
  8593  		*(*int32)(unsafe.Pointer(S + 4*4)) = SA
  8594  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 10*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 10*2))))) >> 16)))))
  8595  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 11*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 11*2))))) >> 16)))))
  8596  		SA = *(*int32)(unsafe.Pointer(S + 3*4))
  8597  		*(*int32)(unsafe.Pointer(S + 3*4)) = SB
  8598  
  8599  		SB = *(*int32)(unsafe.Pointer(S + 2*4))
  8600  		*(*int32)(unsafe.Pointer(S + 2*4)) = SA
  8601  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 12*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 12*2))))) >> 16)))))
  8602  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 13*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 13*2))))) >> 16)))))
  8603  		SA = *(*int32)(unsafe.Pointer(S + 1*4))
  8604  		*(*int32)(unsafe.Pointer(S + 1*4)) = SB
  8605  
  8606  		/* unrolled loop: epilog */
  8607  		SB = *(*int32)(unsafe.Pointer(S))
  8608  		*(*int32)(unsafe.Pointer(S)) = SA
  8609  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SA) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 14*2))))) + ((((SA) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 14*2))))) >> 16)))))
  8610  		out32_Q10 = (int32((uint32(out32_Q10)) + (uint32((((SB) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 15*2))))) + ((((SB) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q12 + 15*2))))) >> 16)))))
  8611  
  8612  		/* unrolled loop: end */
  8613  		/* apply gain to excitation signal and add to prediction */
  8614  		out32_Q10 = func() int32 {
  8615  			if ((uint32((out32_Q10) + ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))) & 0x80000000) == uint32(0) {
  8616  				return func() int32 {
  8617  					if ((uint32((out32_Q10) & ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))) & 0x80000000) != uint32(0) {
  8618  						return libc.Int32FromUint32(0x80000000)
  8619  					}
  8620  					return ((out32_Q10) + ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))
  8621  				}()
  8622  			}
  8623  			return func() int32 {
  8624  				if ((uint32((out32_Q10) | ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))) & 0x80000000) == uint32(0) {
  8625  					return 0x7FFFFFFF
  8626  				}
  8627  				return ((out32_Q10) + ((((Gain_Q26) >> 16) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) + ((((Gain_Q26) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2))))) >> 16)))
  8628  			}()
  8629  		}()
  8630  
  8631  		/* scale to Q0 */
  8632  		out32 = func() int32 {
  8633  			if (10) == 1 {
  8634  				return (((out32_Q10) >> 1) + ((out32_Q10) & 1))
  8635  			}
  8636  			return ((((out32_Q10) >> ((10) - 1)) + 1) >> 1)
  8637  		}()
  8638  
  8639  		/* saturate output */
  8640  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
  8641  			if (out32) > 0x7FFF {
  8642  				return int16(0x7FFF)
  8643  			}
  8644  			return func() int16 {
  8645  				if (out32) < (int32(libc.Int16FromInt32(0x8000))) {
  8646  					return libc.Int16FromInt32(0x8000)
  8647  				}
  8648  				return int16(out32)
  8649  			}()
  8650  		}()
  8651  
  8652  		/* move result into delay line */
  8653  		*(*int32)(unsafe.Pointer(S + 15*4)) = ((func() int32 {
  8654  			if (int32((libc.Int32FromUint32(0x80000000))) >> (4)) > (int32((0x7FFFFFFF)) >> (4)) {
  8655  				return func() int32 {
  8656  					if (out32_Q10) > (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  8657  						return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  8658  					}
  8659  					return func() int32 {
  8660  						if (out32_Q10) < (int32((0x7FFFFFFF)) >> (4)) {
  8661  							return (int32((0x7FFFFFFF)) >> (4))
  8662  						}
  8663  						return out32_Q10
  8664  					}()
  8665  				}()
  8666  			}
  8667  			return func() int32 {
  8668  				if (out32_Q10) > (int32((0x7FFFFFFF)) >> (4)) {
  8669  					return (int32((0x7FFFFFFF)) >> (4))
  8670  				}
  8671  				return func() int32 {
  8672  					if (out32_Q10) < (int32((libc.Int32FromUint32(0x80000000))) >> (4)) {
  8673  						return (int32((libc.Int32FromUint32(0x80000000))) >> (4))
  8674  					}
  8675  					return out32_Q10
  8676  				}()
  8677  			}()
  8678  		}()) << (4))
  8679  	}
  8680  }
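
        // Reference sketch (hand-written, not part of the ccgo output): apart from
        // the loop unrolling, the Q14 delay line and the saturating arithmetic, both
        // synthesis filters above compute the all-pole recursion
        // out[k] = gain*in[k] + sum_j A[j]*out[k-1-j], saturated to int16.
        // arSynthesisRef is a hypothetical float equivalent, not used by the codec;
        // it starts from a zero delay line instead of the state S carried across calls.
        func arSynthesisRef(in, a []float64, gain float64) []float64 {
        	out := make([]float64, len(in))
        	for k := range in {
        		acc := gain * in[k]
        		for j := range a {
        			if k-1-j >= 0 {
        				acc += a[j] * out[k-1-j]
        			}
        		}
        		out[k] = acc
        	}
        	return out
        }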
  8681  
  8682  /* Helper function that interpolates the filter taps */
  8683  func SKP_Silk_LP_interpolate_filter_taps(tls *libc.TLS, B_Q28 uintptr, A_Q28 uintptr, ind int32, fac_Q16 int32) { /* SKP_Silk_LP_variable_cutoff.c:40:17: */
  8684  	var nb int32
  8685  	var na int32
  8686  
  8687  	if ind < (5 - 1) {
  8688  		if fac_Q16 > 0 {
  8689  			if fac_Q16 == (func() int32 {
  8690  				if (fac_Q16) > 0x7FFF {
  8691  					return 0x7FFF
  8692  				}
  8693  				return func() int32 {
  8694  					if (fac_Q16) < (int32(libc.Int16FromInt32(0x8000))) {
  8695  						return int32(libc.Int16FromInt32(0x8000))
  8696  					}
  8697  					return fac_Q16
  8698  				}()
  8699  			}()) { /* fac_Q16 is in range of a 16-bit int */
  8700  				/* Piece-wise linear interpolation of B and A */
  8701  				for nb = 0; nb < 3; nb++ {
  8702  					*(*int32)(unsafe.Pointer(B_Q28 + uintptr(nb)*4)) = ((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr(ind)*12) + uintptr(nb)*4))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr((ind+1))*12) + uintptr(nb)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr(ind)*12) + uintptr(nb)*4))) >> 16) * (int32(int16(fac_Q16)))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr((ind+1))*12) + uintptr(nb)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr(ind)*12) + uintptr(nb)*4))) & 0x0000FFFF) * (int32(int16(fac_Q16)))) >> 16)))
  8703  				}
  8704  				for na = 0; na < 2; na++ {
  8705  					*(*int32)(unsafe.Pointer(A_Q28 + uintptr(na)*4)) = ((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr(ind)*8) + uintptr(na)*4))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr((ind+1))*8) + uintptr(na)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr(ind)*8) + uintptr(na)*4))) >> 16) * (int32(int16(fac_Q16)))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr((ind+1))*8) + uintptr(na)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr(ind)*8) + uintptr(na)*4))) & 0x0000FFFF) * (int32(int16(fac_Q16)))) >> 16)))
  8706  				}
  8707  			} else if fac_Q16 == (int32(1) << 15) { /* Neither fac_Q16 nor ( ( 1 << 16 ) - fac_Q16 ) is in range of a 16-bit int */
  8708  
  8709  				/* Piece-wise linear interpolation of B and A */
  8710  				for nb = 0; nb < 3; nb++ {
  8711  					*(*int32)(unsafe.Pointer(B_Q28 + uintptr(nb)*4)) = ((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr(ind)*12) + uintptr(nb)*4)) + *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr((ind+1))*12) + uintptr(nb)*4))) >> (1))
  8712  				}
  8713  				for na = 0; na < 2; na++ {
  8714  					*(*int32)(unsafe.Pointer(A_Q28 + uintptr(na)*4)) = ((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr(ind)*8) + uintptr(na)*4)) + *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr((ind+1))*8) + uintptr(na)*4))) >> (1))
  8715  				}
  8716  			} else { /* ( ( 1 << 16 ) - fac_Q16 ) is in range of a 16-bit int */
  8717  
  8718  				/* Piece-wise linear interpolation of B and A */
  8719  				for nb = 0; nb < 3; nb++ {
  8720  					*(*int32)(unsafe.Pointer(B_Q28 + uintptr(nb)*4)) = ((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr((ind+1))*12) + uintptr(nb)*4))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr(ind)*12) + uintptr(nb)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr((ind+1))*12) + uintptr(nb)*4))) >> 16) * (int32((int16((int32(1) << 16) - fac_Q16))))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr(ind)*12) + uintptr(nb)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr((ind+1))*12) + uintptr(nb)*4))) & 0x0000FFFF) * (int32((int16((int32(1) << 16) - fac_Q16))))) >> 16)))
  8721  				}
  8722  				for na = 0; na < 2; na++ {
  8723  					*(*int32)(unsafe.Pointer(A_Q28 + uintptr(na)*4)) = ((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr((ind+1))*8) + uintptr(na)*4))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr(ind)*8) + uintptr(na)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr((ind+1))*8) + uintptr(na)*4))) >> 16) * (int32((int16((int32(1) << 16) - fac_Q16))))) + ((((*(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr(ind)*8) + uintptr(na)*4)) - *(*int32)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr((ind+1))*8) + uintptr(na)*4))) & 0x0000FFFF) * (int32((int16((int32(1) << 16) - fac_Q16))))) >> 16)))
  8724  				}
  8725  			}
  8726  		} else {
  8727  			libc.Xmemcpy(tls, B_Q28, (uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + uintptr(ind)*12), (uint32(3) * uint32(unsafe.Sizeof(int32(0)))))
  8728  			libc.Xmemcpy(tls, A_Q28, (uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + uintptr(ind)*8), (uint32(2) * uint32(unsafe.Sizeof(int32(0)))))
  8729  		}
  8730  	} else {
  8731  		libc.Xmemcpy(tls, B_Q28, (uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_B_Q28)) + 4*12), (uint32(3) * uint32(unsafe.Sizeof(int32(0)))))
  8732  		libc.Xmemcpy(tls, A_Q28, (uintptr(unsafe.Pointer(&SKP_Silk_Transition_LP_A_Q28)) + 4*8), (uint32(2) * uint32(unsafe.Sizeof(int32(0)))))
  8733  	}
  8734  }
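
        // Reference sketch (hand-written, not part of the ccgo output): the function
        // above blends two adjacent rows of the transition-filter tables by plain
        // linear interpolation, c = c0 + fac*(c1 - c0) with fac in [0, 1); the
        // branching only exists to keep the Q16 blend factor (or its complement)
        // within 16-bit multiply range. lerpTapsRef is a hypothetical float
        // equivalent, not used by the codec; it assumes len(c0) == len(c1).
        func lerpTapsRef(c0, c1 []float64, fac float64) []float64 {
        	out := make([]float64, len(c0))
        	for i := range c0 {
        		out[i] = c0[i] + fac*(c1[i]-c0[i])
        	}
        	return out
        }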
  8735  
  8736  /* Low-pass filter with variable cutoff frequency based on  */
  8737  /* piece-wise linear interpolation between elliptic filters */
  8738  /* Start by setting psEncC->transition_frame_no = 1;            */
  8739  /* Deactivate by setting psEncC->transition_frame_no = 0;   */
  8740  func SKP_Silk_LP_variable_cutoff(tls *libc.TLS, psLP uintptr, out uintptr, in uintptr, frame_length int32) { /* SKP_Silk_LP_variable_cutoff.c:115:6: */
  8741  	bp := tls.Alloc(20)
  8742  	defer tls.Free(20)
  8743  
  8744  	// var B_Q28 [3]int32 at bp, 12
  8745  
  8746  	// var A_Q28 [2]int32 at bp+12, 8
  8747  
  8748  	var fac_Q16 int32 = 0
  8749  	var ind int32 = 0
  8750  
  8751  	/* Interpolate filter coefficients if needed */
  8752  	if (*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no > 0 {
  8753  		if (*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Fmode == 0 {
  8754  			if (*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no < (2560 / 20) {
  8755  				/* Calculate index and interpolation factor for interpolation */
  8756  				fac_Q16 = (((*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no) << (16 - 5))
  8757  				ind = ((fac_Q16) >> (16))
  8758  				fac_Q16 = fac_Q16 - ((ind) << (16))
  8759  
  8760  				/* Interpolate filter coefficients */
  8761  				SKP_Silk_LP_interpolate_filter_taps(tls, bp /* &B_Q28[0] */, bp+12 /* &A_Q28[0] */, ind, fac_Q16)
  8762  
  8763  				/* Increment transition frame number for next frame */
  8764  				(*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no++
  8765  
  8766  			} else {
  8767  
  8768  				/* End of transition phase */
  8769  				SKP_Silk_LP_interpolate_filter_taps(tls, bp /* &B_Q28[0] */, bp+12 /* &A_Q28[0] */, (5 - 1), 0)
  8770  			}
  8771  		} else {
  8772  
  8773  			if (*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no < (5120 / 20) {
  8774  				/* Calculate index and interpolation factor for interpolation */
  8775  				fac_Q16 = (((5120 / 20) - (*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no) << (16 - 6))
  8776  				ind = ((fac_Q16) >> (16))
  8777  				fac_Q16 = fac_Q16 - ((ind) << (16))
  8778  
  8779  				/* Interpolate filter coefficients */
  8780  				SKP_Silk_LP_interpolate_filter_taps(tls, bp /* &B_Q28[0] */, bp+12 /* &A_Q28[0] */, ind, fac_Q16)
  8781  
  8782  				/* Increment transition frame number for next frame */
  8783  				(*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no++
  8784  
  8785  			} else {
  8786  
  8787  				/* End of transition phase */
  8788  				SKP_Silk_LP_interpolate_filter_taps(tls, bp /* &B_Q28[0] */, bp+12 /* &A_Q28[0] */, 0, 0)
  8789  			}
  8790  		}
  8791  	}
  8792  
  8793  	if (*SKP_Silk_LP_state)(unsafe.Pointer(psLP)).Ftransition_frame_no > 0 {
  8794  		/* ARMA low-pass filtering */
  8795  
  8796  		SKP_Silk_biquad_alt(tls, in, bp /* &B_Q28[0] */, bp+12 /* &A_Q28[0] */, psLP /* &.In_LP_State */, out, frame_length)
  8797  	} else {
  8798  		/* Instead of using the filter, copy input directly to output */
  8799  		libc.Xmemcpy(tls, out, in, (uint32(frame_length) * uint32(unsafe.Sizeof(int16(0)))))
  8800  	}
  8801  }
  8802  
  8803  // Q12 values (even)
  8804  var SKP_Silk_LSFCosTab_FIX_Q12 = [129]int32{
  8805  	8192, 8190, 8182, 8170,
  8806  	8152, 8130, 8104, 8072,
  8807  	8034, 7994, 7946, 7896,
  8808  	7840, 7778, 7714, 7644,
  8809  	7568, 7490, 7406, 7318,
  8810  	7226, 7128, 7026, 6922,
  8811  	6812, 6698, 6580, 6458,
  8812  	6332, 6204, 6070, 5934,
  8813  	5792, 5648, 5502, 5352,
  8814  	5198, 5040, 4880, 4718,
  8815  	4552, 4382, 4212, 4038,
  8816  	3862, 3684, 3502, 3320,
  8817  	3136, 2948, 2760, 2570,
  8818  	2378, 2186, 1990, 1794,
  8819  	1598, 1400, 1202, 1002,
  8820  	802, 602, 402, 202,
  8821  	0, -202, -402, -602,
  8822  	-802, -1002, -1202, -1400,
  8823  	-1598, -1794, -1990, -2186,
  8824  	-2378, -2570, -2760, -2948,
  8825  	-3136, -3320, -3502, -3684,
  8826  	-3862, -4038, -4212, -4382,
  8827  	-4552, -4718, -4880, -5040,
  8828  	-5198, -5352, -5502, -5648,
  8829  	-5792, -5934, -6070, -6204,
  8830  	-6332, -6458, -6580, -6698,
  8831  	-6812, -6922, -7026, -7128,
  8832  	-7226, -7318, -7406, -7490,
  8833  	-7568, -7644, -7714, -7778,
  8834  	-7840, -7896, -7946, -7994,
  8835  	-8034, -8072, -8104, -8130,
  8836  	-8152, -8170, -8182, -8190,
  8837  	-8192,
  8838  } /* SKP_Silk_LSF_cos_table.c:31:15 */
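
        // Note: entry i of the table above is (approximately) 2*cos(pi*i/128) in Q12,
        // so 8192 = 2.0 at i = 0 and -8192 = -2.0 at i = 128; SKP_Silk_NLSF2A below
        // interpolates linearly between adjacent entries.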
  8839  
  8840  func SKP_Silk_LTP_analysis_filter_FIX(tls *libc.TLS, LTP_res uintptr, x uintptr, LTPCoef_Q14 uintptr, pitchL uintptr, invGains_Q16 uintptr, subfr_length int32, pre_length int32) { /* SKP_Silk_LTP_analysis_filter_FIX.c:30:6: */
  8841  	bp := tls.Alloc(10)
  8842  	defer tls.Free(10)
  8843  
  8844  	var x_ptr uintptr
  8845  	var x_lag_ptr uintptr
  8846  	// var Btmp_Q14 [5]int16 at bp, 10
  8847  
  8848  	var LTP_res_ptr uintptr
  8849  	var k int32
  8850  	var i int32
  8851  	var j int32
  8852  	var LTP_est int32
  8853  
  8854  	x_ptr = x
  8855  	LTP_res_ptr = LTP_res
  8856  	for k = 0; k < 4; k++ {
  8857  
  8858  		x_lag_ptr = (x_ptr - uintptr(*(*int32)(unsafe.Pointer(pitchL + uintptr(k)*4)))*2)
  8859  		for i = 0; i < 5; i++ {
  8860  			*(*int16)(unsafe.Pointer(bp /* &Btmp_Q14[0] */ + uintptr(i)*2)) = *(*int16)(unsafe.Pointer(LTPCoef_Q14 + uintptr(((k*5)+i))*2))
  8861  		}
  8862  
  8863  		/* LTP analysis FIR filter */
  8864  		for i = 0; i < (subfr_length + pre_length); i++ {
  8865  			*(*int16)(unsafe.Pointer(LTP_res_ptr + uintptr(i)*2)) = *(*int16)(unsafe.Pointer(x_ptr + uintptr(i)*2))
  8866  
  8867  			/* Long-term prediction */
  8868  			LTP_est = ((int32(*(*int16)(unsafe.Pointer(x_lag_ptr + 2*2)))) * (int32(*(*int16)(unsafe.Pointer(bp /* &Btmp_Q14[0] */)))))
  8869  			for j = 1; j < 5; j++ {
  8870  				LTP_est = (int32((uint32(LTP_est)) + (uint32((int32(*(*int16)(unsafe.Pointer(x_lag_ptr + uintptr(((5/2)-j))*2)))) * (int32(*(*int16)(unsafe.Pointer(bp /* &Btmp_Q14[0] */ + uintptr(j)*2))))))))
  8871  			}
  8872  			LTP_est = func() int32 {
  8873  				if (14) == 1 {
  8874  					return (((LTP_est) >> 1) + ((LTP_est) & 1))
  8875  				}
  8876  				return ((((LTP_est) >> ((14) - 1)) + 1) >> 1)
  8877  			}() // round and -> Q0
  8878  
  8879  			/* Subtract long-term prediction */
  8880  			*(*int16)(unsafe.Pointer(LTP_res_ptr + uintptr(i)*2)) = func() int16 {
  8881  				if (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(i)*2))) - LTP_est) > 0x7FFF {
  8882  					return int16(0x7FFF)
  8883  				}
  8884  				return func() int16 {
  8885  					if (int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(i)*2))) - LTP_est) < (int32(libc.Int16FromInt32(0x8000))) {
  8886  						return libc.Int16FromInt32(0x8000)
  8887  					}
  8888  					return (int16(int32(*(*int16)(unsafe.Pointer(x_ptr + uintptr(i)*2))) - LTP_est))
  8889  				}()
  8890  			}()
  8891  
  8892  			/* Scale residual */
  8893  			*(*int16)(unsafe.Pointer(LTP_res_ptr + uintptr(i)*2)) = (int16((((*(*int32)(unsafe.Pointer(invGains_Q16 + uintptr(k)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(LTP_res_ptr + uintptr(i)*2))))) + ((((*(*int32)(unsafe.Pointer(invGains_Q16 + uintptr(k)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(LTP_res_ptr + uintptr(i)*2))))) >> 16)))
  8894  
  8895  			x_lag_ptr += 2
  8896  		}
  8897  
  8898  		/* Update pointers */
  8899  		LTP_res_ptr += 2 * (uintptr(subfr_length + pre_length))
  8900  		x_ptr += 2 * (uintptr(subfr_length))
  8901  	}
  8902  }
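
        // Note: per subframe, the filter above removes a 5-tap long-term prediction
        // centered on the pitch lag, LTP_res[i] = x[i] - round( sum_{j=0..4}
        // Btmp_Q14[j] * x[i - pitchL + 2 - j] / 2^14 ), and then scales the residual
        // by that subframe's inverse quantized gain (invGains_Q16).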
  8903  
  8904  /* Table containing trained thresholds for LTP scaling */
  8905  var LTPScaleThresholds_Q15 = [11]int16{
  8906  	int16(31129), int16(26214), int16(16384), int16(13107), int16(9830), int16(6554),
  8907  	int16(4915), int16(3276), int16(2621), int16(2458), int16(0),
  8908  } /* SKP_Silk_LTP_scale_ctrl_FIX.c:33:24 */
  8909  
  8910  func SKP_Silk_LTP_scale_ctrl_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr) { /* SKP_Silk_LTP_scale_ctrl_FIX.c:39:6: */
  8911  	var round_loss int32
  8912  	var frames_per_packet int32
  8913  	var g_out_Q5 int32
  8914  	var g_limit_Q15 int32
  8915  	var thrld1_Q15 int32
  8916  	var thrld2_Q15 int32
  8917  
  8918  	/* 1st order high-pass filter */
  8919  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FHPLTPredCodGain_Q7 = (SKP_max_int(tls, ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7-(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FprevLTPredCodGain_Q7), 0) +
  8920  		(func() int32 {
  8921  			if (1) == 1 {
  8922  				return ((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FHPLTPredCodGain_Q7) >> 1) + (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FHPLTPredCodGain_Q7) & 1))
  8923  			}
  8924  			return (((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FHPLTPredCodGain_Q7) >> ((1) - 1)) + 1) >> 1)
  8925  		}()))
  8926  
  8927  	(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FprevLTPredCodGain_Q7 = (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7
  8928  
  8929  	/* combine input and filtered input */
  8930  	g_out_Q5 = func() int32 {
  8931  		if (3) == 1 {
  8932  			return ((((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7) >> (1)) + (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FHPLTPredCodGain_Q7) >> (1))) >> 1) + (((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7) >> (1)) + (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FHPLTPredCodGain_Q7) >> (1))) & 1))
  8933  		}
  8934  		return (((((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7) >> (1)) + (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FHPLTPredCodGain_Q7) >> (1))) >> ((3) - 1)) + 1) >> 1)
  8935  	}()
  8936  	g_limit_Q15 = SKP_Silk_sigm_Q15(tls, (g_out_Q5 - (int32(3) << 5)))
  8937  
  8938  	/* Default is minimum scaling */
  8939  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FLTP_scaleIndex = 0
  8940  
  8941  	/* Round the loss measure to whole pct */
  8942  	round_loss = (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketLoss_perc
  8943  
  8944  	/* Only scale if first frame in packet */
  8945  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf == 0 {
  8946  
  8947  		frames_per_packet = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FPacketSize_ms) / (20))
  8948  
  8949  		round_loss = round_loss + (frames_per_packet - 1)
  8950  		thrld1_Q15 = int32(LTPScaleThresholds_Q15[SKP_min_int(tls, round_loss, (11-1))])
  8951  		thrld2_Q15 = int32(LTPScaleThresholds_Q15[SKP_min_int(tls, (round_loss+1), (11-1))])
  8952  
  8953  		if g_limit_Q15 > thrld1_Q15 {
  8954  			/* Maximum scaling */
  8955  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FLTP_scaleIndex = 2
  8956  		} else if g_limit_Q15 > thrld2_Q15 {
  8957  			/* Medium scaling */
  8958  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FLTP_scaleIndex = 1
  8959  		}
  8960  	}
  8961  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTP_scale_Q14 = int32(SKP_Silk_LTPScales_table_Q14[(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FLTP_scaleIndex])
  8962  }
  8963  
  8964  /* Variable order MA prediction error filter */
  8965  func SKP_Silk_MA_Prediction(tls *libc.TLS, in uintptr, B uintptr, S uintptr, out uintptr, len int32, order int32) { /* SKP_Silk_MA.c:39:6: */
  8966  	var k int32
  8967  	var d int32
  8968  	var in16 int32
  8969  	var out32 int32
  8970  
  8971  	for k = 0; k < len; k++ {
  8972  		in16 = int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))
  8973  		out32 = (((in16) << (12)) - *(*int32)(unsafe.Pointer(S)))
  8974  		out32 = func() int32 {
  8975  			if (12) == 1 {
  8976  				return (((out32) >> 1) + ((out32) & 1))
  8977  			}
  8978  			return ((((out32) >> ((12) - 1)) + 1) >> 1)
  8979  		}()
  8980  
  8981  		for d = 0; d < (order - 1); d++ {
  8982  			*(*int32)(unsafe.Pointer(S + uintptr(d)*4)) = (int32((uint32(*(*int32)(unsafe.Pointer(S + uintptr((d+1))*4)))) + (uint32((int32(int16(in16))) * (int32(*(*int16)(unsafe.Pointer(B + uintptr(d)*2))))))))
  8983  		}
  8984  		*(*int32)(unsafe.Pointer(S + uintptr((order-1))*4)) = ((int32(int16(in16))) * (int32(*(*int16)(unsafe.Pointer(B + uintptr((order-1))*2)))))
  8985  
  8986  		/* Limit */
  8987  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
  8988  			if (out32) > 0x7FFF {
  8989  				return int16(0x7FFF)
  8990  			}
  8991  			return func() int16 {
  8992  				if (out32) < (int32(libc.Int16FromInt32(0x8000))) {
  8993  					return libc.Int16FromInt32(0x8000)
  8994  				}
  8995  				return int16(out32)
  8996  			}()
  8997  		}()
  8998  	}
  8999  }
  9000  
  9001  func SKP_Silk_LPC_analysis_filter(tls *libc.TLS, in uintptr, B uintptr, S uintptr, out uintptr, len int32, Order int32) { /* SKP_Silk_MA.c:67:6: */
  9002  	var k int32
  9003  	var j int32
  9004  	var idx int32
  9005  	var Order_half int32 = ((Order) >> (1))
  9006  	var out32_Q12 int32
  9007  	var out32 int32
  9008  	var SA int16
  9009  	var SB int16
  9010  	/* Order must be even */
  9011  
  9012  	/* S[] values are in Q0 */
  9013  	for k = 0; k < len; k++ {
  9014  		SA = *(*int16)(unsafe.Pointer(S))
  9015  		out32_Q12 = 0
  9016  		for j = 0; j < (Order_half - 1); j++ {
  9017  			idx = (((int32(int16(2))) * (int32(int16(j)))) + 1)
  9018  			/* Multiply-add two prediction coefficients for each loop */
  9019  			SB = *(*int16)(unsafe.Pointer(S + uintptr(idx)*2))
  9020  			*(*int16)(unsafe.Pointer(S + uintptr(idx)*2)) = SA
  9021  			out32_Q12 = ((out32_Q12) + ((int32(SA)) * (int32(*(*int16)(unsafe.Pointer(B + uintptr((idx-1))*2))))))
  9022  			out32_Q12 = ((out32_Q12) + ((int32(SB)) * (int32(*(*int16)(unsafe.Pointer(B + uintptr(idx)*2))))))
  9023  			SA = *(*int16)(unsafe.Pointer(S + uintptr((idx+1))*2))
  9024  			*(*int16)(unsafe.Pointer(S + uintptr((idx+1))*2)) = SB
  9025  		}
  9026  
  9027  		/* Unrolled loop: epilog */
  9028  		SB = *(*int16)(unsafe.Pointer(S + uintptr((Order-1))*2))
  9029  		*(*int16)(unsafe.Pointer(S + uintptr((Order-1))*2)) = SA
  9030  		out32_Q12 = ((out32_Q12) + ((int32(SA)) * (int32(*(*int16)(unsafe.Pointer(B + uintptr((Order-2))*2))))))
  9031  		out32_Q12 = ((out32_Q12) + ((int32(SB)) * (int32(*(*int16)(unsafe.Pointer(B + uintptr((Order-1))*2))))))
  9032  
  9033  		/* Subtract prediction */
  9034  		out32_Q12 = func() int32 {
  9035  			if ((uint32(((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (12)) - (out32_Q12))) & 0x80000000) == uint32(0) {
  9036  				return func() int32 {
  9037  					if (((uint32((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (12))) & ((uint32(out32_Q12)) ^ 0x80000000)) & 0x80000000) != 0 {
  9038  						return libc.Int32FromUint32(0x80000000)
  9039  					}
  9040  					return (((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (12)) - (out32_Q12))
  9041  				}()
  9042  			}
  9043  			return func() int32 {
  9044  				if ((((uint32((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (12))) ^ 0x80000000) & (uint32(out32_Q12))) & 0x80000000) != 0 {
  9045  					return 0x7FFFFFFF
  9046  				}
  9047  				return (((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (12)) - (out32_Q12))
  9048  			}()
  9049  		}()
  9050  
  9051  		/* Scale to Q0 */
  9052  		out32 = func() int32 {
  9053  			if (12) == 1 {
  9054  				return (((out32_Q12) >> 1) + ((out32_Q12) & 1))
  9055  			}
  9056  			return ((((out32_Q12) >> ((12) - 1)) + 1) >> 1)
  9057  		}()
  9058  
  9059  		/* Saturate output */
  9060  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
  9061  			if (out32) > 0x7FFF {
  9062  				return int16(0x7FFF)
  9063  			}
  9064  			return func() int16 {
  9065  				if (out32) < (int32(libc.Int16FromInt32(0x8000))) {
  9066  					return libc.Int16FromInt32(0x8000)
  9067  				}
  9068  				return int16(out32)
  9069  			}()
  9070  		}()
  9071  
  9072  		/* Move input line */
  9073  		*(*int16)(unsafe.Pointer(S)) = *(*int16)(unsafe.Pointer(in + uintptr(k)*2))
  9074  	}
  9075  }
  9076  
  9077  /* helper function for NLSF2A(..) */
  9078  func SKP_Silk_NLSF2A_find_poly(tls *libc.TLS, out uintptr, cLSF uintptr, dd int32) { /* SKP_Silk_NLSF2A.c:37:17: */
  9079  	var k int32
  9080  	var n int32
  9081  	var ftmp int32
  9082  
  9083  	*(*int32)(unsafe.Pointer(out)) = (int32((1)) << (20))
  9084  	*(*int32)(unsafe.Pointer(out + 1*4)) = -*(*int32)(unsafe.Pointer(cLSF))
  9085  	for k = 1; k < dd; k++ {
  9086  		ftmp = *(*int32)(unsafe.Pointer(cLSF + uintptr((2*k))*4)) // Q20
  9087  		*(*int32)(unsafe.Pointer(out + uintptr((k+1))*4)) = (((*(*int32)(unsafe.Pointer(out + uintptr((k-1))*4))) << (1)) - (func() int32 {
  9088  			if (20) == 1 {
  9089  				return (int32((((int64_t(ftmp)) * (int64_t(*(*int32)(unsafe.Pointer(out + uintptr(k)*4))))) >> 1) + (((int64_t(ftmp)) * (int64_t(*(*int32)(unsafe.Pointer(out + uintptr(k)*4))))) & int64(1))))
  9090  			}
  9091  			return (int32(((((int64_t(ftmp)) * (int64_t(*(*int32)(unsafe.Pointer(out + uintptr(k)*4))))) >> ((20) - 1)) + int64(1)) >> 1))
  9092  		}()))
  9093  		for n = k; n > 1; n-- {
  9094  			*(*int32)(unsafe.Pointer(out + uintptr(n)*4)) += (*(*int32)(unsafe.Pointer(out + uintptr((n-2))*4)) - (func() int32 {
  9095  				if (20) == 1 {
  9096  					return (int32((((int64_t(ftmp)) * (int64_t(*(*int32)(unsafe.Pointer(out + uintptr((n-1))*4))))) >> 1) + (((int64_t(ftmp)) * (int64_t(*(*int32)(unsafe.Pointer(out + uintptr((n-1))*4))))) & int64(1))))
  9097  				}
  9098  				return (int32(((((int64_t(ftmp)) * (int64_t(*(*int32)(unsafe.Pointer(out + uintptr((n-1))*4))))) >> ((20) - 1)) + int64(1)) >> 1))
  9099  			}()))
  9100  		}
  9101  		*(*int32)(unsafe.Pointer(out + 1*4)) -= (ftmp)
  9102  	}
  9103  }
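
        // NOTE (hand-written commentary; not part of the ccgo output): SKP_Silk_NLSF2A_find_poly
        // above appears to expand the product of second-order sections
        //
        //	P(z) = prod_k ( 1 - 2*cos(w_k)*z^-1 + z^-2 )
        //
        // one LSF root pair at a time, with the 2*cos() values supplied in Q20 (cLSF) and the
        // polynomial coefficients accumulated in Q20 (out). The int64_t products followed by the
        // "(20) == 1" closures are 32x32 multiplies rounded back down to Q20.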
  9104  
  9105  /* compute whitening filter coefficients from normalized line spectral frequencies */
  9106  func SKP_Silk_NLSF2A(tls *libc.TLS, a uintptr, NLSF uintptr, d int32) { /* SKP_Silk_NLSF2A.c:59:6: */
  9107  	bp := tls.Alloc(200)
  9108  	defer tls.Free(200)
  9109  
  9110  	var k int32
  9111  	var i int32
  9112  	var dd int32
  9113  	// var cos_LSF_Q20 [16]int32 at bp, 64
  9114  
  9115  	// var P [9]int32 at bp+64, 36
  9116  
  9117  	// var Q [9]int32 at bp+100, 36
  9118  
  9119  	var Ptmp int32
  9120  	var Qtmp int32
  9121  	var f_int int32
  9122  	var f_frac int32
  9123  	var cos_val int32
  9124  	var delta int32
  9125  	// var a_int32 [16]int32 at bp+136, 64
  9126  
  9127  	var maxabs int32
  9128  	var absval int32
  9129  	var idx int32 = 0
  9130  	var sc_Q16 int32
  9131  
  9132  	/* convert LSFs to 2*cos(LSF(i)), using piecewise linear curve from table */
  9133  	for k = 0; k < d; k++ {
  9134  
  9135  		/* f_int on a scale 0-127 (rounded down) */
  9136  		f_int = ((*(*int32)(unsafe.Pointer(NLSF + uintptr(k)*4))) >> (15 - 7))
  9137  
  9138  		/* f_frac, range: 0..255 */
  9139  		f_frac = (*(*int32)(unsafe.Pointer(NLSF + uintptr(k)*4)) - ((f_int) << (15 - 7)))
  9140  
  9141  		/* Read start and end value from table */
  9142  		cos_val = SKP_Silk_LSFCosTab_FIX_Q12[f_int]               /* Q12 */
  9143  		delta = (SKP_Silk_LSFCosTab_FIX_Q12[(f_int+1)] - cos_val) /* Q12, with a range of 0..200 */
  9144  
  9145  		/* Linear interpolation */
  9146  		*(*int32)(unsafe.Pointer(bp /* &cos_LSF_Q20[0] */ + uintptr(k)*4)) = (((cos_val) << (8)) + ((delta) * (f_frac))) /* Q20 */
  9147  	}
  9148  
  9149  	dd = ((d) >> (1))
  9150  
  9151  	/* generate even and odd polynomials using convolution */
  9152  	SKP_Silk_NLSF2A_find_poly(tls, bp+64 /* &P[0] */, (bp /* &cos_LSF_Q20 */), dd)
  9153  	SKP_Silk_NLSF2A_find_poly(tls, bp+100 /* &Q[0] */, (bp /* &cos_LSF_Q20 */ + 1*4), dd)
  9154  
  9155  	/* convert even and odd polynomials to SKP_int32 Q12 filter coefs */
  9156  	for k = 0; k < dd; k++ {
  9157  		Ptmp = (*(*int32)(unsafe.Pointer(bp + 64 /* &P[0] */ + uintptr((k+1))*4)) + *(*int32)(unsafe.Pointer(bp + 64 /* &P[0] */ + uintptr(k)*4)))
  9158  		Qtmp = (*(*int32)(unsafe.Pointer(bp + 100 /* &Q[0] */ + uintptr((k+1))*4)) - *(*int32)(unsafe.Pointer(bp + 100 /* &Q[0] */ + uintptr(k)*4)))
  9159  
  9160  		/* the Ptmp and Qtmp values at this stage need to fit in int32 */
  9161  
  9162  		*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4)) = -func() int32 {
  9163  			if (9) == 1 {
  9164  				return (((Ptmp + Qtmp) >> 1) + ((Ptmp + Qtmp) & 1))
  9165  			}
  9166  			return ((((Ptmp + Qtmp) >> ((9) - 1)) + 1) >> 1)
  9167  		}() /* Q20 -> Q12 */
  9168  		*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(((d-k)-1))*4)) = func() int32 {
  9169  			if (9) == 1 {
  9170  				return (((Qtmp - Ptmp) >> 1) + ((Qtmp - Ptmp) & 1))
  9171  			}
  9172  			return ((((Qtmp - Ptmp) >> ((9) - 1)) + 1) >> 1)
  9173  		}() /* Q20 -> Q12 */
  9174  	}
  9175  
  9176  	/* Limit the maximum absolute value of the prediction coefficients */
  9177  	for i = 0; i < 10; i++ {
  9178  		/* Find maximum absolute value and its index */
  9179  		maxabs = 0
  9180  		for k = 0; k < d; k++ {
  9181  			absval = func() int32 {
  9182  				if (*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4))) > 0 {
  9183  					return *(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4))
  9184  				}
  9185  				return -*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4))
  9186  			}()
  9187  			if absval > maxabs {
  9188  				maxabs = absval
  9189  				idx = k
  9190  			}
  9191  		}
  9192  
  9193  		if maxabs > 0x7FFF {
  9194  			/* Reduce magnitude of prediction coefficients */
  9195  			maxabs = func() int32 {
  9196  				if (maxabs) < (98369) {
  9197  					return maxabs
  9198  				}
  9199  				return 98369
  9200  			}() // ( SKP_int32_MAX / ( 65470 >> 2 ) ) + SKP_int16_MAX = 98369
  9201  			sc_Q16 = (65470 - (((int32(65470) >> 2) * (maxabs - 0x7FFF)) / (((maxabs) * (idx + 1)) >> (2))))
  9202  			SKP_Silk_bwexpander_32(tls, bp+136 /* &a_int32[0] */, d, sc_Q16)
  9203  		} else {
  9204  			break
  9205  		}
  9206  	}
  9207  
  9208  	/* Reached the last iteration */
  9209  	if i == 10 {
  9210  
  9211  		for k = 0; k < d; k++ {
  9212  			*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4)) = func() int32 {
  9213  				if (*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4))) > 0x7FFF {
  9214  					return 0x7FFF
  9215  				}
  9216  				return func() int32 {
  9217  					if (*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4))) < (int32(libc.Int16FromInt32(0x8000))) {
  9218  						return int32(libc.Int16FromInt32(0x8000))
  9219  					}
  9220  					return *(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4))
  9221  				}()
  9222  			}()
  9223  		}
  9224  	}
  9225  
  9226  	/* Return as SKP_int16 Q12 coefficients */
  9227  	for k = 0; k < d; k++ {
  9228  		*(*int16)(unsafe.Pointer(a + uintptr(k)*2)) = int16(*(*int32)(unsafe.Pointer(bp + 136 /* &a_int32[0] */ + uintptr(k)*4)))
  9229  	}
  9230  }
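
        // NOTE (hand-written commentary; not part of the ccgo output): reading the generated code
        // back, SKP_Silk_NLSF2A appears to follow the usual LSF-to-LPC recipe: interpolate 2*cos()
        // values from SKP_Silk_LSFCosTab_FIX_Q12, build the symmetric/antisymmetric polynomials P
        // and Q with SKP_Silk_NLSF2A_find_poly, and combine them as (P, Q in Q20, output in Q12)
        //
        //	a_Q12[k]     = -round( (P[k+1] + P[k] + Q[k+1] - Q[k]) / 2 )   // the ">> 9" step
        //	a_Q12[d-1-k] =  round( (Q[k+1] - Q[k] - P[k+1] - P[k]) / 2 )
        //
        // followed by up to 10 rounds of bandwidth expansion (SKP_Silk_bwexpander_32) if any
        // coefficient magnitude exceeds the int16 range, and a final saturation to int16.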
  9231  
  9232  /* Convert NLSF parameters to stable AR prediction filter coefficients */
  9233  func SKP_Silk_NLSF2A_stable(tls *libc.TLS, pAR_Q12 uintptr, pNLSF uintptr, LPC_order int32) { /* SKP_Silk_NLSF2A_stable.c:31:6: */
  9234  	bp := tls.Alloc(4)
  9235  	defer tls.Free(4)
  9236  
  9237  	var i int32
  9238  	// var invGain_Q30 int32 at bp, 4
  9239  
  9240  	SKP_Silk_NLSF2A(tls, pAR_Q12, pNLSF, LPC_order)
  9241  
  9242  	/* Ensure stable LPCs */
  9243  	for i = 0; i < 20; i++ {
  9244  		if SKP_Silk_LPC_inverse_pred_gain(tls, bp /* &invGain_Q30 */, pAR_Q12, LPC_order) == 1 {
  9245  			SKP_Silk_bwexpander(tls, pAR_Q12, LPC_order, (65536 - ((int32((int16(10 + i)))) * (int32(int16(i)))))) /* 10_Q16 = 0.00015 */
  9246  		} else {
  9247  			break
  9248  		}
  9249  	}
  9250  
  9251  	/* Reached the last iteration */
  9252  	if i == 20 {
  9253  
  9254  		for i = 0; i < LPC_order; i++ {
  9255  			*(*int16)(unsafe.Pointer(pAR_Q12 + uintptr(i)*2)) = int16(0)
  9256  		}
  9257  	}
  9258  }
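
        // NOTE (hand-written commentary; not part of the ccgo output): stability is enforced by
        // re-checking SKP_Silk_LPC_inverse_pred_gain and, on failure, shrinking the coefficients
        // with SKP_Silk_bwexpander using the Q16 chirp factor 65536 - (10+i)*i, i.e. roughly
        // 1 - 0.00015*i - 0.000015*i*i; at the last attempt (i = 19) this is about
        // 1 - 551/65536 ~= 0.9916. If 20 attempts are not enough, the coefficients are zeroed.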
  9259  
  9260  /* NLSF vector decoder */
  9261  func SKP_Silk_NLSF_MSVQ_decode(tls *libc.TLS, pNLSF_Q15 uintptr, psNLSF_CB uintptr, NLSFIndices uintptr, LPC_order int32) { /* SKP_Silk_NLSF_MSVQ_decode.c:31:6: */
  9262  	var pCB_element uintptr
  9263  	var s int32
  9264  	var i int32
  9265  
  9266  	/* Check that each index is within valid range */
  9267  
  9268  	/* Point to the first vector element */
  9269  	pCB_element = ((*SKP_Silk_NLSF_CBS)(unsafe.Pointer((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FCBStages)).FCB_NLSF_Q15 + uintptr(((*(*int32)(unsafe.Pointer(NLSFIndices)))*(LPC_order)))*2)
  9270  
  9271  	/* Initialize with the codebook vector from stage 0 */
  9272  	for i = 0; i < LPC_order; i++ {
  9273  		*(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr(i)*4)) = int32(*(*int16)(unsafe.Pointer(pCB_element + uintptr(i)*2)))
  9274  	}
  9275  
  9276  	for s = 1; s < (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages; s++ {
  9277  		/* Check that each index is within valid range */
  9278  
  9279  		if LPC_order == 16 {
  9280  			/* Point to the first vector element */
  9281  			pCB_element = ((*SKP_Silk_NLSF_CBS)(unsafe.Pointer((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FCBStages+uintptr(s)*12)).FCB_NLSF_Q15 + uintptr(((*(*int32)(unsafe.Pointer(NLSFIndices + uintptr(s)*4)))<<(4)))*2)
  9282  
  9283  			/* Add the codebook vector from the current stage */
  9284  			*(*int32)(unsafe.Pointer(pNLSF_Q15)) += (int32(*(*int16)(unsafe.Pointer(pCB_element))))
  9285  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 1*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 1*2))))
  9286  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 2*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 2*2))))
  9287  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 3*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 3*2))))
  9288  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 4*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 4*2))))
  9289  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 5*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 5*2))))
  9290  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 6*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 6*2))))
  9291  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 7*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 7*2))))
  9292  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 8*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 8*2))))
  9293  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 9*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 9*2))))
  9294  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 10*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 10*2))))
  9295  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 11*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 11*2))))
  9296  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 12*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 12*2))))
  9297  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 13*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 13*2))))
  9298  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 14*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 14*2))))
  9299  			*(*int32)(unsafe.Pointer(pNLSF_Q15 + 15*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + 15*2))))
  9300  		} else {
  9301  			/* Point to the first vector element */
  9302  			pCB_element = ((*SKP_Silk_NLSF_CBS)(unsafe.Pointer((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FCBStages+uintptr(s)*12)).FCB_NLSF_Q15 + uintptr(((int32(int16(*(*int32)(unsafe.Pointer(NLSFIndices + uintptr(s)*4)))))*(int32(int16(LPC_order)))))*2)
  9303  
  9304  			/* Add the codebook vector from the current stage */
  9305  			for i = 0; i < LPC_order; i++ {
  9306  				*(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr(i)*4)) += (int32(*(*int16)(unsafe.Pointer(pCB_element + uintptr(i)*2))))
  9307  			}
  9308  		}
  9309  	}
  9310  
  9311  	/* NLSF stabilization */
  9312  	SKP_Silk_NLSF_stabilize(tls, pNLSF_Q15, (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FNDeltaMin_Q15, LPC_order)
  9313  }
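
        // NOTE (hand-written commentary; not part of the ccgo output): conceptually the decoder
        // above just sums one codebook vector per stage and then stabilizes the result, e.g. in
        // ordinary slice-based Go (hypothetical field and variable names):
        //
        //	for s := 0; s < nStages; s++ {
        //		cb := stages[s].CB_NLSF_Q15[indices[s]*lpcOrder:]
        //		for i := 0; i < lpcOrder; i++ {
        //			nlsfQ15[i] += int32(cb[i])
        //		}
        //	}
        //	// ...then SKP_Silk_NLSF_stabilize(...)
        //
        // The LPC_order == 16 branch is merely a manually unrolled copy of the generic loop.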
  9314  
  9315  /***********************/
  9316  /* NLSF vector encoder */
  9317  /***********************/
  9318  func SKP_Silk_NLSF_MSVQ_encode_FIX(tls *libc.TLS, NLSFIndices uintptr, pNLSF_Q15 uintptr, psNLSF_CB uintptr, pNLSF_q_Q15_prev uintptr, pW_Q6 uintptr, NLSF_mu_Q15 int32, NLSF_mu_fluc_red_Q16 int32, NLSF_MSVQ_Survivors int32, LPC_order int32, deactivate_fluc_red int32) { /* SKP_Silk_NLSF_MSVQ_encode_FIX.c:33:6: */
  9319  	bp := tls.Alloc(4544)
  9320  	defer tls.Free(4544)
  9321  
  9322  	var i int32
  9323  	var s int32
  9324  	var k int32
  9325  	var cur_survivors int32 = 0
  9326  	var prev_survivors int32
  9327  	var min_survivors int32
  9328  	var input_index int32
  9329  	var cb_index int32
  9330  	var bestIndex int32
  9331  	var rateDistThreshold_Q18 int32
  9332  	var se_Q15 int32
  9333  	var wsse_Q20 int32
  9334  	var bestRateDist_Q20 int32
  9335  	// var pRateDist_Q18 [256]int32 at bp+1088, 1024
  9336  
  9337  	// var pRate_Q5 [16]int32 at bp, 64
  9338  
  9339  	// var pRate_new_Q5 [16]int32 at bp+3200, 64
  9340  
  9341  	// var pTempIndices [16]int32 at bp+2112, 64
  9342  
  9343  	// var pPath [160]int32 at bp+3264, 640
  9344  
  9345  	// var pPath_new [160]int32 at bp+3904, 640
  9346  
  9347  	// var pRes_Q15 [256]int32 at bp+64, 1024
  9348  
  9349  	// var pRes_new_Q15 [256]int32 at bp+2176, 1024
  9350  
  9351  	var pConstInt uintptr
  9352  	var pInt uintptr
  9353  	var pCB_element uintptr
  9354  	var pCurrentCBStage uintptr
  9355  
  9356  	/****************************************************/
  9357  	/* Tree search for the multi-stage vector quantizer */
  9358  	/****************************************************/
  9359  
  9360  	/* Clear accumulated rates */
  9361  	libc.Xmemset(tls, bp /* &pRate_Q5[0] */, 0, (uint32(NLSF_MSVQ_Survivors) * uint32(unsafe.Sizeof(int32(0)))))
  9362  
  9363  	/* Copy NLSFs into residual signal vector */
  9364  	for i = 0; i < LPC_order; i++ {
  9365  		*(*int32)(unsafe.Pointer(bp + 64 /* &pRes_Q15[0] */ + uintptr(i)*4)) = *(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr(i)*4))
  9366  	}
  9367  
  9368  	/* Set first stage values */
  9369  	prev_survivors = 1
  9370  
  9371  	/* Minimum number of survivors */
  9372  	min_survivors = (NLSF_MSVQ_Survivors / 2)
  9373  
  9374  	/* Loop over all stages */
  9375  	for s = 0; s < (*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages; s++ {
  9376  
  9377  		/* Set a pointer to the current stage codebook */
  9378  		pCurrentCBStage = ((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FCBStages + uintptr(s)*12)
  9379  
  9380  		/* Calculate the number of survivors in the current stage */
  9381  		cur_survivors = SKP_min_32(tls, NLSF_MSVQ_Survivors, ((int32(int16(prev_survivors))) * (int32(int16((*SKP_Silk_NLSF_CBS)(unsafe.Pointer(pCurrentCBStage)).FnVectors)))))
  9382  
  9383  		/* Nearest neighbor clustering for multiple input data vectors */
  9384  		SKP_Silk_NLSF_VQ_rate_distortion_FIX(tls, bp+1088 /* &pRateDist_Q18[0] */, pCurrentCBStage, bp+64 /* &pRes_Q15[0] */, pW_Q6,
  9385  			bp /* &pRate_Q5[0] */, NLSF_mu_Q15, prev_survivors, LPC_order)
  9386  
  9387  		/* Sort the rate-distortion errors */
  9388  		SKP_Silk_insertion_sort_increasing(tls, bp+1088 /* &pRateDist_Q18[0] */, bp+2112, /* &pTempIndices[0] */
  9389  			(prev_survivors * (*SKP_Silk_NLSF_CBS)(unsafe.Pointer(pCurrentCBStage)).FnVectors), cur_survivors)
  9390  
  9391  		/* Discard survivors with rate-distortion values too far above the best one */
  9392  		if *(*int32)(unsafe.Pointer(bp + 1088 /* &pRateDist_Q18[0] */)) < (0x7FFFFFFF / 16) {
  9393  			rateDistThreshold_Q18 = ((*(*int32)(unsafe.Pointer(bp + 1088 /* &pRateDist_Q18[0] */))) + (((((NLSF_MSVQ_Survivors) * (*(*int32)(unsafe.Pointer(bp + 1088 /* &pRateDist_Q18[0] */)))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) + (((((NLSF_MSVQ_Survivors) * (*(*int32)(unsafe.Pointer(bp + 1088 /* &pRateDist_Q18[0] */)))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) >> 16)))
  9394  			for (*(*int32)(unsafe.Pointer(bp + 1088 /* &pRateDist_Q18[0] */ + uintptr((cur_survivors-1))*4)) > rateDistThreshold_Q18) && (cur_survivors > min_survivors) {
  9395  				cur_survivors--
  9396  			}
  9397  		}
  9398  		/* Update accumulated codebook contributions for the 'cur_survivors' best codebook indices */
  9399  		for k = 0; k < cur_survivors; k++ {
  9400  			if s > 0 {
  9401  				/* Find the indices of the input and the codebook vector */
  9402  				if (*SKP_Silk_NLSF_CBS)(unsafe.Pointer(pCurrentCBStage)).FnVectors == 8 {
  9403  					input_index = ((*(*int32)(unsafe.Pointer(bp + 2112 /* &pTempIndices[0] */ + uintptr(k)*4))) >> (3))
  9404  					cb_index = (*(*int32)(unsafe.Pointer(bp + 2112 /* &pTempIndices[0] */ + uintptr(k)*4)) & 7)
  9405  				} else {
  9406  					input_index = ((*(*int32)(unsafe.Pointer(bp + 2112 /* &pTempIndices[0] */ + uintptr(k)*4))) / ((*SKP_Silk_NLSF_CBS)(unsafe.Pointer(pCurrentCBStage)).FnVectors))
  9407  					cb_index = (*(*int32)(unsafe.Pointer(bp + 2112 /* &pTempIndices[0] */ + uintptr(k)*4)) - ((int32(int16(input_index))) * (int32(int16((*SKP_Silk_NLSF_CBS)(unsafe.Pointer(pCurrentCBStage)).FnVectors)))))
  9408  				}
  9409  			} else {
  9410  				/* Find the indices of the input and the codebook vector */
  9411  				input_index = 0
  9412  				cb_index = *(*int32)(unsafe.Pointer(bp + 2112 /* &pTempIndices[0] */ + uintptr(k)*4))
  9413  			}
  9414  
  9415  			/* Subtract new contribution from the previous residual vector for each of 'cur_survivors' */
  9416  			pConstInt = (bp + 64 /* &pRes_Q15 */ + uintptr(((int32(int16(input_index)))*(int32(int16(LPC_order)))))*4)
  9417  			pCB_element = ((*SKP_Silk_NLSF_CBS)(unsafe.Pointer(pCurrentCBStage)).FCB_NLSF_Q15 + uintptr(((int32(int16(cb_index)))*(int32(int16(LPC_order)))))*2)
  9418  			pInt = (bp + 2176 /* &pRes_new_Q15 */ + uintptr(((int32(int16(k)))*(int32(int16(LPC_order)))))*4)
  9419  			for i = 0; i < LPC_order; i++ {
  9420  				*(*int32)(unsafe.Pointer(pInt + uintptr(i)*4)) = (*(*int32)(unsafe.Pointer(pConstInt + uintptr(i)*4)) - int32(*(*int16)(unsafe.Pointer(pCB_element + uintptr(i)*2))))
  9421  			}
  9422  
  9423  			/* Update the accumulated rate from stage 1 up to the current stage */
  9424  			*(*int32)(unsafe.Pointer(bp + 3200 /* &pRate_new_Q5[0] */ + uintptr(k)*4)) = (*(*int32)(unsafe.Pointer(bp /* &pRate_Q5[0] */ + uintptr(input_index)*4)) + int32(*(*int16)(unsafe.Pointer((*SKP_Silk_NLSF_CBS)(unsafe.Pointer(pCurrentCBStage)).FRates_Q5 + uintptr(cb_index)*2))))
  9425  
  9426  			/* Copy paths from previous matrix, starting with the best path */
  9427  			pConstInt = (bp + 3264 /* &pPath */ + uintptr(((int32(int16(input_index)))*(int32(int16((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages)))))*4)
  9428  			pInt = (bp + 3904 /* &pPath_new */ + uintptr(((int32(int16(k)))*(int32(int16((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages)))))*4)
  9429  			for i = 0; i < s; i++ {
  9430  				*(*int32)(unsafe.Pointer(pInt + uintptr(i)*4)) = *(*int32)(unsafe.Pointer(pConstInt + uintptr(i)*4))
  9431  			}
  9432  			/* Write the current stage indices for the 'cur_survivors' to the best path matrix */
  9433  			*(*int32)(unsafe.Pointer(pInt + uintptr(s)*4)) = cb_index
  9434  		}
  9435  
  9436  		if s < ((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages - 1) {
  9437  			/* Copy NLSF residual matrix for next stage */
  9438  			libc.Xmemcpy(tls, bp+64 /* &pRes_Q15[0] */, bp+2176 /* &pRes_new_Q15[0] */, ((uint32((int32(int16(cur_survivors))) * (int32(int16(LPC_order))))) * uint32(unsafe.Sizeof(int32(0)))))
  9439  
  9440  			/* Copy rate vector for next stage */
  9441  			libc.Xmemcpy(tls, bp /* &pRate_Q5[0] */, bp+3200 /* &pRate_new_Q5[0] */, (uint32(cur_survivors) * uint32(unsafe.Sizeof(int32(0)))))
  9442  
  9443  			/* Copy best path matrix for next stage */
  9444  			libc.Xmemcpy(tls, bp+3264 /* &pPath[0] */, bp+3904 /* &pPath_new[0] */, ((uint32((int32(int16(cur_survivors))) * (int32(int16((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages))))) * uint32(unsafe.Sizeof(int32(0)))))
  9445  		}
  9446  
  9447  		prev_survivors = cur_survivors
  9448  	}
  9449  
  9450  	/* (Preliminary) index of the best survivor, later to be decoded */
  9451  	bestIndex = 0
  9452  
  9453  	/******************************/
  9454  	/* NLSF fluctuation reduction */
  9455  	/******************************/
  9456  	if deactivate_fluc_red != 1 {
  9457  
  9458  		/* Search among all survivors, now also taking weighted fluctuation errors into account */
  9459  		bestRateDist_Q20 = 0x7FFFFFFF
  9460  		for s = 0; s < cur_survivors; s++ {
  9461  			/* Decode survivor to compare with previous quantized NLSF vector */
  9462  			SKP_Silk_NLSF_MSVQ_decode(tls, pNLSF_Q15, psNLSF_CB, (bp + 3904 /* &pPath_new */ + uintptr(((int32(int16(s)))*(int32(int16((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages)))))*4), LPC_order)
  9463  
  9464  			/* Compare decoded NLSF vector with the previously quantized vector */
  9465  			wsse_Q20 = 0
  9466  			for i = 0; i < LPC_order; i = i + (2) {
  9467  				/* Compute weighted squared quantization error for index i */
  9468  				se_Q15 = (*(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr(i)*4)) - *(*int32)(unsafe.Pointer(pNLSF_q_Q15_prev + uintptr(i)*4))) // range: [ -32767 : 32767 ]
  9469  				wsse_Q20 = ((wsse_Q20) + (((((int32(int16(se_Q15))) * (int32(int16(se_Q15)))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(pW_Q6 + uintptr(i)*4)))))) + (((((int32(int16(se_Q15))) * (int32(int16(se_Q15)))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(pW_Q6 + uintptr(i)*4)))))) >> 16)))
  9470  
  9471  				/* Compute weighted squared quantization error for index i + 1 */
  9472  				se_Q15 = (*(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr((i+1))*4)) - *(*int32)(unsafe.Pointer(pNLSF_q_Q15_prev + uintptr((i+1))*4))) // range: [ -32767 : 32767 ]
  9473  				wsse_Q20 = ((wsse_Q20) + (((((int32(int16(se_Q15))) * (int32(int16(se_Q15)))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(pW_Q6 + uintptr((i+1))*4)))))) + (((((int32(int16(se_Q15))) * (int32(int16(se_Q15)))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(pW_Q6 + uintptr((i+1))*4)))))) >> 16)))
  9474  			}
  9475  
  9476  			/* Add the fluctuation reduction penalty to the rate distortion error */
  9477  			wsse_Q20 = func() int32 {
  9478  				if ((uint32((*(*int32)(unsafe.Pointer(bp + 1088 /* &pRateDist_Q18[0] */ + uintptr(s)*4))) + ((((wsse_Q20) >> 16) * (int32(int16(NLSF_mu_fluc_red_Q16)))) + ((((wsse_Q20) & 0x0000FFFF) * (int32(int16(NLSF_mu_fluc_red_Q16)))) >> 16)))) & 0x80000000) != 0 {
  9479  					return 0x7FFFFFFF
  9480  				}
  9481  				return ((*(*int32)(unsafe.Pointer(bp + 1088 /* &pRateDist_Q18[0] */ + uintptr(s)*4))) + ((((wsse_Q20) >> 16) * (int32(int16(NLSF_mu_fluc_red_Q16)))) + ((((wsse_Q20) & 0x0000FFFF) * (int32(int16(NLSF_mu_fluc_red_Q16)))) >> 16)))
  9482  			}()
  9483  
  9484  			/* Keep index of best survivor */
  9485  			if wsse_Q20 < bestRateDist_Q20 {
  9486  				bestRateDist_Q20 = wsse_Q20
  9487  				bestIndex = s
  9488  			}
  9489  		}
  9490  	}
  9491  
  9492  	/* Copy best path to output argument */
  9493  	libc.Xmemcpy(tls, NLSFIndices, (bp + 3904 /* &pPath_new */ + uintptr(((int32(int16(bestIndex)))*(int32(int16((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages)))))*4), (uint32((*SKP_Silk_NLSF_CB_struct)(unsafe.Pointer(psNLSF_CB)).FnStages) * uint32(unsafe.Sizeof(int32(0)))))
  9494  
  9495  	/* Decode and stabilize the best survivor */
  9496  	SKP_Silk_NLSF_MSVQ_decode(tls, pNLSF_Q15, psNLSF_CB, NLSFIndices, LPC_order)
  9497  
  9498  }
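
        // NOTE (hand-written commentary; not part of the ccgo output): the encoder above appears
        // to run an M-best (tree) search through the MSVQ stages: for every surviving path it
        // evaluates all codebook vectors of the current stage, sorts the rate-distortion values,
        // and prunes survivors whose value exceeds roughly
        //
        //	rateDistThreshold_Q18 ~= RD_best * (1 + 0.1 * NLSF_MSVQ_Survivors)
        //
        // while never dropping below min_survivors = NLSF_MSVQ_Survivors/2. Unless fluctuation
        // reduction is deactivated, a final pass re-decodes each surviving path and adds a penalty
        // proportional to the weighted distance from the previously quantized NLSF vector (scaled
        // by NLSF_mu_fluc_red_Q16) before picking the best index.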
  9499  
  9500  /* Constant Definitions */
  9501  
  9502  /* NLSF stabilizer, for a single input data vector */
  9503  func SKP_Silk_NLSF_stabilize(tls *libc.TLS, NLSF_Q15 uintptr, NDeltaMin_Q15 uintptr, L int32) { /* SKP_Silk_NLSF_stabilize.c:42:6: */
  9504  	var center_freq_Q15 int32
  9505  	var diff_Q15 int32
  9506  	var min_center_Q15 int32
  9507  	var max_center_Q15 int32
  9508  	var min_diff_Q15 int32
  9509  	var loops int32
  9510  	var i int32
  9511  	var I int32 = 0
  9512  	var k int32
  9513  
  9514  	/* This is necessary to ensure an output within range of a SKP_int16 */
  9515  
  9516  	for loops = 0; loops < 20; loops++ {
  9517  		/**************************/
  9518  		/* Find smallest distance */
  9519  		/**************************/
  9520  		/* First element */
  9521  		min_diff_Q15 = (*(*int32)(unsafe.Pointer(NLSF_Q15)) - *(*int32)(unsafe.Pointer(NDeltaMin_Q15)))
  9522  		I = 0
  9523  		/* Middle elements */
  9524  		for i = 1; i <= (L - 1); i++ {
  9525  			diff_Q15 = (*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(i)*4)) - (*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((i-1))*4)) + *(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(i)*4))))
  9526  			if diff_Q15 < min_diff_Q15 {
  9527  				min_diff_Q15 = diff_Q15
  9528  				I = i
  9529  			}
  9530  		}
  9531  		/* Last element */
  9532  		diff_Q15 = ((int32(1) << 15) - (*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((L-1))*4)) + *(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(L)*4))))
  9533  		if diff_Q15 < min_diff_Q15 {
  9534  			min_diff_Q15 = diff_Q15
  9535  			I = L
  9536  		}
  9537  
  9538  		/***************************************************/
  9539  		/* Now check if the smallest distance is non-negative */
  9540  		/***************************************************/
  9541  		if min_diff_Q15 >= 0 {
  9542  			return
  9543  		}
  9544  
  9545  		if I == 0 {
  9546  			/* Move away from lower limit */
  9547  			*(*int32)(unsafe.Pointer(NLSF_Q15)) = *(*int32)(unsafe.Pointer(NDeltaMin_Q15))
  9548  
  9549  		} else if I == L {
  9550  			/* Move away from higher limit */
  9551  			*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((L-1))*4)) = ((int32(1) << 15) - *(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(L)*4)))
  9552  
  9553  		} else {
  9554  			/* Find the lower extreme for the location of the current center frequency */
  9555  			min_center_Q15 = 0
  9556  			for k = 0; k < I; k++ {
  9557  				min_center_Q15 = min_center_Q15 + (*(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(k)*4)))
  9558  			}
  9559  			min_center_Q15 = min_center_Q15 + ((*(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(I)*4))) >> (1))
  9560  
  9561  			/* Find the upper extreme for the location of the current center frequency */
  9562  			max_center_Q15 = (int32(1) << 15)
  9563  			for k = L; k > I; k-- {
  9564  				max_center_Q15 = max_center_Q15 - (*(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(k)*4)))
  9565  			}
  9566  			max_center_Q15 = max_center_Q15 - (*(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(I)*4)) - ((*(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(I)*4))) >> (1)))
  9567  
  9568  			/* Move apart, sorted by value, keeping the same center frequency */
  9569  			center_freq_Q15 = func() int32 {
  9570  				if (min_center_Q15) > (max_center_Q15) {
  9571  					return func() int32 {
  9572  						if (func() int32 {
  9573  							if (1) == 1 {
  9574  								return (((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) & 1))
  9575  							}
  9576  							return ((((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> ((1) - 1)) + 1) >> 1)
  9577  						}()) > (min_center_Q15) {
  9578  							return min_center_Q15
  9579  						}
  9580  						return func() int32 {
  9581  							if (func() int32 {
  9582  								if (1) == 1 {
  9583  									return (((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) & 1))
  9584  								}
  9585  								return ((((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> ((1) - 1)) + 1) >> 1)
  9586  							}()) < (max_center_Q15) {
  9587  								return max_center_Q15
  9588  							}
  9589  							return func() int32 {
  9590  								if (1) == 1 {
  9591  									return (((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) & 1))
  9592  								}
  9593  								return ((((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> ((1) - 1)) + 1) >> 1)
  9594  							}()
  9595  						}()
  9596  					}()
  9597  				}
  9598  				return func() int32 {
  9599  					if (func() int32 {
  9600  						if (1) == 1 {
  9601  							return (((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) & 1))
  9602  						}
  9603  						return ((((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> ((1) - 1)) + 1) >> 1)
  9604  					}()) > (max_center_Q15) {
  9605  						return max_center_Q15
  9606  					}
  9607  					return func() int32 {
  9608  						if (func() int32 {
  9609  							if (1) == 1 {
  9610  								return (((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) & 1))
  9611  							}
  9612  							return ((((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> ((1) - 1)) + 1) >> 1)
  9613  						}()) < (min_center_Q15) {
  9614  							return min_center_Q15
  9615  						}
  9616  						return func() int32 {
  9617  							if (1) == 1 {
  9618  								return (((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) & 1))
  9619  							}
  9620  							return ((((*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4))) >> ((1) - 1)) + 1) >> 1)
  9621  						}()
  9622  					}()
  9623  				}()
  9624  			}()
  9625  			*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) = (center_freq_Q15 - ((*(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(I)*4))) >> (1)))
  9626  			*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(I)*4)) = (*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((I-1))*4)) + *(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(I)*4)))
  9627  		}
  9628  	}
  9629  
  9630  	/* Safe and simple fall back method, which is less ideal than the above */
  9631  	if loops == 20 {
  9632  		/* Insertion sort (fast for already almost sorted arrays):   */
  9633  		/* Best case:  O(n)   for an already sorted array            */
  9634  		/* Worst case: O(n^2) for an inversely sorted array          */
  9635  		SKP_Silk_insertion_sort_increasing_all_values(tls, (NLSF_Q15), L)
  9636  
  9637  		/* First NLSF should be no less than NDeltaMin[0] */
  9638  		*(*int32)(unsafe.Pointer(NLSF_Q15)) = SKP_max_int(tls, *(*int32)(unsafe.Pointer(NLSF_Q15)), *(*int32)(unsafe.Pointer(NDeltaMin_Q15)))
  9639  
  9640  		/* Keep delta_min distance between the NLSFs */
  9641  		for i = 1; i < L; i++ {
  9642  			*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(i)*4)) = SKP_max_int(tls, *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(i)*4)), (*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((i-1))*4)) + *(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(i)*4))))
  9643  		}
  9644  
  9645  		/* Last NLSF should be no higher than 1 - NDeltaMin[L] */
  9646  		*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((L-1))*4)) = SKP_min_int(tls, *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((L-1))*4)), ((int32(1) << 15) - *(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr(L)*4))))
  9647  
  9648  		/* Keep NDeltaMin distance between the NLSFs */
  9649  		for i = (L - 2); i >= 0; i-- {
  9650  			*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(i)*4)) = SKP_min_int(tls, *(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr(i)*4)), (*(*int32)(unsafe.Pointer(NLSF_Q15 + uintptr((i+1))*4)) - *(*int32)(unsafe.Pointer(NDeltaMin_Q15 + uintptr((i+1))*4))))
  9651  		}
  9652  	}
  9653  }
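
        // NOTE (hand-written commentary; not part of the ccgo output): the stabilizer enforces a
        // minimum spacing NDeltaMin_Q15[i] between consecutive NLSFs (and against the 0 and 1<<15
        // boundaries). Each of the up to 20 iterations locates the most negative spacing and moves
        // that pair symmetrically around a clamped center frequency; if that does not converge,
        // the fallback sorts the vector and then clamps it forwards and backwards instead.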
  9654  
  9655  /* Rate-Distortion calculations for multiple input data vectors */
  9656  func SKP_Silk_NLSF_VQ_rate_distortion_FIX(tls *libc.TLS, pRD_Q20 uintptr, psNLSF_CBS uintptr, in_Q15 uintptr, w_Q6 uintptr, rate_acc_Q5 uintptr, mu_Q15 int32, N int32, LPC_order int32) { /* SKP_Silk_NLSF_VQ_rate_distortion_FIX.c:31:6: */
  9657  	var i int32
  9658  	var n int32
  9659  	var pRD_vec_Q20 uintptr
  9660  
  9661  	/* Compute weighted quantization errors for all input vectors over one codebook stage */
  9662  	SKP_Silk_NLSF_VQ_sum_error_FIX(tls, pRD_Q20, in_Q15, w_Q6, (*SKP_Silk_NLSF_CBS)(unsafe.Pointer(psNLSF_CBS)).FCB_NLSF_Q15,
  9663  		N, (*SKP_Silk_NLSF_CBS)(unsafe.Pointer(psNLSF_CBS)).FnVectors, LPC_order)
  9664  
  9665  	/* Loop over input vectors */
  9666  	pRD_vec_Q20 = pRD_Q20
  9667  	for n = 0; n < N; n++ {
  9668  		/* Add rate cost to error for each codebook vector */
  9669  		for i = 0; i < (*SKP_Silk_NLSF_CBS)(unsafe.Pointer(psNLSF_CBS)).FnVectors; i++ {
  9670  
  9671  			*(*int32)(unsafe.Pointer(pRD_vec_Q20 + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer(pRD_vec_Q20 + uintptr(i)*4))) + ((int32((int16(*(*int32)(unsafe.Pointer(rate_acc_Q5 + uintptr(n)*4)) + int32(*(*int16)(unsafe.Pointer((*SKP_Silk_NLSF_CBS)(unsafe.Pointer(psNLSF_CBS)).FRates_Q5 + uintptr(i)*2))))))) * (int32(int16(mu_Q15)))))
  9672  
  9673  		}
  9674  		pRD_vec_Q20 += 4 * (uintptr((*SKP_Silk_NLSF_CBS)(unsafe.Pointer(psNLSF_CBS)).FnVectors))
  9675  	}
  9676  }
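
        // NOTE (hand-written commentary; not part of the ccgo output): the rate-distortion value
        // per codebook vector is the weighted quantization error plus a rate term,
        //
        //	RD_Q20[i] = err_Q20[i] + mu_Q15 * (rate_acc_Q5[n] + Rates_Q5[i])
        //
        // where the Q5 rate times the Q15 mu lands directly in Q20, so no extra shift is needed.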
  9677  
  9678  /* Compute weighted quantization errors for an LPC_order element input vector, over one codebook stage */
  9679  func SKP_Silk_NLSF_VQ_sum_error_FIX(tls *libc.TLS, err_Q20 uintptr, in_Q15 uintptr, w_Q6 uintptr, pCB_Q15 uintptr, N int32, K int32, LPC_order int32) { /* SKP_Silk_NLSF_VQ_sum_error_FIX.c:32:6: */
  9680  	bp := tls.Alloc(32)
  9681  	defer tls.Free(32)
  9682  
  9683  	var i int32
  9684  	var n int32
  9685  	var m int32
  9686  	var diff_Q15 int32
  9687  	var sum_error int32
  9688  	var Wtmp_Q6 int32
  9689  	// var Wcpy_Q6 [8]int32 at bp, 32
  9690  
  9691  	var cb_vec_Q15 uintptr
  9692  
  9693  	/* Copy to local stack and pack two weights per int32 */
  9694  	for m = 0; m < ((LPC_order) >> (1)); m++ {
  9695  		*(*int32)(unsafe.Pointer(bp /* &Wcpy_Q6[0] */ + uintptr(m)*4)) = (*(*int32)(unsafe.Pointer(w_Q6 + uintptr((2*m))*4)) | ((*(*int32)(unsafe.Pointer(w_Q6 + uintptr(((2*m)+1))*4))) << (16)))
  9696  	}
  9697  
  9698  	/* Loop over input vectors */
  9699  	for n = 0; n < N; n++ {
  9700  		/* Loop over codebook */
  9701  		cb_vec_Q15 = pCB_Q15
  9702  		for i = 0; i < K; i++ {
  9703  			sum_error = 0
  9704  			for m = 0; m < LPC_order; m = m + (2) {
  9705  				/* Get two weights packed in an int32 */
  9706  				Wtmp_Q6 = *(*int32)(unsafe.Pointer(bp /* &Wcpy_Q6[0] */ + uintptr(((m)>>(1)))*4))
  9707  
  9708  				/* Compute weighted squared quantization error for index m */
  9709  				diff_Q15 = (*(*int32)(unsafe.Pointer(in_Q15 + uintptr(m)*4)) - int32(*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&cb_vec_Q15, 2))))) // range: [ -32767 : 32767 ]
  9710  				sum_error = ((sum_error) + (((((int32(int16(diff_Q15))) * (int32(int16(diff_Q15)))) >> 16) * (int32(int16(Wtmp_Q6)))) + (((((int32(int16(diff_Q15))) * (int32(int16(diff_Q15)))) & 0x0000FFFF) * (int32(int16(Wtmp_Q6)))) >> 16)))
  9711  
  9712  				/* Compute weighted squared quantization error for index m + 1 */
  9713  				diff_Q15 = (*(*int32)(unsafe.Pointer(in_Q15 + uintptr((m+1))*4)) - int32(*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&cb_vec_Q15, 2))))) // range: [ -32767 : 32767 ]
  9714  				sum_error = (((sum_error) + ((((int32(int16(diff_Q15))) * (int32(int16(diff_Q15)))) >> 16) * ((Wtmp_Q6) >> 16))) + (((((int32(int16(diff_Q15))) * (int32(int16(diff_Q15)))) & 0x0000FFFF) * ((Wtmp_Q6) >> 16)) >> 16))
  9715  			}
  9716  
  9717  			*(*int32)(unsafe.Pointer(err_Q20 + uintptr(i)*4)) = sum_error
  9718  		}
  9719  		err_Q20 += 4 * (uintptr(K))
  9720  		in_Q15 += 4 * (uintptr(LPC_order))
  9721  	}
  9722  }
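
        // NOTE (hand-written commentary; not part of the ccgo output): Wcpy_Q6 packs two Q6
        // weights into each int32 (low and high halfword), so the inner loop fetches one word per
        // pair of coefficients. The error itself is accumulated as
        //
        //	sum_error += (diff_Q15 * diff_Q15) * w_Q6 >> 16    // Q30 * Q6 >> 16 = Q20
        //
        // which is why the caller treats the result as Q20.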
  9723  
  9724  /*
  9725  R. Laroia, N. Phamdo and N. Farvardin, "Robust and Efficient Quantization of Speech LSP
  9726  Parameters Using Structured Vector Quantization", Proc. IEEE Int. Conf. Acoust., Speech,
  9727  Signal Processing, pp. 641-644, 1991.
  9728  */
  9729  
  9730  /* Laroia low complexity NLSF weights */
  9731  func SKP_Silk_NLSF_VQ_weights_laroia(tls *libc.TLS, pNLSFW_Q6 uintptr, pNLSF_Q15 uintptr, D int32) { /* SKP_Silk_NLSF_VQ_weights_laroia.c:40:6: */
  9732  	var k int32
  9733  	var tmp1_int int32
  9734  	var tmp2_int int32
  9735  
  9736  	/* Check that we are guaranteed to end up within the required range */
  9737  
  9738  	/* First value */
  9739  	tmp1_int = SKP_max_int(tls, *(*int32)(unsafe.Pointer(pNLSF_Q15)), 3)
  9740  	tmp1_int = ((int32(1) << (15 + 6)) / (tmp1_int))
  9741  	tmp2_int = SKP_max_int(tls, (*(*int32)(unsafe.Pointer(pNLSF_Q15 + 1*4)) - *(*int32)(unsafe.Pointer(pNLSF_Q15))), 3)
  9742  	tmp2_int = ((int32(1) << (15 + 6)) / (tmp2_int))
  9743  	*(*int32)(unsafe.Pointer(pNLSFW_Q6)) = SKP_min_int(tls, (tmp1_int + tmp2_int), 0x7FFF)
  9744  
  9745  	/* Main loop */
  9746  	for k = 1; k < (D - 1); k = k + (2) {
  9747  		tmp1_int = SKP_max_int(tls, (*(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr((k+1))*4)) - *(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr(k)*4))), 3)
  9748  		tmp1_int = ((int32(1) << (15 + 6)) / (tmp1_int))
  9749  		*(*int32)(unsafe.Pointer(pNLSFW_Q6 + uintptr(k)*4)) = SKP_min_int(tls, (tmp1_int + tmp2_int), 0x7FFF)
  9750  
  9751  		tmp2_int = SKP_max_int(tls, (*(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr((k+2))*4)) - *(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr((k+1))*4))), 3)
  9752  		tmp2_int = ((int32(1) << (15 + 6)) / (tmp2_int))
  9753  		*(*int32)(unsafe.Pointer(pNLSFW_Q6 + uintptr((k+1))*4)) = SKP_min_int(tls, (tmp1_int + tmp2_int), 0x7FFF)
  9754  
  9755  	}
  9756  
  9757  	/* Last value */
  9758  	tmp1_int = SKP_max_int(tls, ((int32(1) << 15) - *(*int32)(unsafe.Pointer(pNLSF_Q15 + uintptr((D-1))*4))), 3)
  9759  	tmp1_int = ((int32(1) << (15 + 6)) / (tmp1_int))
  9760  	*(*int32)(unsafe.Pointer(pNLSFW_Q6 + uintptr((D-1))*4)) = SKP_min_int(tls, (tmp1_int + tmp2_int), 0x7FFF)
  9761  
  9762  }
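
        // NOTE (hand-written commentary; not part of the ccgo output): following the cited Laroia
        // et al. scheme, each weight is the sum of the reciprocals of the distances to the two
        // neighboring NLSFs (with 0 and 1<<15 acting as the outer neighbors), computed in Q6 as
        //
        //	w_Q6[k] = min( (1<<21)/(f[k] - f[k-1]) + (1<<21)/(f[k+1] - f[k]), 0x7FFF )
        //
        // with each difference floored at 3 to keep the divisions bounded.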
  9763  
  9764  /***********************************************************************
  9765  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
  9766  Redistribution and use in source and binary forms, with or without
  9767  modification, (subject to the limitations in the disclaimer below)
  9768  are permitted provided that the following conditions are met:
  9769  - Redistributions of source code must retain the above copyright notice,
  9770  this list of conditions and the following disclaimer.
  9771  - Redistributions in binary form must reproduce the above copyright
  9772  notice, this list of conditions and the following disclaimer in the
  9773  documentation and/or other materials provided with the distribution.
  9774  - Neither the name of Skype Limited, nor the names of specific
  9775  contributors, may be used to endorse or promote products derived from
  9776  this software without specific prior written permission.
  9777  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
  9778  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  9779  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
  9780  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
  9781  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  9782  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  9783  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  9784  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  9785  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  9786  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  9787  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  9788  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  9789  ***********************************************************************/
  9790  
  9791  /*******************/
  9792  /* Pitch estimator */
  9793  /*******************/
  9794  
  9795  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
  9796  
  9797  /* Bandwidth expansion for whitening filter in pitch analysis */
  9798  
  9799  /* Threshold used by pitch estimator for early escape */
  9800  
  9801  /*********************/
  9802  /* Linear prediction */
  9803  /*********************/
  9804  
  9805  /* LPC analysis defines: regularization and bandwidth expansion */
  9806  
  9807  /* LTP analysis defines */
  9808  
  9809  /* LTP quantization settings */
  9810  
  9811  /***********************/
  9812  /* High pass filtering */
  9813  /***********************/
  9814  
  9815  /* Smoothing parameters for low end of pitch frequency range estimation */
  9816  
  9817  /* Min and max values for low end of pitch frequency range estimation */
  9818  
  9819  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
  9820  
  9821  /***********/
  9822  /* Various */
  9823  /***********/
  9824  
  9825  /* Required speech activity for counting frame as active */
  9826  
  9827  /* Speech Activity LBRR enable threshold (needs tuning) */
  9828  
  9829  /*************************/
  9830  /* Perceptual parameters */
  9831  /*************************/
  9832  
  9833  /* reduction in coding SNR during low speech activity */
  9834  
  9835  /* factor for reducing quantization noise during voiced speech */
  9836  
  9837  /* factor for reducing quantization noise for unvoiced sparse signals */
  9838  
  9839  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
  9840  
  9841  /* warping control */
  9842  
  9843  /* fraction added to first autocorrelation value */
  9844  
  9845  /* noise shaping filter chirp factor */
  9846  
  9847  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
  9848  
  9849  /* gain reduction for fricatives */
  9850  
  9851  /* extra harmonic boosting (signal shaping) at low bitrates */
  9852  
  9853  /* extra harmonic boosting (signal shaping) for noisy input signals */
  9854  
  9855  /* harmonic noise shaping */
  9856  
  9857  /* extra harmonic noise shaping for high bitrates or noisy input */
  9858  
  9859  /* parameter for shaping noise towards higher frequencies */
  9860  
  9861  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
  9862  
  9863  /* parameter for applying a high-pass tilt to the input signal */
  9864  
  9865  /* parameter for extra high-pass tilt to the input signal at high rates */
  9866  
  9867  /* parameter for reducing noise at the very low frequencies */
  9868  
  9869  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
  9870  
  9871  /* noise floor to put a lower limit on the quantization step size */
  9872  
  9873  /* noise floor relative to active speech gain level */
  9874  
  9875  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
  9876  
  9877  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
  9878  
  9879  /* parameters defining the R/D tradeoff in the residual quantizer */
  9880  
  9881  /* Compute gain to make warped filter coefficients have a zero mean log frequency response on a     */
  9882  /* non-warped frequency scale. (So that it can be implemented with a minimum-phase monic filter.)   */
  9883  func warped_gain(tls *libc.TLS, coefs_Q24 uintptr, lambda_Q16 int32, order int32) int32 { /* SKP_Silk_noise_shape_analysis_FIX.c:33:22: */
  9884  	var i int32
  9885  	var gain_Q24 int32
  9886  
  9887  	lambda_Q16 = -lambda_Q16
  9888  	gain_Q24 = *(*int32)(unsafe.Pointer(coefs_Q24 + uintptr((order-1))*4))
  9889  	for i = (order - 2); i >= 0; i-- {
  9890  		gain_Q24 = ((*(*int32)(unsafe.Pointer(coefs_Q24 + uintptr(i)*4))) + ((((gain_Q24) >> 16) * (int32(int16(lambda_Q16)))) + ((((gain_Q24) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9891  	}
  9892  	gain_Q24 = ((SKP_FIX_CONST(tls, 1.0, 24)) + ((((gain_Q24) >> 16) * (int32(int16(-lambda_Q16)))) + ((((gain_Q24) & 0x0000FFFF) * (int32(int16(-lambda_Q16)))) >> 16)))
  9893  	return SKP_INVERSE32_varQ(tls, gain_Q24, 40)
  9894  }
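
        // NOTE (hand-written commentary; not part of the ccgo output): warped_gain evaluates the
        // warped coefficient polynomial at the (negated) warping parameter with Horner's rule,
        // forms 1 + lambda*(that value), and returns its fixed-point inverse, apparently as a Q16
        // gain. The recurring pattern ((x>>16)*int16(y)) + (((x&0xFFFF)*int16(y))>>16) throughout
        // this file appears to be the expansion of the original fractional-multiply macro
        // SKP_SMULWB, and is equivalent to the plainer (hypothetical helper)
        //
        //	func smulwb(a int32, b int32) int32 {
        //		return int32((int64(a) * int64(int16(b))) >> 16)
        //	}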
  9895  
  9896  /* Convert warped filter coefficients to monic pseudo-warped coefficients and limit maximum     */
  9897  /* amplitude of monic warped coefficients by using bandwidth expansion on the true coefficients */
  9898  func limit_warped_coefs(tls *libc.TLS, coefs_syn_Q24 uintptr, coefs_ana_Q24 uintptr, lambda_Q16 int32, limit_Q24 int32, order int32) { /* SKP_Silk_noise_shape_analysis_FIX.c:52:17: */
  9899  	var i int32
  9900  	var iter int32
  9901  	var ind int32 = 0
  9902  	var tmp int32
  9903  	var maxabs_Q24 int32
  9904  	var chirp_Q16 int32
  9905  	var gain_syn_Q16 int32
  9906  	var gain_ana_Q16 int32
  9907  	var nom_Q16 int32
  9908  	var den_Q24 int32
  9909  
  9910  	/* Convert to monic coefficients */
  9911  	lambda_Q16 = -lambda_Q16
  9912  	for i = (order - 1); i > 0; i-- {
  9913  		*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr((i-1))*4)) = ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr((i-1))*4))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9914  		*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr((i-1))*4)) = ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr((i-1))*4))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9915  	}
  9916  	lambda_Q16 = -lambda_Q16
  9917  	nom_Q16 = ((SKP_FIX_CONST(tls, 1.0, 16)) + ((((-lambda_Q16) >> 16) * (int32(int16(lambda_Q16)))) + ((((-lambda_Q16) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9918  	den_Q24 = ((SKP_FIX_CONST(tls, 1.0, 24)) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9919  	gain_syn_Q16 = SKP_DIV32_varQ(tls, nom_Q16, den_Q24, 24)
  9920  	den_Q24 = ((SKP_FIX_CONST(tls, 1.0, 24)) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9921  	gain_ana_Q16 = SKP_DIV32_varQ(tls, nom_Q16, den_Q24, 24)
  9922  	for i = 0; i < order; i++ {
  9923  		*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)) = (((((gain_syn_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)))))) + ((((gain_syn_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)))))) >> 16)) + ((gain_syn_Q16) * (func() int32 {
  9924  			if (16) == 1 {
  9925  				return (((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) & 1))
  9926  			}
  9927  			return ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
  9928  		}())))
  9929  		*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)) = (((((gain_ana_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)))))) + ((((gain_ana_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)))))) >> 16)) + ((gain_ana_Q16) * (func() int32 {
  9930  			if (16) == 1 {
  9931  				return (((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) & 1))
  9932  			}
  9933  			return ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
  9934  		}())))
  9935  	}
  9936  
  9937  	for iter = 0; iter < 10; iter++ {
  9938  		/* Find maximum absolute value */
  9939  		maxabs_Q24 = -1
  9940  		for i = 0; i < order; i++ {
  9941  			tmp = func() int32 {
  9942  				if (((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) ^ ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 31)) - ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 31)) > (((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) ^ ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 31)) - ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 31)) {
  9943  					return (((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) ^ ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 31)) - ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 31))
  9944  				}
  9945  				return (((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) ^ ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 31)) - ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 31))
  9946  			}()
  9947  			if tmp > maxabs_Q24 {
  9948  				maxabs_Q24 = tmp
  9949  				ind = i
  9950  			}
  9951  		}
  9952  		if maxabs_Q24 <= limit_Q24 {
  9953  			/* Coefficients are within range - done */
  9954  			return
  9955  		}
  9956  
  9957  		/* Convert back to true warped coefficients */
  9958  		for i = 1; i < order; i++ {
  9959  			*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr((i-1))*4)) = ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr((i-1))*4))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9960  			*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr((i-1))*4)) = ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr((i-1))*4))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9961  		}
  9962  		gain_syn_Q16 = SKP_INVERSE32_varQ(tls, gain_syn_Q16, 32)
  9963  		gain_ana_Q16 = SKP_INVERSE32_varQ(tls, gain_ana_Q16, 32)
  9964  		for i = 0; i < order; i++ {
  9965  			*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)) = (((((gain_syn_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)))))) + ((((gain_syn_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)))))) >> 16)) + ((gain_syn_Q16) * (func() int32 {
  9966  				if (16) == 1 {
  9967  					return (((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) & 1))
  9968  				}
  9969  				return ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
  9970  			}())))
  9971  			*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)) = (((((gain_ana_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)))))) + ((((gain_ana_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)))))) >> 16)) + ((gain_ana_Q16) * (func() int32 {
  9972  				if (16) == 1 {
  9973  					return (((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) & 1))
  9974  				}
  9975  				return ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
  9976  			}())))
  9977  		}
  9978  
  9979  		/* Apply bandwidth expansion */
  9980  		chirp_Q16 = (SKP_FIX_CONST(tls, 0.99, 16) - SKP_DIV32_varQ(tls,
  9981  			((((maxabs_Q24-limit_Q24)>>16)*(int32((int16((SKP_FIX_CONST(tls, 0.8, 10)) + ((int32(int16(SKP_FIX_CONST(tls, 0.1, 10)))) * (int32(int16(iter)))))))))+((((maxabs_Q24-limit_Q24)&0x0000FFFF)*(int32((int16((SKP_FIX_CONST(tls, 0.8, 10)) + ((int32(int16(SKP_FIX_CONST(tls, 0.1, 10)))) * (int32(int16(iter)))))))))>>16)),
  9982  			((maxabs_Q24)*(ind+1)), 22))
  9983  		SKP_Silk_bwexpander_32(tls, coefs_syn_Q24, order, chirp_Q16)
  9984  		SKP_Silk_bwexpander_32(tls, coefs_ana_Q24, order, chirp_Q16)
  9985  
  9986  		/* Convert to monic warped coefficients */
  9987  		lambda_Q16 = -lambda_Q16
  9988  		for i = (order - 1); i > 0; i-- {
  9989  			*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr((i-1))*4)) = ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr((i-1))*4))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9990  			*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr((i-1))*4)) = ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr((i-1))*4))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9991  		}
  9992  		lambda_Q16 = -lambda_Q16
  9993  		nom_Q16 = ((SKP_FIX_CONST(tls, 1.0, 16)) + ((((-lambda_Q16) >> 16) * (int32(int16(lambda_Q16)))) + ((((-lambda_Q16) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9994  		den_Q24 = ((SKP_FIX_CONST(tls, 1.0, 24)) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9995  		gain_syn_Q16 = SKP_DIV32_varQ(tls, nom_Q16, den_Q24, 24)
  9996  		den_Q24 = ((SKP_FIX_CONST(tls, 1.0, 24)) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24))) >> 16) * (int32(int16(lambda_Q16)))) + ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24))) & 0x0000FFFF) * (int32(int16(lambda_Q16)))) >> 16)))
  9997  		gain_ana_Q16 = SKP_DIV32_varQ(tls, nom_Q16, den_Q24, 24)
  9998  		for i = 0; i < order; i++ {
  9999  			*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)) = (((((gain_syn_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)))))) + ((((gain_syn_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4)))))) >> 16)) + ((gain_syn_Q16) * (func() int32 {
 10000  				if (16) == 1 {
 10001  					return (((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) & 1))
 10002  				}
 10003  				return ((((*(*int32)(unsafe.Pointer(coefs_syn_Q24 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 10004  			}())))
 10005  			*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)) = (((((gain_ana_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)))))) + ((((gain_ana_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4)))))) >> 16)) + ((gain_ana_Q16) * (func() int32 {
 10006  				if (16) == 1 {
 10007  					return (((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) & 1))
 10008  				}
 10009  				return ((((*(*int32)(unsafe.Pointer(coefs_ana_Q24 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 10010  			}())))
 10011  		}
 10012  	}
 10013  
 10014  }
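
        // NOTE (hand-written commentary; not part of the ccgo output): limit_warped_coefs runs at
        // most 10 iterations. The filters are kept in monic warped form; each iteration checks the
        // largest coefficient magnitude against limit_Q24 and, if it is exceeded, converts back to
        // the true warped coefficients, applies SKP_Silk_bwexpander_32 with a chirp of roughly
        //
        //	chirp = 0.99 - (0.8 + 0.1*iter) * (maxabs - limit) / (maxabs * (ind + 1))
        //
        // (in Q16), and re-normalizes, so the expansion gets more aggressive with both the
        // overshoot and the iteration count.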
 10015  
 10016  /**************************************************************/
 10017  /* Compute noise shaping coefficients and initial gain values */
 10018  /**************************************************************/
 10019  func SKP_Silk_noise_shape_analysis_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr, pitch_res uintptr, x uintptr) { /* SKP_Silk_noise_shape_analysis_FIX.c:137:6: */
 10020  	bp := tls.Alloc(992)
 10021  	defer tls.Free(992)
 10022  
 10023  	var psShapeSt uintptr = (psEnc + 19540 /* &.sShape */)
 10024  	var k int32
 10025  	var i int32
 10026  	var nSamples int32
 10027  	var Qnrg int32
 10028  	var b_Q14 int32
 10029  	var warping_Q16 int32
 10030  	*(*int32)(unsafe.Pointer(bp + 4 /* scale */)) = 0
 10031  	var SNR_adj_dB_Q7 int32
 10032  	var HarmBoost_Q16 int32
 10033  	var HarmShapeGain_Q16 int32
 10034  	var Tilt_Q16 int32
 10035  	var tmp32 int32
 10036  	// var nrg int32 at bp, 4
 10037  
 10038  	// var pre_nrg_Q30 int32 at bp+988, 4
 10039  
 10040  	var log_energy_Q7 int32
 10041  	var log_energy_prev_Q7 int32
 10042  	var energy_variation_Q7 int32
 10043  	var delta_Q16 int32
 10044  	var BWExp1_Q16 int32
 10045  	var BWExp2_Q16 int32
 10046  	var gain_mult_Q16 int32
 10047  	var gain_add_Q16 int32
 10048  	var strength_Q16 int32
 10049  	var b_Q8 int32
 10050  	// var auto_corr [17]int32 at bp+728, 68
 10051  
 10052  	// var refl_coef_Q16 [16]int32 at bp+796, 64
 10053  
 10054  	// var AR1_Q24 [16]int32 at bp+924, 64
 10055  
 10056  	// var AR2_Q24 [16]int32 at bp+860, 64
 10057  
 10058  	// var x_windowed [360]int16 at bp+8, 720
 10059  
 10060  	var x_ptr uintptr
 10061  	var pitch_res_ptr uintptr
 10062  
 10063  	/* Point to start of first LPC analysis block */
 10064  	x_ptr = (x - uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fla_shape)*2)
 10065  
 10066  	/****************/
 10067  	/* CONTROL SNR  */
 10068  	/****************/
 10069  	/* Reduce SNR_dB values if recent bitstream has exceeded TargetRate */
 10070  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7 = ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FSNR_dB_Q7 - ((((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms) << (7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.05, 16))))) + ((((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FBufferedInChannel_ms) << (7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.05, 16))))) >> 16)))
 10071  
 10072  	/* Reduce SNR_dB if in-band FEC is used */
 10073  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8 > SKP_FIX_CONST(tls, 0.5, 8) {
 10074  		*(*int32)(unsafe.Pointer(psEncCtrl + 604 /* &.current_SNR_dB_Q7 */)) -= (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FinBandFEC_SNR_comp_Q8) >> (1))
 10075  	}
 10076  
 10077  	/****************/
 10078  	/* GAIN CONTROL */
 10079  	/****************/
 10080  	/* Input quality is the average of the quality in the lowest two VAD bands */
 10081  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14 = ((*(*int32)(unsafe.Pointer((psEncCtrl + 620 /* &.input_quality_bands_Q15 */))) + *(*int32)(unsafe.Pointer((psEncCtrl + 620 /* &.input_quality_bands_Q15 */) + 1*4))) >> (2))
 10082  
 10083  	/* Coding quality level, between 0.0_Q0 and 1.0_Q0, but in Q14 */
 10084  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14 = ((SKP_Silk_sigm_Q15(tls, func() int32 {
 10085  		if (4) == 1 {
 10086  			return ((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7 - SKP_FIX_CONST(tls, 18.0, 7)) >> 1) + (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7 - SKP_FIX_CONST(tls, 18.0, 7)) & 1))
 10087  		}
 10088  		return (((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7 - SKP_FIX_CONST(tls, 18.0, 7)) >> ((4) - 1)) + 1) >> 1)
 10089  	}())) >> (1))
 10090  
 10091  	/* Reduce coding SNR during low speech activity */
 10092  	b_Q8 = (SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)
 10093  	b_Q8 = (((((b_Q8) << (8)) >> 16) * (int32(int16(b_Q8)))) + (((((b_Q8) << (8)) & 0x0000FFFF) * (int32(int16(b_Q8)))) >> 16))
 10094  	SNR_adj_dB_Q7 = (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7) + (((((int32((int16(SKP_FIX_CONST(tls, float64(-4.0), 7) >> (4 + 1))))) * (int32(int16(b_Q8)))) >> 16) * (int32((int16((((SKP_FIX_CONST(tls, 1.0, 14) + (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14)))) + ((((SKP_FIX_CONST(tls, 1.0, 14) + (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14)))) >> 16)))))) + (((((int32((int16(SKP_FIX_CONST(tls, float64(-4.0), 7) >> (4 + 1))))) * (int32(int16(b_Q8)))) & 0x0000FFFF) * (int32((int16((((SKP_FIX_CONST(tls, 1.0, 14) + (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14)))) + ((((SKP_FIX_CONST(tls, 1.0, 14) + (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14)))) >> 16)))))) >> 16))) // Q12
 10095  
 10096  	if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
 10097  		/* Reduce gains for periodic signals */
 10098  		SNR_adj_dB_Q7 = ((SNR_adj_dB_Q7) + ((((SKP_FIX_CONST(tls, 2.0, 8)) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15)))) + ((((SKP_FIX_CONST(tls, 2.0, 8)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15)))) >> 16)))
 10099  	} else {
 10100  		/* For unvoiced signals and low-quality input, adjust the quality more slowly than the SNR_dB setting */
 10101  		SNR_adj_dB_Q7 = ((SNR_adj_dB_Q7) + (((((SKP_FIX_CONST(tls, 6.0, 9)) + ((((-SKP_FIX_CONST(tls, 0.4, 18)) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7)))) + ((((-SKP_FIX_CONST(tls, 0.4, 18)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7)))) >> 16))) >> 16) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 14) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14))))) + (((((SKP_FIX_CONST(tls, 6.0, 9)) + ((((-SKP_FIX_CONST(tls, 0.4, 18)) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7)))) + ((((-SKP_FIX_CONST(tls, 0.4, 18)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7)))) >> 16))) & 0x0000FFFF) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 14) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14))))) >> 16)))
 10102  	}
 10103  
 10104  	/*************************/
 10105  	/* SPARSENESS PROCESSING */
 10106  	/*************************/
 10107  	/* Set quantizer offset */
 10108  	if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
 10109  		/* Initially set to 0; may be overruled in process_gains(..) */
 10110  		(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FQuantOffsetType = 0
 10111  		(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8 = 0
 10112  	} else {
 10113  		/* Sparseness measure, based on relative fluctuations of energy per 2 milliseconds */
 10114  		nSamples = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz) << (1))
 10115  		energy_variation_Q7 = 0
 10116  		log_energy_prev_Q7 = 0
 10117  		pitch_res_ptr = pitch_res
 10118  		for k = 0; k < (20 / 2); k++ {
 10119  			SKP_Silk_sum_sqr_shift(tls, bp /* &nrg */, bp+4 /* &scale */, pitch_res_ptr, nSamples)
 10120  			*(*int32)(unsafe.Pointer(bp /* nrg */)) += ((nSamples) >> (*(*int32)(unsafe.Pointer(bp + 4 /* scale */)))) // Q(-scale)
 10121  
 10122  			log_energy_Q7 = SKP_Silk_lin2log(tls, *(*int32)(unsafe.Pointer(bp /* nrg */)))
 10123  			if k > 0 {
 10124  				energy_variation_Q7 = energy_variation_Q7 + (func() int32 {
 10125  					if (log_energy_Q7 - log_energy_prev_Q7) > 0 {
 10126  						return (log_energy_Q7 - log_energy_prev_Q7)
 10127  					}
 10128  					return -(log_energy_Q7 - log_energy_prev_Q7)
 10129  				}())
 10130  			}
 10131  			log_energy_prev_Q7 = log_energy_Q7
 10132  			pitch_res_ptr += 2 * (uintptr(nSamples))
 10133  		}
 10134  
 10135  		(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8 = ((SKP_Silk_sigm_Q15(tls, ((((energy_variation_Q7 - SKP_FIX_CONST(tls, 5.0, 7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) + ((((energy_variation_Q7 - SKP_FIX_CONST(tls, 5.0, 7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) >> 16)))) >> (7))
 10136  
 10137  		/* Set quantization offset depending on sparseness measure */
 10138  		if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8 > SKP_FIX_CONST(tls, 0.75, 8) {
 10139  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FQuantOffsetType = 0
 10140  		} else {
 10141  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FQuantOffsetType = 1
 10142  		}
 10143  
 10144  		/* Increase coding SNR for sparse signals */
 10145  		SNR_adj_dB_Q7 = ((SNR_adj_dB_Q7) + ((((SKP_FIX_CONST(tls, 2.0, 15)) >> 16) * (int32((int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8 - SKP_FIX_CONST(tls, 0.5, 8)))))) + ((((SKP_FIX_CONST(tls, 2.0, 15)) & 0x0000FFFF) * (int32((int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8 - SKP_FIX_CONST(tls, 0.5, 8)))))) >> 16)))
 10146  	}
 10147  
 10148  	/*******************************/
 10149  	/* Control bandwidth expansion */
 10150  	/*******************************/
 10151  	/* More BWE for signals with high prediction gain */
 10152  	strength_Q16 = (((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FpredGain_Q16) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 16))))) + (((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FpredGain_Q16) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 16))))) >> 16))
 10153  	BWExp1_Q16 = libc.AssignInt32(&BWExp2_Q16, SKP_DIV32_varQ(tls, SKP_FIX_CONST(tls, 0.95, 16),
 10154  		(((SKP_FIX_CONST(tls, 1.0, 16))+((((strength_Q16)>>16)*(int32(int16(strength_Q16))))+((((strength_Q16)&0x0000FFFF)*(int32(int16(strength_Q16))))>>16)))+((strength_Q16)*(func() int32 {
 10155  			if (16) == 1 {
 10156  				return (((strength_Q16) >> 1) + ((strength_Q16) & 1))
 10157  			}
 10158  			return ((((strength_Q16) >> ((16) - 1)) + 1) >> 1)
 10159  		}()))), 16))
 10160  	delta_Q16 = ((((SKP_FIX_CONST(tls, 1.0, 16) - ((int32(int16(3))) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14))))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.01, 16))))) + ((((SKP_FIX_CONST(tls, 1.0, 16) - ((int32(int16(3))) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14))))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.01, 16))))) >> 16))
 10161  	BWExp1_Q16 = ((BWExp1_Q16) - (delta_Q16))
 10162  	BWExp2_Q16 = ((BWExp2_Q16) + (delta_Q16))
 10163  	/* BWExp1 will be applied after BWExp2, so make it relative */
 10164  	BWExp1_Q16 = (((BWExp1_Q16) << (14)) / ((BWExp2_Q16) >> (2)))
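	// Q-format note: (Q16 << 14) gives Q30 and (Q16 >> 2) gives Q14, so this division
	// leaves the ratio BWExp1/BWExp2 in Q16.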
 10165  
 10166  	if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fwarping_Q16 > 0 {
 10167  		/* Slightly more warping in analysis will move quantization noise up in frequency, where it's better masked */
 10168  		warping_Q16 = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fwarping_Q16) + (((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.01, 18))))) + (((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.01, 18))))) >> 16)))
 10169  	} else {
 10170  		warping_Q16 = 0
 10171  	}
 10172  
 10173  	/********************************************/
 10174  	/* Compute noise shaping AR coefs and gains */
 10175  	/********************************************/
 10176  	for k = 0; k < 4; k++ {
 10177  		/* Apply window: sine slope followed by flat part followed by cosine slope */
 10178  		var shift int32
 10179  		var slope_part int32
 10180  		var flat_part int32
 10181  		flat_part = ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz * 5)
 10182  		slope_part = (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapeWinLength - flat_part) >> (1))
 10183  
 10184  		SKP_Silk_apply_sine_window(tls, bp+8 /* &x_windowed[0] */, x_ptr, 1, slope_part)
 10185  		shift = slope_part
 10186  		libc.Xmemcpy(tls, (bp + 8 /* &x_windowed[0] */ + uintptr(shift)*2), (x_ptr + uintptr(shift)*2), (uint32(flat_part) * uint32(unsafe.Sizeof(int16(0)))))
 10187  		shift = shift + (flat_part)
 10188  		SKP_Silk_apply_sine_window(tls, (bp + 8 /* &x_windowed[0] */ + uintptr(shift)*2), (x_ptr + uintptr(shift)*2), 2, slope_part)
 10189  
 10190  		/* Update pointer: next LPC analysis block */
 10191  		x_ptr += 2 * (uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length))
 10192  
 10193  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fwarping_Q16 > 0 {
 10194  			/* Calculate warped autocorrelation */
 10195  			SKP_Silk_warped_autocorrelation_FIX(tls, bp+728 /* &auto_corr[0] */, bp+4 /* &scale */, bp+8 /* &x_windowed[0] */, int16(warping_Q16), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapeWinLength, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 10196  		} else {
 10197  			/* Calculate regular autocorrelation */
 10198  			SKP_Silk_autocorr(tls, bp+728 /* &auto_corr[0] */, bp+4 /* &scale */, bp+8 /* &x_windowed[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapeWinLength, ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder + 1))
 10199  		}
 10200  
 10201  		/* Add white noise, as a fraction of energy */
 10202  		*(*int32)(unsafe.Pointer(bp + 728 /* &auto_corr[0] */)) = ((*(*int32)(unsafe.Pointer(bp + 728 /* &auto_corr[0] */))) + (SKP_max_32(tls, (((((*(*int32)(unsafe.Pointer(bp + 728 /* &auto_corr[0] */))) >> (4)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 1e-5, 20))))) + (((((*(*int32)(unsafe.Pointer(bp + 728 /* &auto_corr[0] */))) >> (4)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 1e-5, 20))))) >> 16)), 1)))
 10203  
 10204  		/* Calculate the reflection coefficients using the Schur recursion */
 10205  		*(*int32)(unsafe.Pointer(bp /* nrg */)) = SKP_Silk_schur64(tls, bp+796 /* &refl_coef_Q16[0] */, bp+728 /* &auto_corr[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 10206  
 10207  		/* Convert reflection coefficients to prediction coefficients */
 10208  		SKP_Silk_k2a_Q16(tls, bp+860 /* &AR2_Q24[0] */, bp+796 /* &refl_coef_Q16[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 10209  
 10210  		Qnrg = -*(*int32)(unsafe.Pointer(bp + 4 /* scale */)) // range: -12...30
 10211  
 10212  		/* Make sure that Qnrg is an even number */
 10213  		if (Qnrg & 1) != 0 {
 10214  			Qnrg = Qnrg - (1)
 10215  			*(*int32)(unsafe.Pointer(bp /* nrg */)) >>= 1
 10216  		}
 10217  
 10218  		tmp32 = SKP_Silk_SQRT_APPROX(tls, *(*int32)(unsafe.Pointer(bp /* nrg */)))
 10219  		Qnrg >>= 1 // range: -6...15
 10220  
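		// Saturating left shift: tmp32 (~ sqrt(nrg), in Q(Qnrg)) is clamped below so that
		// the << (16 - Qnrg) producing Gains_Q16[k] cannot overflow.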
 10221  		*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = ((func() int32 {
 10222  			if (int32((libc.Int32FromUint32(0x80000000))) >> (16 - Qnrg)) > (int32((0x7FFFFFFF)) >> (16 - Qnrg)) {
 10223  				return func() int32 {
 10224  					if (tmp32) > (int32((libc.Int32FromUint32(0x80000000))) >> (16 - Qnrg)) {
 10225  						return (int32((libc.Int32FromUint32(0x80000000))) >> (16 - Qnrg))
 10226  					}
 10227  					return func() int32 {
 10228  						if (tmp32) < (int32((0x7FFFFFFF)) >> (16 - Qnrg)) {
 10229  							return (int32((0x7FFFFFFF)) >> (16 - Qnrg))
 10230  						}
 10231  						return tmp32
 10232  					}()
 10233  				}()
 10234  			}
 10235  			return func() int32 {
 10236  				if (tmp32) > (int32((0x7FFFFFFF)) >> (16 - Qnrg)) {
 10237  					return (int32((0x7FFFFFFF)) >> (16 - Qnrg))
 10238  				}
 10239  				return func() int32 {
 10240  					if (tmp32) < (int32((libc.Int32FromUint32(0x80000000))) >> (16 - Qnrg)) {
 10241  						return (int32((libc.Int32FromUint32(0x80000000))) >> (16 - Qnrg))
 10242  					}
 10243  					return tmp32
 10244  				}()
 10245  			}()
 10246  		}()) << (16 - Qnrg))
 10247  
 10248  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fwarping_Q16 > 0 {
 10249  			/* Adjust gain for warping */
 10250  			gain_mult_Q16 = warped_gain(tls, bp+860 /* &AR2_Q24[0] */, warping_Q16, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 10251  
 10252  			*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = (((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) >> 16) * (int32(int16(gain_mult_Q16)))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(gain_mult_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) * (func() int32 {
 10253  				if (16) == 1 {
 10254  					return (((gain_mult_Q16) >> 1) + ((gain_mult_Q16) & 1))
 10255  				}
 10256  				return ((((gain_mult_Q16) >> ((16) - 1)) + 1) >> 1)
 10257  			}())))
 10258  			if *(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) < 0 {
 10259  				*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = 0x7FFFFFFF
 10260  			}
 10261  		}
 10262  
 10263  		/* Bandwidth expansion for synthesis filter shaping */
 10264  		SKP_Silk_bwexpander_32(tls, bp+860 /* &AR2_Q24[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder, BWExp2_Q16)
 10265  
 10266  		/* Compute noise shaping filter coefficients */
 10267  		libc.Xmemcpy(tls, bp+924 /* &AR1_Q24[0] */, bp+860 /* &AR2_Q24[0] */, (uint32((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder) * uint32(unsafe.Sizeof(int32(0)))))
 10268  
 10269  		/* Bandwidth expansion for analysis filter shaping */
 10270  
 10271  		SKP_Silk_bwexpander_32(tls, bp+924 /* &AR1_Q24[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder, BWExp1_Q16)
 10272  
 10273  		/* Ratio of prediction gains, in energy domain */
 10274  		SKP_Silk_LPC_inverse_pred_gain_Q24(tls, bp+988 /* &pre_nrg_Q30 */, bp+860 /* &AR2_Q24[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 10275  		SKP_Silk_LPC_inverse_pred_gain_Q24(tls, bp /* &nrg */, bp+924 /* &AR1_Q24[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 10276  
 10277  		//psEncCtrl->GainsPre[ k ] = 1.0f - 0.7f * ( 1.0f - pre_nrg / nrg ) = 0.3f + 0.7f * pre_nrg / nrg;
 10278  		*(*int32)(unsafe.Pointer(bp + 988 /* pre_nrg_Q30 */)) = (((((*(*int32)(unsafe.Pointer(bp + 988 /* pre_nrg_Q30 */))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.7, 15))))) + ((((*(*int32)(unsafe.Pointer(bp + 988 /* pre_nrg_Q30 */))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.7, 15))))) >> 16)) << (1))
 10279  		*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4)) = (SKP_FIX_CONST(tls, 0.3, 14) + SKP_DIV32_varQ(tls, *(*int32)(unsafe.Pointer(bp + 988 /* pre_nrg_Q30 */)), *(*int32)(unsafe.Pointer(bp /* nrg */)), 14))
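		// Q-format note: smulwb(Q30, 0.7 in Q15) yields Q29 and the << 1 restores Q30;
		// dividing the two Q30 inverse prediction gains with resolution 14 then gives the
		// 0.7 * pre_nrg / nrg term of the commented formula in Q14.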
 10280  
 10281  		/* Convert to monic warped prediction coefficients and limit absolute values */
 10282  		limit_warped_coefs(tls, bp+860 /* &AR2_Q24[0] */, bp+924 /* &AR1_Q24[0] */, warping_Q16, SKP_FIX_CONST(tls, 3.999, 24), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 10283  
 10284  		/* Convert from Q24 to Q13 and store in int16 */
 10285  		for i = 0; i < (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder; i++ {
 10286  			*(*int16)(unsafe.Pointer((psEncCtrl + 252 /* &.AR1_Q13 */) + uintptr(((k*16)+i))*2)) = func() int16 {
 10287  				if (func() int32 {
 10288  					if (11) == 1 {
 10289  						return (((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) & 1))
 10290  					}
 10291  					return ((((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) >> ((11) - 1)) + 1) >> 1)
 10292  				}()) > 0x7FFF {
 10293  					return int16(0x7FFF)
 10294  				}
 10295  				return func() int16 {
 10296  					if (func() int32 {
 10297  						if (11) == 1 {
 10298  							return (((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) & 1))
 10299  						}
 10300  						return ((((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) >> ((11) - 1)) + 1) >> 1)
 10301  					}()) < (int32(libc.Int16FromInt32(0x8000))) {
 10302  						return libc.Int16FromInt32(0x8000)
 10303  					}
 10304  					return func() int16 {
 10305  						if (11) == 1 {
 10306  							return (int16(((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) & 1)))
 10307  						}
 10308  						return (int16((((*(*int32)(unsafe.Pointer(bp + 924 /* &AR1_Q24[0] */ + uintptr(i)*4))) >> ((11) - 1)) + 1) >> 1))
 10309  					}()
 10310  				}()
 10311  			}()
 10312  			*(*int16)(unsafe.Pointer((psEncCtrl + 380 /* &.AR2_Q13 */) + uintptr(((k*16)+i))*2)) = func() int16 {
 10313  				if (func() int32 {
 10314  					if (11) == 1 {
 10315  						return (((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) & 1))
 10316  					}
 10317  					return ((((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) >> ((11) - 1)) + 1) >> 1)
 10318  				}()) > 0x7FFF {
 10319  					return int16(0x7FFF)
 10320  				}
 10321  				return func() int16 {
 10322  					if (func() int32 {
 10323  						if (11) == 1 {
 10324  							return (((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) & 1))
 10325  						}
 10326  						return ((((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) >> ((11) - 1)) + 1) >> 1)
 10327  					}()) < (int32(libc.Int16FromInt32(0x8000))) {
 10328  						return libc.Int16FromInt32(0x8000)
 10329  					}
 10330  					return func() int16 {
 10331  						if (11) == 1 {
 10332  							return (int16(((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) & 1)))
 10333  						}
 10334  						return (int16((((*(*int32)(unsafe.Pointer(bp + 860 /* &AR2_Q24[0] */ + uintptr(i)*4))) >> ((11) - 1)) + 1) >> 1))
 10335  					}()
 10336  				}()
 10337  			}()
 10338  		}
 10339  	}
 10340  
 10341  	/*****************/
 10342  	/* Gain tweaking */
 10343  	/*****************/
 10344  	/* Increase gains during low speech activity and put a lower limit on gains */
 10345  	gain_mult_Q16 = SKP_Silk_log2lin(tls, -((-SKP_FIX_CONST(tls, 16.0, 7)) + ((((SNR_adj_dB_Q7) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 16))))) + ((((SNR_adj_dB_Q7) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 16))))) >> 16))))
 10346  	gain_add_Q16 = SKP_Silk_log2lin(tls, ((SKP_FIX_CONST(tls, 16.0, 7)) + ((((SKP_FIX_CONST(tls, 4.0, 7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 16))))) + ((((SKP_FIX_CONST(tls, 4.0, 7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 16))))) >> 16))))
 10347  	tmp32 = SKP_Silk_log2lin(tls, ((SKP_FIX_CONST(tls, 16.0, 7)) + ((((SKP_FIX_CONST(tls, float64(-50.0), 7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 16))))) + ((((SKP_FIX_CONST(tls, float64(-50.0), 7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 16))))) >> 16))))
 10348  	tmp32 = ((((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) >> 16) * (int32(int16(tmp32)))) + (((((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) & 0x0000FFFF) * (int32(int16(tmp32)))) >> 16)) + (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) * (func() int32 {
 10349  		if (16) == 1 {
 10350  			return (((tmp32) >> 1) + ((tmp32) & 1))
 10351  		}
 10352  		return ((((tmp32) >> ((16) - 1)) + 1) >> 1)
 10353  	}())))
 10354  	gain_add_Q16 = func() int32 {
 10355  		if ((uint32((gain_add_Q16) + (tmp32))) & 0x80000000) == uint32(0) {
 10356  			return func() int32 {
 10357  				if ((uint32((gain_add_Q16) & (tmp32))) & 0x80000000) != uint32(0) {
 10358  					return libc.Int32FromUint32(0x80000000)
 10359  				}
 10360  				return ((gain_add_Q16) + (tmp32))
 10361  			}()
 10362  		}
 10363  		return func() int32 {
 10364  			if ((uint32((gain_add_Q16) | (tmp32))) & 0x80000000) == uint32(0) {
 10365  				return 0x7FFFFFFF
 10366  			}
 10367  			return ((gain_add_Q16) + (tmp32))
 10368  		}()
 10369  	}()
 10370  
 10371  	for k = 0; k < 4; k++ {
 10372  		*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = (((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) >> 16) * (int32(int16(gain_mult_Q16)))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(gain_mult_Q16)))) >> 16)) + ((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) * (func() int32 {
 10373  			if (16) == 1 {
 10374  				return (((gain_mult_Q16) >> 1) + ((gain_mult_Q16) & 1))
 10375  			}
 10376  			return ((((gain_mult_Q16) >> ((16) - 1)) + 1) >> 1)
 10377  		}())))
 10378  		if *(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) < 0 {
 10379  			*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = 0x7FFFFFFF
 10380  		}
 10381  	}
 10382  
 10383  	for k = 0; k < 4; k++ {
 10384  		*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = func() int32 {
 10385  			if ((uint32((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) + (gain_add_Q16))) & 0x80000000) != 0 {
 10386  				return 0x7FFFFFFF
 10387  			}
 10388  			return ((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) + (gain_add_Q16))
 10389  		}()
 10390  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16 = func() int32 {
 10391  			if ((uint32(((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) >> 16) * (int32(func() int16 {
 10392  				if (2) == 1 {
 10393  					return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10394  				}
 10395  				return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10396  			}()))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) & 0x0000FFFF) * (int32(func() int16 {
 10397  				if (2) == 1 {
 10398  					return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10399  				}
 10400  				return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10401  			}()))) >> 16)))) & 0x80000000) == uint32(0) {
 10402  				return func() int32 {
 10403  					if ((uint32(((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) & ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) >> 16) * (int32(func() int16 {
 10404  						if (2) == 1 {
 10405  							return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10406  						}
 10407  						return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10408  					}()))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) & 0x0000FFFF) * (int32(func() int16 {
 10409  						if (2) == 1 {
 10410  							return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10411  						}
 10412  						return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10413  					}()))) >> 16)))) & 0x80000000) != uint32(0) {
 10414  						return libc.Int32FromUint32(0x80000000)
 10415  					}
 10416  					return (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) >> 16) * (int32(func() int16 {
 10417  						if (2) == 1 {
 10418  							return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10419  						}
 10420  						return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10421  					}()))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) & 0x0000FFFF) * (int32(func() int16 {
 10422  						if (2) == 1 {
 10423  							return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10424  						}
 10425  						return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10426  					}()))) >> 16)))
 10427  				}()
 10428  			}
 10429  			return func() int32 {
 10430  				if ((uint32(((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) | ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) >> 16) * (int32(func() int16 {
 10431  					if (2) == 1 {
 10432  						return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10433  					}
 10434  					return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10435  				}()))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) & 0x0000FFFF) * (int32(func() int16 {
 10436  					if (2) == 1 {
 10437  						return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10438  					}
 10439  					return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10440  				}()))) >> 16)))) & 0x80000000) == uint32(0) {
 10441  					return 0x7FFFFFFF
 10442  				}
 10443  				return (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) >> 16) * (int32(func() int16 {
 10444  					if (2) == 1 {
 10445  						return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10446  					}
 10447  					return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10448  				}()))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) - (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FavgGain_Q16) & 0x0000FFFF) * (int32(func() int16 {
 10449  					if (2) == 1 {
 10450  						return (int16((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) & 1)))
 10451  					}
 10452  					return (int16(((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32(int16(SKP_FIX_CONST(tls, 1e-3, 10))))) >> ((2) - 1)) + 1) >> 1))
 10453  				}()))) >> 16)))
 10454  			}()
 10455  		}()
 10456  	}
 10457  
 10458  	/************************************************/
 10459  	/* Decrease level during fricatives (de-essing) */
 10460  	/************************************************/
 10461  	gain_mult_Q16 = (SKP_FIX_CONST(tls, 1.0, 16) + (func() int32 {
 10462  		if (10) == 1 {
 10463  			return ((((SKP_FIX_CONST(tls, 0.05, 26)) + (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) * (SKP_FIX_CONST(tls, 0.1, 12)))) >> 1) + (((SKP_FIX_CONST(tls, 0.05, 26)) + (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) * (SKP_FIX_CONST(tls, 0.1, 12)))) & 1))
 10464  		}
 10465  		return (((((SKP_FIX_CONST(tls, 0.05, 26)) + (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) * (SKP_FIX_CONST(tls, 0.1, 12)))) >> ((10) - 1)) + 1) >> 1)
 10466  	}()))
 10467  
 10468  	if ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15 <= 0) && ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 1) {
 10469  		if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 24 {
 10470  			var essStrength_Q15 int32 = (((((-(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15) >> 16) * (int32((int16((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))))))) + ((((-(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15) & 0x0000FFFF) * (int32((int16((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))))))) >> 16)) + ((-(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15) * (func() int32 {
 10471  				if (16) == 1 {
 10472  					return ((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) & 1))
 10473  				}
 10474  				return (((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) >> ((16) - 1)) + 1) >> 1)
 10475  			}())))
 10476  			tmp32 = SKP_Silk_log2lin(tls, (SKP_FIX_CONST(tls, 16.0, 7) - ((((essStrength_Q15) >> 16) * (int32((int16((((SKP_FIX_CONST(tls, 2.0, 7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) + ((((SKP_FIX_CONST(tls, 2.0, 7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) >> 16)))))) + ((((essStrength_Q15) & 0x0000FFFF) * (int32((int16((((SKP_FIX_CONST(tls, 2.0, 7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) + ((((SKP_FIX_CONST(tls, 2.0, 7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) >> 16)))))) >> 16))))
 10477  			gain_mult_Q16 = (((((gain_mult_Q16) >> 16) * (int32(int16(tmp32)))) + ((((gain_mult_Q16) & 0x0000FFFF) * (int32(int16(tmp32)))) >> 16)) + ((gain_mult_Q16) * (func() int32 {
 10478  				if (16) == 1 {
 10479  					return (((tmp32) >> 1) + ((tmp32) & 1))
 10480  				}
 10481  				return ((((tmp32) >> ((16) - 1)) + 1) >> 1)
 10482  			}())))
 10483  		} else if (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz == 16 {
 10484  			var essStrength_Q15 int32 = (((((-(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15) >> 16) * (int32((int16((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))))))) + ((((-(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15) & 0x0000FFFF) * (int32((int16((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))))))) >> 16)) + ((-(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15) * (func() int32 {
 10485  				if (16) == 1 {
 10486  					return ((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) >> 1) + (((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) & 1))
 10487  				}
 10488  				return (((((int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8))) * (int32((int16(SKP_FIX_CONST(tls, 1.0, 8) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) >> ((16) - 1)) + 1) >> 1)
 10489  			}())))
 10490  			tmp32 = SKP_Silk_log2lin(tls, (SKP_FIX_CONST(tls, 16.0, 7) - ((((essStrength_Q15) >> 16) * (int32((int16((((SKP_FIX_CONST(tls, 1.0, 7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) + ((((SKP_FIX_CONST(tls, 1.0, 7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) >> 16)))))) + ((((essStrength_Q15) & 0x0000FFFF) * (int32((int16((((SKP_FIX_CONST(tls, 1.0, 7)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) + ((((SKP_FIX_CONST(tls, 1.0, 7)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.16, 17))))) >> 16)))))) >> 16))))
 10491  			gain_mult_Q16 = (((((gain_mult_Q16) >> 16) * (int32(int16(tmp32)))) + ((((gain_mult_Q16) & 0x0000FFFF) * (int32(int16(tmp32)))) >> 16)) + ((gain_mult_Q16) * (func() int32 {
 10492  				if (16) == 1 {
 10493  					return (((tmp32) >> 1) + ((tmp32) & 1))
 10494  				}
 10495  				return ((((tmp32) >> ((16) - 1)) + 1) >> 1)
 10496  			}())))
 10497  		} else {
 10498  
 10499  		}
 10500  	}
 10501  
 10502  	for k = 0; k < 4; k++ {
 10503  		*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4)) = ((((gain_mult_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4)))))) + ((((gain_mult_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4)))))) >> 16))
 10504  	}
 10505  
 10506  	/************************************************/
 10507  	/* Control low-frequency shaping and noise tilt */
 10508  	/************************************************/
 10509  	/* Less low frequency shaping for noisy inputs */
 10510  	strength_Q16 = ((SKP_FIX_CONST(tls, 3.0, 0)) * (SKP_FIX_CONST(tls, 1.0, 16) + ((int32(int16(SKP_FIX_CONST(tls, 0.5, 1)))) * (int32((int16(*(*int32)(unsafe.Pointer((psEncCtrl + 620 /* &.input_quality_bands_Q15 */))) - SKP_FIX_CONST(tls, 1.0, 15))))))))
 10511  	if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
 10512  		/* Reduce low-frequency quantization noise for periodic signals, depending on pitch lag */
 10513  		/*f = 400; freqz([1, -0.98 + 2e-4 * f], [1, -0.97 + 7e-4 * f], 2^12, Fs); axis([0, 1000, -10, 1])*/
 10514  		var fs_kHz_inv int32 = ((SKP_FIX_CONST(tls, 0.2, 14)) / ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz))
 10515  		for k = 0; k < 4; k++ {
 10516  			b_Q14 = (fs_kHz_inv + ((SKP_FIX_CONST(tls, 3.0, 14)) / (*(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 108 /* &.pitchL */) + uintptr(k)*4)))))
 10517  			/* Pack two coefficients in one int32 */
 10518  			*(*int32)(unsafe.Pointer((psEncCtrl + 508 /* &.LF_shp_Q14 */) + uintptr(k)*4)) = (((SKP_FIX_CONST(tls, 1.0, 14) - b_Q14) - ((((strength_Q16) >> 16) * (int32(int16(b_Q14)))) + ((((strength_Q16) & 0x0000FFFF) * (int32(int16(b_Q14)))) >> 16))) << (16))
 10519  			*(*int32)(unsafe.Pointer((psEncCtrl + 508 /* &.LF_shp_Q14 */) + uintptr(k)*4)) |= (int32((uint16(b_Q14 - SKP_FIX_CONST(tls, 1.0, 14)))))
 10520  		}
 10521  		// Guarantees that the second argument to SMULWB() is within the range of an SKP_int16
 10522  		Tilt_Q16 = (-SKP_FIX_CONST(tls, 0.3, 16) - ((((SKP_FIX_CONST(tls, 1.0, 16) - SKP_FIX_CONST(tls, 0.3, 16)) >> 16) * (int32((int16((((SKP_FIX_CONST(tls, 0.35, 24)) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) + ((((SKP_FIX_CONST(tls, 0.35, 24)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) >> 16)))))) + ((((SKP_FIX_CONST(tls, 1.0, 16) - SKP_FIX_CONST(tls, 0.3, 16)) & 0x0000FFFF) * (int32((int16((((SKP_FIX_CONST(tls, 0.35, 24)) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) + ((((SKP_FIX_CONST(tls, 0.35, 24)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) >> 16)))))) >> 16)))
 10523  	} else {
 10524  		b_Q14 = ((21299) / ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffs_kHz)) // 1.3_Q0 = 21299_Q14
 10525  		/* Pack two coefficients in one int32 */
 10526  		*(*int32)(unsafe.Pointer((psEncCtrl + 508 /* &.LF_shp_Q14 */))) = (((SKP_FIX_CONST(tls, 1.0, 14) - b_Q14) - ((((strength_Q16) >> 16) * (int32((int16((((SKP_FIX_CONST(tls, 0.6, 16)) >> 16) * (int32(int16(b_Q14)))) + ((((SKP_FIX_CONST(tls, 0.6, 16)) & 0x0000FFFF) * (int32(int16(b_Q14)))) >> 16)))))) + ((((strength_Q16) & 0x0000FFFF) * (int32((int16((((SKP_FIX_CONST(tls, 0.6, 16)) >> 16) * (int32(int16(b_Q14)))) + ((((SKP_FIX_CONST(tls, 0.6, 16)) & 0x0000FFFF) * (int32(int16(b_Q14)))) >> 16)))))) >> 16))) << (16))
 10527  		*(*int32)(unsafe.Pointer((psEncCtrl + 508 /* &.LF_shp_Q14 */))) |= (int32((uint16(b_Q14 - SKP_FIX_CONST(tls, 1.0, 14)))))
 10528  		for k = 1; k < 4; k++ {
 10529  			*(*int32)(unsafe.Pointer((psEncCtrl + 508 /* &.LF_shp_Q14 */) + uintptr(k)*4)) = *(*int32)(unsafe.Pointer((psEncCtrl + 508 /* &.LF_shp_Q14 */)))
 10530  		}
 10531  		Tilt_Q16 = -SKP_FIX_CONST(tls, 0.3, 16)
 10532  	}
 10533  
 10534  	/****************************/
 10535  	/* HARMONIC SHAPING CONTROL */
 10536  	/****************************/
 10537  	/* Control boosting of harmonic frequencies */
 10538  	HarmBoost_Q16 = (((((((SKP_FIX_CONST(tls, 1.0, 17) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (3))) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15)))) + ((((SKP_FIX_CONST(tls, 1.0, 17) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (3))) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15)))) >> 16)) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) + (((((((SKP_FIX_CONST(tls, 1.0, 17) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (3))) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15)))) + ((((SKP_FIX_CONST(tls, 1.0, 17) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (3))) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15)))) >> 16)) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) >> 16))
 10539  
 10540  	/* More harmonic boost for noisy input signals */
 10541  	HarmBoost_Q16 = ((HarmBoost_Q16) + ((((SKP_FIX_CONST(tls, 1.0, 16) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14) << (2))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) + ((((SKP_FIX_CONST(tls, 1.0, 16) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14) << (2))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 16))))) >> 16)))
 10542  
 10543  	if (1 != 0) && ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0) {
 10544  		/* More harmonic noise shaping for high bitrates or noisy input */
 10545  		HarmShapeGain_Q16 = ((SKP_FIX_CONST(tls, 0.3, 16)) + ((((SKP_FIX_CONST(tls, 1.0, 16) - ((((SKP_FIX_CONST(tls, 1.0, 18) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (4))) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14)))) + ((((SKP_FIX_CONST(tls, 1.0, 18) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (4))) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14)))) >> 16))) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.2, 16))))) + ((((SKP_FIX_CONST(tls, 1.0, 16) - ((((SKP_FIX_CONST(tls, 1.0, 18) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (4))) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14)))) + ((((SKP_FIX_CONST(tls, 1.0, 18) - (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14) << (4))) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14)))) >> 16))) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.2, 16))))) >> 16)))
 10546  
 10547  		/* Less harmonic noise shaping for less periodic signals */
 10548  		HarmShapeGain_Q16 = (((((HarmShapeGain_Q16) << (1)) >> 16) * (int32(int16(SKP_Silk_SQRT_APPROX(tls, (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15) << (15))))))) + (((((HarmShapeGain_Q16) << (1)) & 0x0000FFFF) * (int32(int16(SKP_Silk_SQRT_APPROX(tls, (((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FLTPCorr_Q15) << (15))))))) >> 16))
 10549  	} else {
 10550  		HarmShapeGain_Q16 = 0
 10551  	}
 10552  
 10553  	/*************************/
 10554  	/* Smooth over subframes */
 10555  	/*************************/
 10556  	for k = 0; k < 4; k++ {
 10557  		(*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmBoost_smth_Q16 = (((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmBoost_smth_Q16) + ((((HarmBoost_Q16 - (*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmBoost_smth_Q16) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.4, 16))))) + ((((HarmBoost_Q16 - (*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmBoost_smth_Q16) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.4, 16))))) >> 16)))
 10558  		(*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmShapeGain_smth_Q16 = (((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmShapeGain_smth_Q16) + ((((HarmShapeGain_Q16 - (*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmShapeGain_smth_Q16) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.4, 16))))) + ((((HarmShapeGain_Q16 - (*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmShapeGain_smth_Q16) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.4, 16))))) >> 16)))
 10559  		(*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FTilt_smth_Q16 = (((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FTilt_smth_Q16) + ((((Tilt_Q16 - (*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FTilt_smth_Q16) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.4, 16))))) + ((((Tilt_Q16 - (*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FTilt_smth_Q16) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.4, 16))))) >> 16)))
 10560  
 10561  		*(*int32)(unsafe.Pointer((psEncCtrl + 540 /* &.HarmBoost_Q14 */) + uintptr(k)*4)) = func() int32 {
 10562  			if (2) == 1 {
 10563  				return ((((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmBoost_smth_Q16) >> 1) + (((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmBoost_smth_Q16) & 1))
 10564  			}
 10565  			return (((((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmBoost_smth_Q16) >> ((2) - 1)) + 1) >> 1)
 10566  		}()
 10567  		*(*int32)(unsafe.Pointer((psEncCtrl + 572 /* &.HarmShapeGain_Q14 */) + uintptr(k)*4)) = func() int32 {
 10568  			if (2) == 1 {
 10569  				return ((((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmShapeGain_smth_Q16) >> 1) + (((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmShapeGain_smth_Q16) & 1))
 10570  			}
 10571  			return (((((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FHarmShapeGain_smth_Q16) >> ((2) - 1)) + 1) >> 1)
 10572  		}()
 10573  		*(*int32)(unsafe.Pointer((psEncCtrl + 556 /* &.Tilt_Q14 */) + uintptr(k)*4)) = func() int32 {
 10574  			if (2) == 1 {
 10575  				return ((((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FTilt_smth_Q16) >> 1) + (((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FTilt_smth_Q16) & 1))
 10576  			}
 10577  			return (((((*SKP_Silk_shape_state_FIX)(unsafe.Pointer(psShapeSt)).FTilt_smth_Q16) >> ((2) - 1)) + 1) >> 1)
 10578  		}()
 10579  	}
 10580  }
 10581  
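// Reading aid (not part of the generated code): the nested func() ladders in the gain
// tweaking loops above are saturating 32-bit additions, clipping overflow to
// 0x7FFFFFFF / -0x80000000 instead of wrapping. A hypothetical equivalent:
func addSat32Sketch(a int32, b int32) int32 {
	sum := a + b
	if uint32(sum)&0x80000000 == 0 {
		// Sum looks non-negative; it only overflowed if both inputs were negative.
		if uint32(a)&uint32(b)&0x80000000 != 0 {
			return -0x80000000
		}
		return sum
	}
	// Sum looks negative; it only overflowed if both inputs were non-negative.
	if (uint32(a)|uint32(b))&0x80000000 == 0 {
		return 0x7FFFFFFF
	}
	return sum
}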
 10582  func SKP_Silk_NSQ(tls *libc.TLS, psEncC uintptr, psEncCtrlC uintptr, NSQ uintptr, x uintptr, q uintptr, LSFInterpFactor_Q2 int32, PredCoef_Q12 uintptr, LTPCoef_Q14 uintptr, AR2_Q13 uintptr, HarmShapeGain_Q14 uintptr, Tilt_Q14 uintptr, LF_shp_Q14 uintptr, Gains_Q16 uintptr, Lambda_Q10 int32, LTP_scale_Q14 int32) { /* SKP_Silk_NSQ.c:65:6: */
 10583  	bp := tls.Alloc(6304)
 10584  	defer tls.Free(6304)
 10585  
 10586  	var k int32
 10587  	var lag int32
 10588  	var start_idx int32
 10589  	var LSF_interpolation_flag int32
 10590  	var A_Q12 uintptr
 10591  	var B_Q14 uintptr
 10592  	var AR_shp_Q13 uintptr
 10593  	var pxq uintptr
 10594  	// var sLTP_Q16 [960]int32 at bp+2464, 3840
 10595  
 10596  	// var sLTP [960]int16 at bp+64, 1920
 10597  
 10598  	var HarmShapeFIRPacked_Q14 int32
 10599  	var offset_Q10 int32
 10600  	// var FiltState [16]int32 at bp, 64
 10601  
 10602  	// var x_sc_Q10 [120]int32 at bp+1984, 480
 10603  
 10604  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frand_seed = (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FSeed
 10605  	/* Set unvoiced lag to the previous one, overwrite later for voiced */
 10606  	lag = (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FlagPrev
 10607  
 10608  	offset_Q10 = int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Quantization_Offsets_Q10)) + uintptr((*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype)*4) + uintptr((*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FQuantOffsetType)*2)))
 10609  
 10610  	if LSFInterpFactor_Q2 == (int32(1) << 2) {
 10611  		LSF_interpolation_flag = 0
 10612  	} else {
 10613  		LSF_interpolation_flag = 1
 10614  	}
 10615  
 10616  	/* Set up pointers to the start of the subframe */
 10617  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx = (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length
 10618  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx = (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length
 10619  	pxq = ((NSQ /* &.xq */) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length)*2)
 10620  	for k = 0; k < 4; k++ {
 10621  		A_Q12 = (PredCoef_Q12 + uintptr((((k>>1)|(1-LSF_interpolation_flag))*16))*2)
 10622  		B_Q14 = (LTPCoef_Q14 + uintptr((k*5))*2)
 10623  		AR_shp_Q13 = (AR2_Q13 + uintptr((k*16))*2)
 10624  
 10625  		/* Noise shape parameters */
 10626  
 10627  		HarmShapeFIRPacked_Q14 = ((*(*int32)(unsafe.Pointer(HarmShapeGain_Q14 + uintptr(k)*4))) >> (2))
 10628  		HarmShapeFIRPacked_Q14 = HarmShapeFIRPacked_Q14 | (((*(*int32)(unsafe.Pointer(HarmShapeGain_Q14 + uintptr(k)*4))) >> (1)) << (16))
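		// The packed word appears to hold two taps of the harmonic shaping filter:
		// HarmShapeGain_Q14/4 in the low 16 bits and HarmShapeGain_Q14/2 in the high 16 bits.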
 10629  
 10630  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag = 0
 10631  		if (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype == 0 {
 10632  			/* Voiced */
 10633  			lag = *(*int32)(unsafe.Pointer((psEncCtrlC + 108 /* &.pitchL */) + uintptr(k)*4))
 10634  
 10635  			/* Re-whitening */
 10636  			if (k & (3 - ((LSF_interpolation_flag) << (1)))) == 0 {
 10637  
 10638  				/* Rewhiten with new A coefs */
 10639  				start_idx = ((((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length - lag) - (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder) - (5 / 2))
 10640  
 10641  				libc.Xmemset(tls, bp /* &FiltState[0] */, 0, (uint32((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder) * uint32(unsafe.Sizeof(int32(0)))))
 10642  				SKP_Silk_MA_Prediction(tls, ((NSQ /* &.xq */) + uintptr((start_idx+(k*((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length>>2))))*2),
 10643  					A_Q12, bp /* &FiltState[0] */, (bp + 64 /* &sLTP[0] */ + uintptr(start_idx)*2), ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length - start_idx), (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder)
 10644  
 10645  				(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag = 1
 10646  				(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx = (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length
 10647  			}
 10648  		}
 10649  
 10650  		SKP_Silk_nsq_scale_states(tls, NSQ, x, bp+1984 /* &x_sc_Q10[0] */, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length, bp+64, /* &sLTP[0] */
 10651  			bp+2464 /* &sLTP_Q16[0] */, k, LTP_scale_Q14, Gains_Q16, psEncCtrlC+108 /* &.pitchL */)
 10652  
 10653  		SKP_Silk_noise_shape_quantizer(tls, NSQ, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype, bp+1984 /* &x_sc_Q10[0] */, q, pxq, bp+2464 /* &sLTP_Q16[0] */, A_Q12, B_Q14,
 10654  			AR_shp_Q13, lag, HarmShapeFIRPacked_Q14, *(*int32)(unsafe.Pointer(Tilt_Q14 + uintptr(k)*4)), *(*int32)(unsafe.Pointer(LF_shp_Q14 + uintptr(k)*4)), *(*int32)(unsafe.Pointer(Gains_Q16 + uintptr(k)*4)), Lambda_Q10,
 10655  			offset_Q10, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FshapingLPCOrder, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder)
 10656  
 10657  		x += 2 * uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length)
 10658  		q += uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length)
 10659  		pxq += 2 * (uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length))
 10660  	}
 10661  
 10662  	/* Update lagPrev for next frame */
 10663  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FlagPrev = *(*int32)(unsafe.Pointer((psEncCtrlC + 108 /* &.pitchL */) + 3*4))
 10664  
 10665  	/* Save quantized speech and noise shaping signals */
 10666  	libc.Xmemcpy(tls, NSQ /* &.xq */, ((NSQ /* &.xq */) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length)*2), (uint32((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length) * uint32(unsafe.Sizeof(int16(0)))))
 10667  	libc.Xmemcpy(tls, NSQ+1920 /* &.sLTP_shp_Q10 */, ((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length)*4), (uint32((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length) * uint32(unsafe.Sizeof(int32(0)))))
 10668  
 10669  }
 10670  
 10671  /***********************************/
 10672  /* SKP_Silk_noise_shape_quantizer  */
 10673  /***********************************/
 10674  func SKP_Silk_noise_shape_quantizer(tls *libc.TLS, NSQ uintptr, sigtype int32, x_sc_Q10 uintptr, q uintptr, xq uintptr, sLTP_Q16 uintptr, a_Q12 uintptr, b_Q14 uintptr, AR_shp_Q13 uintptr, lag int32, HarmShapeFIRPacked_Q14 int32, Tilt_Q14 int32, LF_shp_Q14 int32, Gain_Q16 int32, Lambda_Q10 int32, offset_Q10 int32, length int32, shapingLPCOrder int32, predictLPCOrder int32) { /* SKP_Silk_NSQ.c:172:17: */
 10675  	var i int32
 10676  	var j int32
 10677  	var LTP_pred_Q14 int32
 10678  	var LPC_pred_Q10 int32
 10679  	var n_AR_Q10 int32
 10680  	var n_LTP_Q14 int32
 10681  	var n_LF_Q10 int32
 10682  	var r_Q10 int32
 10683  	var q_Q0 int32
 10684  	var q_Q10 int32
 10685  	var thr1_Q10 int32
 10686  	var thr2_Q10 int32
 10687  	var thr3_Q10 int32
 10688  	var dither int32
 10689  	var exc_Q10 int32
 10690  	var LPC_exc_Q10 int32
 10691  	var xq_Q10 int32
 10692  	var tmp1 int32
 10693  	var tmp2 int32
 10694  	var sLF_AR_shp_Q10 int32
 10695  	var psLPC_Q14 uintptr
 10696  	var shp_lag_ptr uintptr
 10697  	var pred_lag_ptr uintptr
 10698  
 10699  	shp_lag_ptr = ((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx-lag)+(3/2)))*4)
 10700  	pred_lag_ptr = (sLTP_Q16 + uintptr((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx-lag)+(5/2)))*4)
 10701  
 10702  	/* Setup short term AR state */
 10703  	psLPC_Q14 = ((NSQ + 5760 /* &.sLPC_Q14 */) + 31*4)
 10704  
 10705  	/* Quantization thresholds */
 10706  	thr1_Q10 = ((-1536) - ((Lambda_Q10) >> (1)))
 10707  	thr2_Q10 = ((-512) - ((Lambda_Q10) >> (1)))
 10708  	thr2_Q10 = ((thr2_Q10) + (((int32(int16(offset_Q10))) * (int32(int16(Lambda_Q10)))) >> (10)))
 10709  	thr3_Q10 = ((512) + ((Lambda_Q10) >> (1)))
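	// Decision thresholds (Q10) for the dead-zone quantizer in the loop below;
	// Lambda_Q10 is the rate penalty and offset_Q10 the quantization offset:
	//   r < thr1           -> q = round((r + Lambda/2) / 1024)
	//   thr1 <= r < thr2   -> q = -1
	//   thr2 <= r <= thr3  -> q = 0
	//   r > thr3           -> q = round((r - Lambda/2) / 1024)
	// where thr1 = -1.5 - Lambda/2, thr2 = -0.5 - Lambda/2 + offset*Lambda and
	// thr3 = 0.5 + Lambda/2, all expressed in Q10 units of the residual r_Q10.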
 10710  
 10711  	for i = 0; i < length; i++ {
 10712  		/* Generate dither */
 10713  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frand_seed = (int32((uint32(907633515)) + ((uint32((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frand_seed)) * (uint32(196314165)))))
 10714  
 10715  		/* dither = rand_seed < 0 ? 0xFFFFFFFF : 0; */
 10716  		dither = (((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frand_seed) >> (31))
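        		// The translator has inlined the SILK dither generator: rand_seed follows
        		// the linear congruential update seed = 907633515 + seed*196314165, and the
        		// arithmetic shift by 31 turns its sign bit into a 0 / -1 mask. The
        		// (value ^ dither) - dither pattern used further down then negates the
        		// residual and excitation on negative seeds, applying the pseudo-random
        		// sign flip without a branch.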
 10717  
 10718  		/* Short-term prediction */
 10719  		/* check that order is even */
 10720  		/* check that array starts at 4-byte aligned address */
 10721  
 10722  		/* check that unrolling works */
 10723  		/* Partially unrolled */
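        		// The three "check that ..." comments above presumably annotated assertions
        		// in the C source that were compiled out of this translation. Each term
        		// below is the expansion of the SILK fixed-point macro SKP_SMULWB(a32, b16):
        		// the upper 32 bits of a 32x16-bit multiply, roughly (a32*b16)>>16 without
        		// 64-bit arithmetic. A hypothetical helper (not part of this file) would be:
        		//
        		//	func smulwb(a int32, b int16) int32 {
        		//		return (a>>16)*int32(b) + ((a&0x0000FFFF)*int32(b))>>16
        		//	}
        		//
        		// The ten explicit terms plus the loop below accumulate the short-term LPC
        		// prediction of order predictLPCOrder from the sLPC_Q14 history.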
 10724  		LPC_pred_Q10 = ((((*(*int32)(unsafe.Pointer(psLPC_Q14))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12))))) >> 16))
 10725  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-1)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 1*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 1*2))))) >> 16)))
 10726  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-2)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 2*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-2)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 2*2))))) >> 16)))
 10727  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-3)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 3*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-3)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 3*2))))) >> 16)))
 10728  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-4)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 4*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-4)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 4*2))))) >> 16)))
 10729  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-5)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 5*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-5)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 5*2))))) >> 16)))
 10730  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-6)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 6*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-6)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 6*2))))) >> 16)))
 10731  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-7)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 7*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-7)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 7*2))))) >> 16)))
 10732  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-8)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 8*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-8)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 8*2))))) >> 16)))
 10733  		LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-9)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 9*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-9)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 9*2))))) >> 16)))
 10734  		for j = 10; j < predictLPCOrder; j++ {
 10735  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + uintptr(-j)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + uintptr(j)*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + uintptr(-j)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + uintptr(j)*2))))) >> 16)))
 10736  		}
 10737  		/* Long-term prediction */
 10738  		if sigtype == 0 {
 10739  			/* Unrolled loop */
 10740  			LTP_pred_Q14 = ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14))))) >> 16))
 10741  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 1*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 1*2))))) >> 16)))
 10742  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 2*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 2*2))))) >> 16)))
 10743  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 3*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 3*2))))) >> 16)))
 10744  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 4*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 4*2))))) >> 16)))
 10745  			pred_lag_ptr += 4
 10746  		} else {
 10747  			LTP_pred_Q14 = 0
 10748  		}
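        		// For voiced frames the long-term (pitch) prediction is a 5-tap FIR over
        		// the excitation history sLTP_Q16, centred on the pitch lag (pred_lag_ptr
        		// was set to sLTP_buf_idx - lag + 2 above) and weighted by the Q14 LTP
        		// coefficients b_Q14; unvoiced frames skip it entirely.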
 10749  
 10750  		/* Noise shape feedback */
 10751  		/* check that order is even */
 10752  		tmp2 = *(*int32)(unsafe.Pointer(psLPC_Q14))
 10753  		tmp1 = *(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */)))
 10754  		*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */))) = tmp2
 10755  		n_AR_Q10 = ((((tmp2) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13))))) + ((((tmp2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13))))) >> 16))
 10756  		for j = 2; j < shapingLPCOrder; j = j + (2) {
 10757  			tmp2 = *(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr((j-1))*4))
 10758  			*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr((j-1))*4)) = tmp1
 10759  			n_AR_Q10 = ((n_AR_Q10) + ((((tmp1) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((j-1))*2))))) + ((((tmp1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((j-1))*2))))) >> 16)))
 10760  			tmp1 = *(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr((j+0))*4))
 10761  			*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr((j+0))*4)) = tmp2
 10762  			n_AR_Q10 = ((n_AR_Q10) + ((((tmp2) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr(j)*2))))) + ((((tmp2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr(j)*2))))) >> 16)))
 10763  		}
 10764  		*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr((shapingLPCOrder-1))*4)) = tmp1
 10765  		n_AR_Q10 = ((n_AR_Q10) + ((((tmp1) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((shapingLPCOrder-1))*2))))) + ((((tmp1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((shapingLPCOrder-1))*2))))) >> 16)))
 10766  
 10767  		n_AR_Q10 = ((n_AR_Q10) >> (1)) /* Q11 -> Q10 */
 10768  		n_AR_Q10 = ((n_AR_Q10) + (((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12) >> 16) * (int32(int16(Tilt_Q14)))) + (((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12) & 0x0000FFFF) * (int32(int16(Tilt_Q14)))) >> 16)))
 10769  
 10770  		n_LF_Q10 = (((((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx-1))*4))) >> 16) * (int32(int16(LF_shp_Q14)))) + ((((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx-1))*4))) & 0x0000FFFF) * (int32(int16(LF_shp_Q14)))) >> 16)) << (2))
 10771  		n_LF_Q10 = (((n_LF_Q10) + ((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12) >> 16) * ((LF_shp_Q14) >> 16))) + (((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12) & 0x0000FFFF) * ((LF_shp_Q14) >> 16)) >> 16))
 10772  
 10773  		/* Long-term shaping */
 10774  		if lag > 0 {
 10775  			/* Symmetric, packed FIR coefficients */
 10776  			n_LTP_Q14 = (((((*(*int32)(unsafe.Pointer(shp_lag_ptr))) + (*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-2)*4)))) >> 16) * (int32(int16(HarmShapeFIRPacked_Q14)))) + (((((*(*int32)(unsafe.Pointer(shp_lag_ptr))) + (*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-2)*4)))) & 0x0000FFFF) * (int32(int16(HarmShapeFIRPacked_Q14)))) >> 16))
 10777  			n_LTP_Q14 = (((n_LTP_Q14) + (((*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-1)*4))) >> 16) * ((HarmShapeFIRPacked_Q14) >> 16))) + ((((*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * ((HarmShapeFIRPacked_Q14) >> 16)) >> 16))
 10778  			n_LTP_Q14 = ((n_LTP_Q14) << (6))
 10779  			shp_lag_ptr += 4
 10780  		} else {
 10781  			n_LTP_Q14 = 0
 10782  		}
 10783  
 10784  		/* Input minus prediction plus noise feedback  */
 10785  		//r = x[ i ] - LTP_pred - LPC_pred + n_AR + n_Tilt + n_LF + n_LTP;
 10786  		tmp1 = ((LTP_pred_Q14) - (n_LTP_Q14)) /* Add Q14 stuff */
 10787  		tmp1 = ((tmp1) >> (4))                /* convert to Q10  */
 10788  		tmp1 = ((tmp1) + (LPC_pred_Q10))      /* add Q10 stuff */
 10789  		tmp1 = ((tmp1) - (n_AR_Q10))          /* subtract Q10 stuff */
 10790  		tmp1 = ((tmp1) - (n_LF_Q10))          /* subtract Q10 stuff */
 10791  		r_Q10 = ((*(*int32)(unsafe.Pointer(x_sc_Q10 + uintptr(i)*4))) - (tmp1))
 10792  
 10793  		/* Flip sign depending on dither */
 10794  		r_Q10 = ((r_Q10 ^ dither) - dither)
 10795  		r_Q10 = ((r_Q10) - (offset_Q10))
 10796  		r_Q10 = func() int32 {
 10797  			if (int32(-64) << 10) > (int32(64) << 10) {
 10798  				return func() int32 {
 10799  					if (r_Q10) > (int32(-64) << 10) {
 10800  						return (int32(-64) << 10)
 10801  					}
 10802  					return func() int32 {
 10803  						if (r_Q10) < (int32(64) << 10) {
 10804  							return (int32(64) << 10)
 10805  						}
 10806  						return r_Q10
 10807  					}()
 10808  				}()
 10809  			}
 10810  			return func() int32 {
 10811  				if (r_Q10) > (int32(64) << 10) {
 10812  					return (int32(64) << 10)
 10813  				}
 10814  				return func() int32 {
 10815  					if (r_Q10) < (int32(-64) << 10) {
 10816  						return (int32(-64) << 10)
 10817  					}
 10818  					return r_Q10
 10819  				}()
 10820  			}()
 10821  		}()
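        		// The function literal above is the expansion of a SILK limiting macro:
        		// it clamps the sign-flipped, offset-removed residual to +/-(64 << 10),
        		// i.e. +/-64 in Q10, which is what makes the "max is 64" remark on the
        		// int8 store of q_Q0 further down hold.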
 10822  
 10823  		/* Quantize */
 10824  		q_Q0 = 0
 10825  		q_Q10 = 0
 10826  		if r_Q10 < thr2_Q10 {
 10827  			if r_Q10 < thr1_Q10 {
 10828  				q_Q0 = func() int32 {
 10829  					if (10) == 1 {
 10830  						return ((((r_Q10) + ((Lambda_Q10) >> (1))) >> 1) + (((r_Q10) + ((Lambda_Q10) >> (1))) & 1))
 10831  					}
 10832  					return (((((r_Q10) + ((Lambda_Q10) >> (1))) >> ((10) - 1)) + 1) >> 1)
 10833  				}()
 10834  				q_Q10 = ((q_Q0) << (10))
 10835  			} else {
 10836  				q_Q0 = -1
 10837  				q_Q10 = -1024
 10838  			}
 10839  		} else {
 10840  			if r_Q10 > thr3_Q10 {
 10841  				q_Q0 = func() int32 {
 10842  					if (10) == 1 {
 10843  						return ((((r_Q10) - ((Lambda_Q10) >> (1))) >> 1) + (((r_Q10) - ((Lambda_Q10) >> (1))) & 1))
 10844  					}
 10845  					return (((((r_Q10) - ((Lambda_Q10) >> (1))) >> ((10) - 1)) + 1) >> 1)
 10846  				}()
 10847  				q_Q10 = ((q_Q0) << (10))
 10848  			}
 10849  		}
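        		// Dead-zone quantization with a rate-distortion bias: values in
        		// [thr2, thr3] collapse to 0, the band just below maps to -1, and
        		// everything further out is rounded after adding/subtracting
        		// Lambda_Q10/2 (the func() literals are rounding right-shifts by 10,
        		// SKP_RSHIFT_ROUND in the reference sources).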
 10850  		*(*int8)(unsafe.Pointer(q + uintptr(i))) = int8(q_Q0) /* No saturation needed because max is 64 */
 10851  
 10852  		/* Excitation */
 10853  		exc_Q10 = ((q_Q10) + (offset_Q10))
 10854  		exc_Q10 = ((exc_Q10 ^ dither) - dither)
 10855  
 10856  		/* Add predictions */
 10857  		LPC_exc_Q10 = ((exc_Q10) + (func() int32 {
 10858  			if (4) == 1 {
 10859  				return (((LTP_pred_Q14) >> 1) + ((LTP_pred_Q14) & 1))
 10860  			}
 10861  			return ((((LTP_pred_Q14) >> ((4) - 1)) + 1) >> 1)
 10862  		}()))
 10863  		xq_Q10 = ((LPC_exc_Q10) + (LPC_pred_Q10))
 10864  
 10865  		/* Scale XQ back to normal level before saving */
 10866  		*(*int16)(unsafe.Pointer(xq + uintptr(i)*2)) = func() int16 {
 10867  			if (func() int32 {
 10868  				if (10) == 1 {
 10869  					return (((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10870  						if (16) == 1 {
 10871  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10872  						}
 10873  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10874  					}()))) >> 1) + ((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10875  						if (16) == 1 {
 10876  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10877  						}
 10878  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10879  					}()))) & 1))
 10880  				}
 10881  				return ((((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10882  					if (16) == 1 {
 10883  						return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10884  					}
 10885  					return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10886  				}()))) >> ((10) - 1)) + 1) >> 1)
 10887  			}()) > 0x7FFF {
 10888  				return int16(0x7FFF)
 10889  			}
 10890  			return func() int16 {
 10891  				if (func() int32 {
 10892  					if (10) == 1 {
 10893  						return (((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10894  							if (16) == 1 {
 10895  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10896  							}
 10897  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10898  						}()))) >> 1) + ((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10899  							if (16) == 1 {
 10900  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10901  							}
 10902  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10903  						}()))) & 1))
 10904  					}
 10905  					return ((((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10906  						if (16) == 1 {
 10907  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10908  						}
 10909  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10910  					}()))) >> ((10) - 1)) + 1) >> 1)
 10911  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 10912  					return libc.Int16FromInt32(0x8000)
 10913  				}
 10914  				return func() int16 {
 10915  					if (10) == 1 {
 10916  						return (int16(((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10917  							if (16) == 1 {
 10918  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10919  							}
 10920  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10921  						}()))) >> 1) + ((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10922  							if (16) == 1 {
 10923  								return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10924  							}
 10925  							return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10926  						}()))) & 1)))
 10927  					}
 10928  					return (int16((((((((xq_Q10) >> 16) * (int32(int16(Gain_Q16)))) + ((((xq_Q10) & 0x0000FFFF) * (int32(int16(Gain_Q16)))) >> 16)) + ((xq_Q10) * (func() int32 {
 10929  						if (16) == 1 {
 10930  							return (((Gain_Q16) >> 1) + ((Gain_Q16) & 1))
 10931  						}
 10932  						return ((((Gain_Q16) >> ((16) - 1)) + 1) >> 1)
 10933  					}()))) >> ((10) - 1)) + 1) >> 1))
 10934  				}()
 10935  			}()
 10936  		}()
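        		// The nested function literals above appear to amount to, in SILK macro
        		// terms, xq[i] = SAT16( RSHIFT_ROUND( SMULWW( xq_Q10, Gain_Q16 ), 10 ) ):
        		// the Q10 output sample is scaled back up by the subframe gain (a full
        		// 32x32 fractional multiply, hence the extra Gain_Q16>>16 term) and
        		// saturated to the int16 range before being stored.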
 10937  
 10938  		/* Update states */
 10939  		psLPC_Q14 += 4
 10940  		*(*int32)(unsafe.Pointer(psLPC_Q14)) = ((xq_Q10) << (4))
 10941  		sLF_AR_shp_Q10 = ((xq_Q10) - (n_AR_Q10))
 10942  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12 = ((sLF_AR_shp_Q10) << (2))
 10943  
 10944  		*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx)*4)) = ((sLF_AR_shp_Q10) - (n_LF_Q10))
 10945  		*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx)*4)) = ((LPC_exc_Q10) << (6))
 10946  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx++
 10947  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx++
 10948  
 10949  		/* Make dither dependent on quantized signal */
 10950  		*(*int32)(unsafe.Pointer(NSQ + 6448 /* &.rand_seed */)) += (int32(*(*int8)(unsafe.Pointer(q + uintptr(i)))))
 10951  	}
 10952  
 10953  	/* Update LPC synth buffer */
 10954  	libc.Xmemcpy(tls, NSQ+5760 /* &.sLPC_Q14 */, ((NSQ + 5760 /* &.sLPC_Q14 */) + uintptr(length)*4), (uint32(32) * uint32(unsafe.Sizeof(int32(0)))))
 10955  }
 10956  
 10957  func SKP_Silk_nsq_scale_states(tls *libc.TLS, NSQ uintptr, x uintptr, x_sc_Q10 uintptr, subfr_length int32, sLTP uintptr, sLTP_Q16 uintptr, subfr int32, LTP_scale_Q14 int32, Gains_Q16 uintptr, pitchL uintptr) { /* SKP_Silk_NSQ.c:353:17: */
 10958  	var i int32
 10959  	var lag int32
 10960  	var inv_gain_Q16 int32
 10961  	var gain_adj_Q16 int32
 10962  	var inv_gain_Q32 int32
 10963  
 10964  	inv_gain_Q16 = SKP_INVERSE32_varQ(tls, func() int32 {
 10965  		if (*(*int32)(unsafe.Pointer(Gains_Q16 + uintptr(subfr)*4))) > (1) {
 10966  			return *(*int32)(unsafe.Pointer(Gains_Q16 + uintptr(subfr)*4))
 10967  		}
 10968  		return 1
 10969  	}(), 32)
 10970  	inv_gain_Q16 = func() int32 {
 10971  		if (inv_gain_Q16) < (0x7FFF) {
 10972  			return inv_gain_Q16
 10973  		}
 10974  		return 0x7FFF
 10975  	}()
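        	// inv_gain_Q16 is roughly (1 << 32) / max(Gains_Q16[subfr], 1), i.e. the
        	// reciprocal of the subframe gain in Q16, then capped at 0x7FFF so that it
        	// fits in 16 bits for the input scaling at the end of this function.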
 10976  	lag = *(*int32)(unsafe.Pointer(pitchL + uintptr(subfr)*4))
 10977  
 10978  	/* After rewhitening the LTP state is un-scaled, so scale with inv_gain_Q16 */
 10979  	if (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag != 0 {
 10980  		inv_gain_Q32 = ((inv_gain_Q16) << (16))
 10981  		if subfr == 0 {
 10982  			/* Do LTP downscaling */
 10983  			inv_gain_Q32 = (((((inv_gain_Q32) >> 16) * (int32(int16(LTP_scale_Q14)))) + ((((inv_gain_Q32) & 0x0000FFFF) * (int32(int16(LTP_scale_Q14)))) >> 16)) << (2))
 10984  		}
 10985  		for i = (((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx - lag) - (5 / 2)); i < (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx; i++ {
 10986  
 10987  			*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)) = ((((inv_gain_Q32) >> 16) * (int32(*(*int16)(unsafe.Pointer(sLTP + uintptr(i)*2))))) + ((((inv_gain_Q32) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(sLTP + uintptr(i)*2))))) >> 16))
 10988  		}
 10989  	}
 10990  
 10991  	/* Adjust for changing gain */
 10992  	if inv_gain_Q16 != (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Fprev_inv_gain_Q16 {
 10993  		gain_adj_Q16 = SKP_DIV32_varQ(tls, inv_gain_Q16, (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Fprev_inv_gain_Q16, 16)
 10994  
 10995  		/* Scale long-term shaping state */
 10996  		for i = ((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx - (subfr_length * 4)); i < (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx; i++ {
 10997  			*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 10998  				if (16) == 1 {
 10999  					return (((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4))) & 1))
 11000  				}
 11001  				return ((((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11002  			}())))
 11003  		}
 11004  
 11005  		/* Scale long-term prediction state */
 11006  		if (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag == 0 {
 11007  			for i = (((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx - lag) - (5 / 2)); i < (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx; i++ {
 11008  				*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11009  					if (16) == 1 {
 11010  						return (((*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4))) & 1))
 11011  					}
 11012  					return ((((*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11013  				}())))
 11014  			}
 11015  		}
 11016  
 11017  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12 = (((((gain_adj_Q16) >> 16) * (int32(int16((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12)))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12)))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11018  			if (16) == 1 {
 11019  				return ((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12) >> 1) + (((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12) & 1))
 11020  			}
 11021  			return (((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12) >> ((16) - 1)) + 1) >> 1)
 11022  		}())))
 11023  
 11024  		/* Scale short-term prediction and shaping states */
 11025  		for i = 0; i < 32; i++ {
 11026  			*(*int32)(unsafe.Pointer((NSQ + 5760 /* &.sLPC_Q14 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 5760 /* &.sLPC_Q14 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 5760 /* &.sLPC_Q14 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11027  				if (16) == 1 {
 11028  					return (((*(*int32)(unsafe.Pointer((NSQ + 5760 /* &.sLPC_Q14 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((NSQ + 5760 /* &.sLPC_Q14 */) + uintptr(i)*4))) & 1))
 11029  				}
 11030  				return ((((*(*int32)(unsafe.Pointer((NSQ + 5760 /* &.sLPC_Q14 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11031  			}())))
 11032  		}
 11033  		for i = 0; i < 16; i++ {
 11034  			*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11035  				if (16) == 1 {
 11036  					return (((*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr(i)*4))) & 1))
 11037  				}
 11038  				return ((((*(*int32)(unsafe.Pointer((NSQ + 6368 /* &.sAR2_Q14 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11039  			}())))
 11040  		}
 11041  	}
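        	// gain_adj_Q16 = inv_gain / prev_inv_gain in Q16: when the quantization
        	// gain changes between subframes, all buffered state (long-term shaping,
        	// LTP excitation, the LF/AR shaping value, and the sLPC_Q14 and sAR2_Q14
        	// histories rescaled above) is multiplied by that ratio so the filters
        	// keep operating on a consistently normalized signal.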
 11042  
 11043  	/* Scale input */
 11044  	for i = 0; i < subfr_length; i++ {
 11045  		*(*int32)(unsafe.Pointer(x_sc_Q10 + uintptr(i)*4)) = (((int32(*(*int16)(unsafe.Pointer(x + uintptr(i)*2)))) * (int32(int16(inv_gain_Q16)))) >> (6))
 11046  	}
 11047  
 11048  	/* save inv_gain */
 11049  
 11050  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Fprev_inv_gain_Q16 = inv_gain_Q16
 11051  }
 11052  
 11053  type NSQ_del_dec_struct = struct {
 11054  	FRandState [32]int32
 11055  	FQ_Q10     [32]int32
 11056  	FXq_Q10    [32]int32
 11057  	FPred_Q16  [32]int32
 11058  	FShape_Q10 [32]int32
 11059  	FGain_Q16  [32]int32
 11060  	FsAR2_Q14  [16]int32
 11061  	FsLPC_Q14  [152]int32
 11062  	FLF_AR_Q12 int32
 11063  	FSeed      int32
 11064  	FSeedInit  int32
 11065  	FRD_Q10    int32
 11066  } /* SKP_Silk_NSQ_del_dec.c:43:3 */
 11067  
 11068  type NSQ_sample_struct = struct {
 11069  	FQ_Q10        int32
 11070  	FRD_Q10       int32
 11071  	Fxq_Q14       int32
 11072  	FLF_AR_Q12    int32
 11073  	FsLTP_shp_Q10 int32
 11074  	FLPC_exc_Q16  int32
 11075  } /* SKP_Silk_NSQ_del_dec.c:52:3 */
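        // NSQ_del_dec_struct holds one delayed-decision quantizer state: 32-entry
        // circular buffers (one slot per sample of decision delay) of random-seed,
        // pulse, output, prediction, shaping and gain values, plus filter memories
        // and an accumulated rate-distortion cost RD_Q10. NSQ_sample_struct holds a
        // candidate outcome evaluated per state for each sample; in the reference
        // SKP_Silk_NSQ_del_dec.c two such candidates are kept per state and the
        // lowest-RD ones survive into the next sample (field meanings inferred from
        // the trailing source comments, not stated in this generated file).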
 11076  
 11077  func SKP_Silk_NSQ_del_dec(tls *libc.TLS, psEncC uintptr, psEncCtrlC uintptr, NSQ uintptr, x uintptr, q uintptr, LSFInterpFactor_Q2 int32, PredCoef_Q12 uintptr, LTPCoef_Q14 uintptr, AR2_Q13 uintptr, HarmShapeGain_Q14 uintptr, Tilt_Q14 uintptr, LF_shp_Q14 uintptr, Gains_Q16 uintptr, Lambda_Q10 int32, LTP_scale_Q14 int32) { /* SKP_Silk_NSQ_del_dec.c:107:6: */
 11078  	bp := tls.Alloc(12132)
 11079  	defer tls.Free(12132)
 11080  
 11081  	var i int32
 11082  	var k int32
 11083  	var lag int32
 11084  	var start_idx int32
 11085  	var LSF_interpolation_flag int32
 11086  	var Winner_ind int32
 11087  	var subfr int32
 11088  	var last_smple_idx int32
 11089  	// var smpl_buf_idx int32 at bp+12128, 4
 11090  
 11091  	var decisionDelay int32
 11092  	var subfr_length int32
 11093  	var A_Q12 uintptr
 11094  	var B_Q14 uintptr
 11095  	var AR_shp_Q13 uintptr
 11096  	var pxq uintptr
 11097  	// var sLTP_Q16 [960]int32 at bp+8288, 3840
 11098  
 11099  	// var sLTP [960]int16 at bp+5888, 1920
 11100  
 11101  	var HarmShapeFIRPacked_Q14 int32
 11102  	var offset_Q10 int32
 11103  	// var FiltState [16]int32 at bp+5824, 64
 11104  
 11105  	var RDmin_Q10 int32
 11106  	// var x_sc_Q10 [120]int32 at bp+7808, 480
 11107  
 11108  	// var psDelDec [4]NSQ_del_dec_struct at bp, 5824
 11109  
 11110  	var psDD uintptr
 11111  
 11112  	subfr_length = ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length / 4)
 11113  
 11114  	/* Set unvoiced lag to the previous one, overwrite later for voiced */
 11115  	lag = (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FlagPrev
 11116  
 11117  	/* Initialize delayed decision states */
 11118  	libc.Xmemset(tls, bp /* &psDelDec[0] */, 0, (uint32((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision) * uint32(unsafe.Sizeof(NSQ_del_dec_struct{}))))
 11119  	for k = 0; k < (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision; k++ {
 11120  		psDD = (bp /* &psDelDec */ + uintptr(k)*1456)
 11121  		(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed = ((k + (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FSeed) & 3)
 11122  		(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeedInit = (*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed
 11123  		(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FRD_Q10 = 0
 11124  		(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12 = (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12
 11125  		*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */))) = *(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length-1))*4))
 11126  		libc.Xmemcpy(tls, psDD+832 /* &.sLPC_Q14 */, NSQ+5760 /* &.sLPC_Q14 */, (uint32(32) * uint32(unsafe.Sizeof(int32(0)))))
 11127  		libc.Xmemcpy(tls, psDD+768 /* &.sAR2_Q14 */, NSQ+6368 /* &.sAR2_Q14 */, uint32(unsafe.Sizeof([16]int32{})))
 11128  	}
 11129  
 11130  	offset_Q10 = int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Quantization_Offsets_Q10)) + uintptr((*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype)*4) + uintptr((*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FQuantOffsetType)*2)))
 11131  	*(*int32)(unsafe.Pointer(bp + 12128 /* smpl_buf_idx */)) = 0 /* index of oldest samples */
 11132  
 11133  	decisionDelay = SKP_min_int(tls, 32, subfr_length)
 11134  
 11135  	/* For voiced frames limit the decision delay to lower than the pitch lag */
 11136  	if (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype == 0 {
 11137  		for k = 0; k < 4; k++ {
 11138  			decisionDelay = SKP_min_int(tls, decisionDelay, ((*(*int32)(unsafe.Pointer((psEncCtrlC + 108 /* &.pitchL */) + uintptr(k)*4)) - (5 / 2)) - 1))
 11139  		}
 11140  	} else {
 11141  		if lag > 0 {
 11142  			decisionDelay = SKP_min_int(tls, decisionDelay, ((lag - (5 / 2)) - 1))
 11143  		}
 11144  	}
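        	// The decision delay is kept shorter than the shortest pitch lag minus the
        	// LTP half-length (5/2): samples that far back are read again by the
        	// long-term predictor and by re-whitening, so they must already have been
        	// finalized (copied out of the winner state) by the time they are needed.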
 11145  
 11146  	if LSFInterpFactor_Q2 == (int32(1) << 2) {
 11147  		LSF_interpolation_flag = 0
 11148  	} else {
 11149  		LSF_interpolation_flag = 1
 11150  	}
 11151  
 11152  	/* Setup pointers to start of sub frame */
 11153  	pxq = ((NSQ /* &.xq */) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length)*2)
 11154  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx = (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length
 11155  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx = (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length
 11156  	subfr = 0
 11157  	for k = 0; k < 4; k++ {
 11158  		A_Q12 = (PredCoef_Q12 + uintptr((((k>>1)|(1-LSF_interpolation_flag))*16))*2)
 11159  		B_Q14 = (LTPCoef_Q14 + uintptr((k*5))*2)
 11160  		AR_shp_Q13 = (AR2_Q13 + uintptr((k*16))*2)
 11161  
 11162  		/* Noise shape parameters */
 11163  
 11164  		HarmShapeFIRPacked_Q14 = ((*(*int32)(unsafe.Pointer(HarmShapeGain_Q14 + uintptr(k)*4))) >> (2))
 11165  		HarmShapeFIRPacked_Q14 = HarmShapeFIRPacked_Q14 | (((*(*int32)(unsafe.Pointer(HarmShapeGain_Q14 + uintptr(k)*4))) >> (1)) << (16))
 11166  
 11167  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag = 0
 11168  		if (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype == 0 {
 11169  			/* Voiced */
 11170  			lag = *(*int32)(unsafe.Pointer((psEncCtrlC + 108 /* &.pitchL */) + uintptr(k)*4))
 11171  
 11172  			/* Re-whitening */
 11173  			if (k & (3 - ((LSF_interpolation_flag) << (1)))) == 0 {
 11174  				if k == 2 {
 11175  					/* RESET DELAYED DECISIONS */
 11176  					/* Find winner */
 11177  					RDmin_Q10 = (*NSQ_del_dec_struct)(unsafe.Pointer(bp /* &psDelDec */)).FRD_Q10
 11178  					Winner_ind = 0
 11179  					for i = 1; i < (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision; i++ {
 11180  						if (*NSQ_del_dec_struct)(unsafe.Pointer(bp /* &psDelDec */ +uintptr(i)*1456)).FRD_Q10 < RDmin_Q10 {
 11181  							RDmin_Q10 = (*NSQ_del_dec_struct)(unsafe.Pointer(bp /* &psDelDec */ + uintptr(i)*1456)).FRD_Q10
 11182  							Winner_ind = i
 11183  						}
 11184  					}
 11185  					for i = 0; i < (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision; i++ {
 11186  						if i != Winner_ind {
 11187  							*(*int32)(unsafe.Pointer(bp /* &psDelDec */ + uintptr(i)*1456 + 1452 /* &.RD_Q10 */)) += (int32(0x7FFFFFFF) >> 4)
 11188  
 11189  						}
 11190  					}
 11191  
 11192  					/* Copy final part of signals from winner state to output and long-term filter states */
 11193  					psDD = (bp /* &psDelDec */ + uintptr(Winner_ind)*1456)
 11194  					last_smple_idx = (*(*int32)(unsafe.Pointer(bp + 12128 /* smpl_buf_idx */)) + decisionDelay)
 11195  					for i = 0; i < decisionDelay; i++ {
 11196  						last_smple_idx = ((last_smple_idx - 1) & (32 - 1))
 11197  						*(*int8)(unsafe.Pointer(q + uintptr((i - decisionDelay)))) = (int8((*(*int32)(unsafe.Pointer((psDD + 128 /* &.Q_Q10 */) + uintptr(last_smple_idx)*4))) >> (10)))
 11198  						*(*int16)(unsafe.Pointer(pxq + uintptr((i-decisionDelay))*2)) = func() int16 {
 11199  							if (func() int32 {
 11200  								if (10) == 1 {
 11201  									return (((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11202  										if (16) == 1 {
 11203  											return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11204  										}
 11205  										return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11206  									}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11207  										if (16) == 1 {
 11208  											return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11209  										}
 11210  										return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11211  									}()))) & 1))
 11212  								}
 11213  								return ((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11214  									if (16) == 1 {
 11215  										return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11216  									}
 11217  									return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11218  								}()))) >> ((10) - 1)) + 1) >> 1)
 11219  							}()) > 0x7FFF {
 11220  								return int16(0x7FFF)
 11221  							}
 11222  							return func() int16 {
 11223  								if (func() int32 {
 11224  									if (10) == 1 {
 11225  										return (((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11226  											if (16) == 1 {
 11227  												return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11228  											}
 11229  											return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11230  										}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11231  											if (16) == 1 {
 11232  												return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11233  											}
 11234  											return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11235  										}()))) & 1))
 11236  									}
 11237  									return ((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11238  										if (16) == 1 {
 11239  											return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11240  										}
 11241  										return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11242  									}()))) >> ((10) - 1)) + 1) >> 1)
 11243  								}()) < (int32(libc.Int16FromInt32(0x8000))) {
 11244  									return libc.Int16FromInt32(0x8000)
 11245  								}
 11246  								return func() int16 {
 11247  									if (10) == 1 {
 11248  										return (int16(((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11249  											if (16) == 1 {
 11250  												return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11251  											}
 11252  											return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11253  										}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11254  											if (16) == 1 {
 11255  												return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11256  											}
 11257  											return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11258  										}()))) & 1)))
 11259  									}
 11260  									return (int16((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11261  										if (16) == 1 {
 11262  											return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11263  										}
 11264  										return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11265  									}()))) >> ((10) - 1)) + 1) >> 1))
 11266  								}()
 11267  							}()
 11268  						}()
 11269  						*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx-decisionDelay)+i))*4)) = *(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(last_smple_idx)*4))
 11270  					}
 11271  
 11272  					subfr = 0
 11273  				}
 11274  
 11275  				/* Rewhiten with new A coefs */
 11276  				start_idx = ((((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length - lag) - (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder) - (5 / 2))
 11277  
 11278  				libc.Xmemset(tls, bp+5824 /* &FiltState[0] */, 0, (uint32((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder) * uint32(unsafe.Sizeof(int32(0)))))
 11279  				SKP_Silk_MA_Prediction(tls, ((NSQ /* &.xq */) + uintptr((start_idx+(k*(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length)))*2),
 11280  					A_Q12, bp+5824 /* &FiltState[0] */, (bp + 5888 /* &sLTP[0] */ + uintptr(start_idx)*2), ((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length - start_idx), (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder)
 11281  
 11282  				(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx = (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length
 11283  				(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag = 1
 11284  			}
 11285  		}
 11286  
 11287  		SKP_Silk_nsq_del_dec_scale_states(tls, NSQ, bp /* &psDelDec[0] */, x, bp+7808, /* &x_sc_Q10[0] */
 11288  			subfr_length, bp+5888 /* &sLTP[0] */, bp+8288 /* &sLTP_Q16[0] */, k, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision, *(*int32)(unsafe.Pointer(bp + 12128 /* smpl_buf_idx */)),
 11289  			LTP_scale_Q14, Gains_Q16, psEncCtrlC+108 /* &.pitchL */)
 11290  
 11291  		SKP_Silk_noise_shape_quantizer_del_dec(tls, NSQ, bp /* &psDelDec[0] */, (*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).Fsigtype, bp+7808 /* &x_sc_Q10[0] */, q, pxq, bp+8288, /* &sLTP_Q16[0] */
 11292  			A_Q12, B_Q14, AR_shp_Q13, lag, HarmShapeFIRPacked_Q14, *(*int32)(unsafe.Pointer(Tilt_Q14 + uintptr(k)*4)), *(*int32)(unsafe.Pointer(LF_shp_Q14 + uintptr(k)*4)), *(*int32)(unsafe.Pointer(Gains_Q16 + uintptr(k)*4)),
 11293  			Lambda_Q10, offset_Q10, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length, libc.PostIncInt32(&subfr, 1), (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FshapingLPCOrder, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FpredictLPCOrder,
 11294  			(*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fwarping_Q16, (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision, bp+12128 /* &smpl_buf_idx */, decisionDelay)
 11295  
 11296  		x += 2 * uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length)
 11297  		q += uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length)
 11298  		pxq += 2 * (uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length))
 11299  	}
 11300  
 11301  	/* Find winner */
 11302  	RDmin_Q10 = (*NSQ_del_dec_struct)(unsafe.Pointer(bp /* &psDelDec */)).FRD_Q10
 11303  	Winner_ind = 0
 11304  	for k = 1; k < (*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).FnStatesDelayedDecision; k++ {
 11305  		if (*NSQ_del_dec_struct)(unsafe.Pointer(bp /* &psDelDec */ +uintptr(k)*1456)).FRD_Q10 < RDmin_Q10 {
 11306  			RDmin_Q10 = (*NSQ_del_dec_struct)(unsafe.Pointer(bp /* &psDelDec */ + uintptr(k)*1456)).FRD_Q10
 11307  			Winner_ind = k
 11308  		}
 11309  	}
 11310  
 11311  	/* Copy final part of signals from winner state to output and long-term filter states */
 11312  	psDD = (bp /* &psDelDec */ + uintptr(Winner_ind)*1456)
 11313  	(*SKP_Silk_encoder_control)(unsafe.Pointer(psEncCtrlC)).FSeed = (*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeedInit
 11314  	last_smple_idx = (*(*int32)(unsafe.Pointer(bp + 12128 /* smpl_buf_idx */)) + decisionDelay)
 11315  	for i = 0; i < decisionDelay; i++ {
 11316  		last_smple_idx = ((last_smple_idx - 1) & (32 - 1))
 11317  		*(*int8)(unsafe.Pointer(q + uintptr((i - decisionDelay)))) = (int8((*(*int32)(unsafe.Pointer((psDD + 128 /* &.Q_Q10 */) + uintptr(last_smple_idx)*4))) >> (10)))
 11318  		*(*int16)(unsafe.Pointer(pxq + uintptr((i-decisionDelay))*2)) = func() int16 {
 11319  			if (func() int32 {
 11320  				if (10) == 1 {
 11321  					return (((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11322  						if (16) == 1 {
 11323  							return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11324  						}
 11325  						return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11326  					}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11327  						if (16) == 1 {
 11328  							return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11329  						}
 11330  						return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11331  					}()))) & 1))
 11332  				}
 11333  				return ((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11334  					if (16) == 1 {
 11335  						return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11336  					}
 11337  					return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11338  				}()))) >> ((10) - 1)) + 1) >> 1)
 11339  			}()) > 0x7FFF {
 11340  				return int16(0x7FFF)
 11341  			}
 11342  			return func() int16 {
 11343  				if (func() int32 {
 11344  					if (10) == 1 {
 11345  						return (((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11346  							if (16) == 1 {
 11347  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11348  							}
 11349  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11350  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11351  							if (16) == 1 {
 11352  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11353  							}
 11354  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11355  						}()))) & 1))
 11356  					}
 11357  					return ((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11358  						if (16) == 1 {
 11359  							return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11360  						}
 11361  						return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11362  					}()))) >> ((10) - 1)) + 1) >> 1)
 11363  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 11364  					return libc.Int16FromInt32(0x8000)
 11365  				}
 11366  				return func() int16 {
 11367  					if (10) == 1 {
 11368  						return (int16(((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11369  							if (16) == 1 {
 11370  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11371  							}
 11372  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11373  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11374  							if (16) == 1 {
 11375  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11376  							}
 11377  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11378  						}()))) & 1)))
 11379  					}
 11380  					return (int16((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11381  						if (16) == 1 {
 11382  							return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11383  						}
 11384  						return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11385  					}()))) >> ((10) - 1)) + 1) >> 1))
 11386  				}()
 11387  			}()
 11388  		}()
 11389  		*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx-decisionDelay)+i))*4)) = *(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(last_smple_idx)*4))
 11390  		*(*int32)(unsafe.Pointer(bp + 8288 /* &sLTP_Q16[0] */ + uintptr((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx-decisionDelay)+i))*4)) = *(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(last_smple_idx)*4))
 11391  	}
 11392  	libc.Xmemcpy(tls, NSQ+5760 /* &.sLPC_Q14 */, ((psDD + 832 /* &.sLPC_Q14 */) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fsubfr_length)*4), (uint32(32) * uint32(unsafe.Sizeof(int32(0)))))
 11393  	libc.Xmemcpy(tls, NSQ+6368 /* &.sAR2_Q14 */, psDD+768 /* &.sAR2_Q14 */, uint32(unsafe.Sizeof([16]int32{})))
 11394  
 11395  	/* Update states */
 11396  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLF_AR_shp_Q12 = (*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12
 11397  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FlagPrev = *(*int32)(unsafe.Pointer((psEncCtrlC + 108 /* &.pitchL */) + 3*4))
 11398  
 11399  	/* Save quantized speech and noise shaping signals */
 11400  	libc.Xmemcpy(tls, NSQ /* &.xq */, ((NSQ /* &.xq */) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length)*2), (uint32((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length) * uint32(unsafe.Sizeof(int16(0)))))
 11401  	libc.Xmemcpy(tls, NSQ+1920 /* &.sLTP_shp_Q10 */, ((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length)*4), (uint32((*SKP_Silk_encoder_state)(unsafe.Pointer(psEncC)).Fframe_length) * uint32(unsafe.Sizeof(int32(0)))))
 11402  
 11403  }
 11404  
 11405  /******************************************/
 11406  /* Noise shape quantizer for one subframe */
 11407  /******************************************/
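// NOTE: the deeply nested expressions and immediately-invoked closures in the
// functions below appear to be ccgo expansions of the SILK fixed-point macros
// (SKP_Silk_macros.h / SKP_Silk_SigProc_FIX.h). A rough reading key, assuming
// the usual macro definitions:
//
//	SKP_SMULWB(a, b)        ->  ((a>>16)*int32(int16(b))) + (((a&0x0000FFFF)*int32(int16(b)))>>16)
//	SKP_SMULWW(a, b)        ->  SKP_SMULWB(a, b) + a*SKP_RSHIFT_ROUND(b, 16)
//	SKP_RSHIFT_ROUND(a, n)  ->  ((a>>(n-1))+1)>>1 for n > 1 (the "if (n) == 1" closures)
//	SKP_SAT16(a)            ->  clamp to [-0x8000, 0x7FFF] (the 0x7FFF / 0x8000 ladders)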
 11408  func SKP_Silk_noise_shape_quantizer_del_dec(tls *libc.TLS, NSQ uintptr, psDelDec uintptr, sigtype int32, x_Q10 uintptr, q uintptr, xq uintptr, sLTP_Q16 uintptr, a_Q12 uintptr, b_Q14 uintptr, AR_shp_Q13 uintptr, lag int32, HarmShapeFIRPacked_Q14 int32, Tilt_Q14 int32, LF_shp_Q14 int32, Gain_Q16 int32, Lambda_Q10 int32, offset_Q10 int32, length int32, subfr int32, shapingLPCOrder int32, predictLPCOrder int32, warping_Q16 int32, nStatesDelayedDecision int32, smpl_buf_idx uintptr, decisionDelay int32) { /* SKP_Silk_NSQ_del_dec.c:305:17: */
 11409  	bp := tls.Alloc(192)
 11410  	defer tls.Free(192)
 11411  
 11412  	var i int32
 11413  	var j int32
 11414  	var k int32
 11415  	var Winner_ind int32
 11416  	var RDmin_ind int32
 11417  	var RDmax_ind int32
 11418  	var last_smple_idx int32
 11419  	var Winner_rand_state int32
 11420  	var LTP_pred_Q14 int32
 11421  	var LPC_pred_Q10 int32
 11422  	var n_AR_Q10 int32
 11423  	var n_LTP_Q14 int32
 11424  	var n_LF_Q10 int32
 11425  	var r_Q10 int32
 11426  	var rr_Q20 int32
 11427  	var rd1_Q10 int32
 11428  	var rd2_Q10 int32
 11429  	var RDmin_Q10 int32
 11430  	var RDmax_Q10 int32
 11431  	var q1_Q10 int32
 11432  	var q2_Q10 int32
 11433  	var dither int32
 11434  	var exc_Q10 int32
 11435  	var LPC_exc_Q10 int32
 11436  	var xq_Q10 int32
 11437  	var tmp1 int32
 11438  	var tmp2 int32
 11439  	var sLF_AR_shp_Q10 int32
 11440  	var pred_lag_ptr uintptr
 11441  	var shp_lag_ptr uintptr
 11442  	var psLPC_Q14 uintptr
 11443  	// var psSampleState [4][2]NSQ_sample_struct at bp, 192
 11444  
 11445  	var psDD uintptr
 11446  	var psSS uintptr
 11447  
 11448  	shp_lag_ptr = ((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx-lag)+(3/2)))*4)
 11449  	pred_lag_ptr = (sLTP_Q16 + uintptr((((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx-lag)+(5/2)))*4)
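	/* shp_lag_ptr and pred_lag_ptr index "lag" samples back into the shaping and rewhitened LTP */
	/* histories; the +(3/2) and +(5/2) offsets centre the 3-tap harmonic shaping FIR and the    */
	/* 5-tap LTP predictor on the pitch lag                                                      */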
 11450  
 11451  	for i = 0; i < length; i++ {
 11452  		/* Perform common calculations used in all states */
 11453  
 11454  		/* Long-term prediction */
 11455  		if sigtype == 0 {
 11456  			/* Unrolled loop */
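			/* 5-tap LTP dot product: b_Q14[0..4] against sLTP_Q16 samples centred on the lag; */
			/* each SMULWB keeps the Q16 * Q14 product in Q14                                  */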
 11457  			LTP_pred_Q14 = ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14))))) >> 16))
 11458  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 1*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 1*2))))) >> 16)))
 11459  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 2*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 2*2))))) >> 16)))
 11460  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 3*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 3*2))))) >> 16)))
 11461  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 4*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(b_Q14 + 4*2))))) >> 16)))
 11462  			pred_lag_ptr += 4
 11463  		} else {
 11464  			LTP_pred_Q14 = 0
 11465  		}
 11466  
 11467  		/* Long-term shaping */
 11468  		if lag > 0 {
 11469  			/* Symmetric, packed FIR coefficients */
 11470  			n_LTP_Q14 = (((((*(*int32)(unsafe.Pointer(shp_lag_ptr))) + (*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-2)*4)))) >> 16) * (int32(int16(HarmShapeFIRPacked_Q14)))) + (((((*(*int32)(unsafe.Pointer(shp_lag_ptr))) + (*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-2)*4)))) & 0x0000FFFF) * (int32(int16(HarmShapeFIRPacked_Q14)))) >> 16))
 11471  			n_LTP_Q14 = (((n_LTP_Q14) + (((*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-1)*4))) >> 16) * ((HarmShapeFIRPacked_Q14) >> 16))) + ((((*(*int32)(unsafe.Pointer(shp_lag_ptr + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * ((HarmShapeFIRPacked_Q14) >> 16)) >> 16))
 11472  			n_LTP_Q14 = ((n_LTP_Q14) << (6))
 11473  			shp_lag_ptr += 4
 11474  		} else {
 11475  			n_LTP_Q14 = 0
 11476  		}
 11477  
 11478  		for k = 0; k < nStatesDelayedDecision; k++ {
 11479  			/* Delayed decision state */
 11480  			psDD = (psDelDec + uintptr(k)*1456)
 11481  
 11482  			/* Sample state */
 11483  			psSS = (bp /* &psSampleState[0] */ + uintptr(k)*48)
 11484  
 11485  			/* Generate dither */
 11486  			(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed = (int32((uint32(907633515)) + ((uint32((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed)) * (uint32(196314165)))))
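			/* SILK LCG (SKP_RAND): seed = 907633515 + seed*196314165 mod 2^32; its sign bit drives the dither */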
 11487  
 11488  			/* dither = rand_seed < 0 ? 0xFFFFFFFF : 0; */
 11489  			dither = (((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed) >> (31))
 11490  
 11491  			/* Pointer used in short term prediction and shaping */
 11492  			psLPC_Q14 = ((psDD + 832 /* &.sLPC_Q14 */) + uintptr(((32-1)+i))*4)
 11493  			/* Short-term prediction */
 11494  			/* check that unrolling works */
 11495  			/* check that order is even */
 11496  			/* check that array starts at 4-byte aligned address */
 11497  			/* Partially unrolled */
 11498  			LPC_pred_Q10 = ((((*(*int32)(unsafe.Pointer(psLPC_Q14))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12))))) >> 16))
 11499  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-1)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 1*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 1*2))))) >> 16)))
 11500  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-2)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 2*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-2)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 2*2))))) >> 16)))
 11501  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-3)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 3*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-3)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 3*2))))) >> 16)))
 11502  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-4)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 4*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-4)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 4*2))))) >> 16)))
 11503  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-5)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 5*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-5)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 5*2))))) >> 16)))
 11504  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-6)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 6*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-6)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 6*2))))) >> 16)))
 11505  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-7)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 7*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-7)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 7*2))))) >> 16)))
 11506  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-8)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 8*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-8)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 8*2))))) >> 16)))
 11507  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-9)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 9*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + libc.UintptrFromInt32(-9)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + 9*2))))) >> 16)))
 11508  			for j = 10; j < predictLPCOrder; j++ {
 11509  				LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + uintptr(-j)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + uintptr(j)*2))))) + ((((*(*int32)(unsafe.Pointer(psLPC_Q14 + uintptr(-j)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(a_Q12 + uintptr(j)*2))))) >> 16)))
 11510  			}
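			/* LPC_pred_Q10 now holds the short-term prediction a_Q12 . sLPC_Q14 over predictLPCOrder taps */
			/* (first ten taps unrolled above); Q14 * Q12 via SMULWB leaves the sum in Q10                 */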
 11511  
 11512  			/* Noise shape feedback */
 11513  			/* check that order is even */
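			/* the chain below is the warped noise-shaping filter: a first-order lowpass followed by      */
			/* first-order allpass sections of the form state[j] + warping_Q16*(state[j+1] - prev output); */
			/* each section output is weighted by AR_shp_Q13 into n_AR_Q10                                 */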
 11514  			/* Output of lowpass section */
 11515  			tmp2 = ((*(*int32)(unsafe.Pointer(psLPC_Q14))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */)))) >> 16) * (int32(int16(warping_Q16)))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */)))) & 0x0000FFFF) * (int32(int16(warping_Q16)))) >> 16)))
 11516  			/* Output of allpass section */
 11517  			tmp1 = ((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */)))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + 1*4)) - tmp2) >> 16) * (int32(int16(warping_Q16)))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + 1*4)) - tmp2) & 0x0000FFFF) * (int32(int16(warping_Q16)))) >> 16)))
 11518  			*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */))) = tmp2
 11519  			n_AR_Q10 = ((((tmp2) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13))))) + ((((tmp2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13))))) >> 16))
 11520  			/* Loop over allpass sections */
 11521  			for j = 2; j < shapingLPCOrder; j = j + (2) {
 11522  				/* Output of allpass section */
 11523  				tmp2 = ((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j-1))*4))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j+0))*4)) - tmp1) >> 16) * (int32(int16(warping_Q16)))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j+0))*4)) - tmp1) & 0x0000FFFF) * (int32(int16(warping_Q16)))) >> 16)))
 11524  				*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j-1))*4)) = tmp1
 11525  				n_AR_Q10 = ((n_AR_Q10) + ((((tmp1) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((j-1))*2))))) + ((((tmp1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((j-1))*2))))) >> 16)))
 11526  				/* Output of allpass section */
 11527  				tmp1 = ((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j+0))*4))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j+1))*4)) - tmp2) >> 16) * (int32(int16(warping_Q16)))) + ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j+1))*4)) - tmp2) & 0x0000FFFF) * (int32(int16(warping_Q16)))) >> 16)))
 11528  				*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((j+0))*4)) = tmp2
 11529  				n_AR_Q10 = ((n_AR_Q10) + ((((tmp2) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr(j)*2))))) + ((((tmp2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr(j)*2))))) >> 16)))
 11530  			}
 11531  			*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr((shapingLPCOrder-1))*4)) = tmp1
 11532  			n_AR_Q10 = ((n_AR_Q10) + ((((tmp1) >> 16) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((shapingLPCOrder-1))*2))))) + ((((tmp1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(AR_shp_Q13 + uintptr((shapingLPCOrder-1))*2))))) >> 16)))
 11533  
 11534  			n_AR_Q10 = ((n_AR_Q10) >> (1)) /* Q11 -> Q10 */
 11535  			n_AR_Q10 = ((n_AR_Q10) + (((((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12) >> 16) * (int32(int16(Tilt_Q14)))) + (((((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12) & 0x0000FFFF) * (int32(int16(Tilt_Q14)))) >> 16)))
 11536  
 11537  			n_LF_Q10 = (((((*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4))) >> 16) * (int32(int16(LF_shp_Q14)))) + ((((*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4))) & 0x0000FFFF) * (int32(int16(LF_shp_Q14)))) >> 16)) << (2))
 11538  			n_LF_Q10 = (((n_LF_Q10) + ((((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12) >> 16) * ((LF_shp_Q14) >> 16))) + (((((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12) & 0x0000FFFF) * ((LF_shp_Q14) >> 16)) >> 16))
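			/* LF_shp_Q14 packs two Q14 coefficients: the low 16 bits weight the newest shaping sample */
			/* (Q10 * Q14 -> Q8, << 2 -> Q10), the high 16 bits weight the LF AR state (Q12 -> Q10)    */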
 11539  
 11540  			/* Input minus prediction plus noise feedback                       */
 11541  			/* r = x[ i ] - LTP_pred - LPC_pred + n_AR + n_Tilt + n_LF + n_LTP  */
 11542  			tmp1 = ((LTP_pred_Q14) - (n_LTP_Q14))                                /* Add Q14 stuff */
 11543  			tmp1 = ((tmp1) >> (4))                                               /* convert to Q10 */
 11544  			tmp1 = ((tmp1) + (LPC_pred_Q10))                                     /* add Q10 stuff */
 11545  			tmp1 = ((tmp1) - (n_AR_Q10))                                         /* subtract Q10 stuff */
 11546  			tmp1 = ((tmp1) - (n_LF_Q10))                                         /* subtract Q10 stuff */
 11547  			r_Q10 = ((*(*int32)(unsafe.Pointer(x_Q10 + uintptr(i)*4))) - (tmp1)) /* residual error Q10 */
 11548  
 11549  			/* Flip sign depending on dither */
 11550  			r_Q10 = ((r_Q10 ^ dither) - dither)
 11551  			r_Q10 = ((r_Q10) - (offset_Q10))
 11552  			r_Q10 = func() int32 {
 11553  				if (int32(-64) << 10) > (int32(64) << 10) {
 11554  					return func() int32 {
 11555  						if (r_Q10) > (int32(-64) << 10) {
 11556  							return (int32(-64) << 10)
 11557  						}
 11558  						return func() int32 {
 11559  							if (r_Q10) < (int32(64) << 10) {
 11560  								return (int32(64) << 10)
 11561  							}
 11562  							return r_Q10
 11563  						}()
 11564  					}()
 11565  				}
 11566  				return func() int32 {
 11567  					if (r_Q10) > (int32(64) << 10) {
 11568  						return (int32(64) << 10)
 11569  					}
 11570  					return func() int32 {
 11571  						if (r_Q10) < (int32(-64) << 10) {
 11572  							return (int32(-64) << 10)
 11573  						}
 11574  						return r_Q10
 11575  					}()
 11576  				}()
 11577  			}()
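			/* the nested closures above expand a constant-bound limit: the residual is clamped to +/-64.0 in Q10 */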
 11578  
 11579  			/* Find two quantization level candidates and measure their rate-distortion */
 11580  			if r_Q10 < -1536 {
 11581  				q1_Q10 = ((func() int32 {
 11582  					if (10) == 1 {
 11583  						return (((r_Q10) >> 1) + ((r_Q10) & 1))
 11584  					}
 11585  					return ((((r_Q10) >> ((10) - 1)) + 1) >> 1)
 11586  				}()) << (10))
 11587  				r_Q10 = ((r_Q10) - (q1_Q10))
 11588  				rd1_Q10 = ((((-((q1_Q10) + (offset_Q10))) * (Lambda_Q10)) + ((int32(int16(r_Q10))) * (int32(int16(r_Q10))))) >> (10))
 11589  				rd2_Q10 = ((rd1_Q10) + (1024))
 11590  				rd2_Q10 = ((rd2_Q10) - ((Lambda_Q10) + ((r_Q10) << (1))))
 11591  				q2_Q10 = ((q1_Q10) + (1024))
 11592  			} else if r_Q10 > 512 {
 11593  				q1_Q10 = ((func() int32 {
 11594  					if (10) == 1 {
 11595  						return (((r_Q10) >> 1) + ((r_Q10) & 1))
 11596  					}
 11597  					return ((((r_Q10) >> ((10) - 1)) + 1) >> 1)
 11598  				}()) << (10))
 11599  				r_Q10 = ((r_Q10) - (q1_Q10))
 11600  				rd1_Q10 = (((((q1_Q10) + (offset_Q10)) * (Lambda_Q10)) + ((int32(int16(r_Q10))) * (int32(int16(r_Q10))))) >> (10))
 11601  				rd2_Q10 = ((rd1_Q10) + (1024))
 11602  				rd2_Q10 = ((rd2_Q10) - ((Lambda_Q10) - ((r_Q10) << (1))))
 11603  				q2_Q10 = ((q1_Q10) - (1024))
 11604  			} else { /* r_Q10 >= -1536 && r_Q10 <= 512 */
 11605  				rr_Q20 = ((int32(int16(offset_Q10))) * (int32(int16(Lambda_Q10))))
 11606  				rd2_Q10 = (((rr_Q20) + ((int32(int16(r_Q10))) * (int32(int16(r_Q10))))) >> (10))
 11607  				rd1_Q10 = ((rd2_Q10) + (1024))
 11608  				rd1_Q10 = ((rd1_Q10) + (((Lambda_Q10) + ((r_Q10) << (1))) - ((rr_Q20) >> (9))))
 11609  				q1_Q10 = -1024
 11610  				q2_Q10 = 0
 11611  			}
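			/* two candidate levels one quantization step apart (1024 = 1.0 in Q10) were formed above;       */
			/* each RD value is (|q + offset_Q10| * Lambda_Q10 + r^2) >> 10, a rate penalty plus squared error */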
 11612  
 11613  			if rd1_Q10 < rd2_Q10 {
 11614  				(*NSQ_sample_struct)(unsafe.Pointer(psSS)).FRD_Q10 = (((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FRD_Q10) + (rd1_Q10))
 11615  				(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FRD_Q10 = (((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FRD_Q10) + (rd2_Q10))
 11616  				(*NSQ_sample_struct)(unsafe.Pointer(psSS)).FQ_Q10 = q1_Q10
 11617  				(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FQ_Q10 = q2_Q10
 11618  			} else {
 11619  				(*NSQ_sample_struct)(unsafe.Pointer(psSS)).FRD_Q10 = (((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FRD_Q10) + (rd2_Q10))
 11620  				(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FRD_Q10 = (((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FRD_Q10) + (rd1_Q10))
 11621  				(*NSQ_sample_struct)(unsafe.Pointer(psSS)).FQ_Q10 = q2_Q10
 11622  				(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FQ_Q10 = q1_Q10
 11623  			}
 11624  
 11625  			/* Update states for best quantization */
 11626  
 11627  			/* Quantized excitation */
 11628  			exc_Q10 = ((offset_Q10) + ((*NSQ_sample_struct)(unsafe.Pointer(psSS)).FQ_Q10))
 11629  			exc_Q10 = ((exc_Q10 ^ dither) - dither)
 11630  
 11631  			/* Add predictions */
 11632  			LPC_exc_Q10 = (exc_Q10 + (func() int32 {
 11633  				if (4) == 1 {
 11634  					return (((LTP_pred_Q14) >> 1) + ((LTP_pred_Q14) & 1))
 11635  				}
 11636  				return ((((LTP_pred_Q14) >> ((4) - 1)) + 1) >> 1)
 11637  			}()))
 11638  			xq_Q10 = ((LPC_exc_Q10) + (LPC_pred_Q10))
 11639  
 11640  			/* Update states */
 11641  			sLF_AR_shp_Q10 = ((xq_Q10) - (n_AR_Q10))
 11642  			(*NSQ_sample_struct)(unsafe.Pointer(psSS)).FsLTP_shp_Q10 = ((sLF_AR_shp_Q10) - (n_LF_Q10))
 11643  			(*NSQ_sample_struct)(unsafe.Pointer(psSS)).FLF_AR_Q12 = ((sLF_AR_shp_Q10) << (2))
 11644  			(*NSQ_sample_struct)(unsafe.Pointer(psSS)).Fxq_Q14 = ((xq_Q10) << (4))
 11645  			(*NSQ_sample_struct)(unsafe.Pointer(psSS)).FLPC_exc_Q16 = ((LPC_exc_Q10) << (6))
 11646  
 11647  			/* Update states for second best quantization */
 11648  
 11649  			/* Quantized excitation */
 11650  			exc_Q10 = ((offset_Q10) + ((*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FQ_Q10))
 11651  			exc_Q10 = ((exc_Q10 ^ dither) - dither)
 11652  
 11653  			/* Add predictions */
 11654  			LPC_exc_Q10 = (exc_Q10 + (func() int32 {
 11655  				if (4) == 1 {
 11656  					return (((LTP_pred_Q14) >> 1) + ((LTP_pred_Q14) & 1))
 11657  				}
 11658  				return ((((LTP_pred_Q14) >> ((4) - 1)) + 1) >> 1)
 11659  			}()))
 11660  			xq_Q10 = ((LPC_exc_Q10) + (LPC_pred_Q10))
 11661  
 11662  			/* Update states */
 11663  			sLF_AR_shp_Q10 = ((xq_Q10) - (n_AR_Q10))
 11664  			(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FsLTP_shp_Q10 = ((sLF_AR_shp_Q10) - (n_LF_Q10))
 11665  			(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FLF_AR_Q12 = ((sLF_AR_shp_Q10) << (2))
 11666  			(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).Fxq_Q14 = ((xq_Q10) << (4))
 11667  			(*NSQ_sample_struct)(unsafe.Pointer(psSS + 1*24)).FLPC_exc_Q16 = ((LPC_exc_Q10) << (6))
 11668  		}
 11669  
 11670  		*(*int32)(unsafe.Pointer(smpl_buf_idx)) = ((*(*int32)(unsafe.Pointer(smpl_buf_idx)) - 1) & (32 - 1)) /* Index to newest samples              */
 11671  		last_smple_idx = ((*(*int32)(unsafe.Pointer(smpl_buf_idx)) + decisionDelay) & (32 - 1))              /* Index to decisionDelay old samples   */
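		/* the delayed-decision buffers are 32 entries long; smpl_buf_idx steps backwards modulo 32 to the */
		/* newest slot, while last_smple_idx addresses the sample that is now decisionDelay samples old    */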
 11672  
 11673  		/* Find winner */
 11674  		RDmin_Q10 = (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */))).FRD_Q10
 11675  		Winner_ind = 0
 11676  		for k = 1; k < nStatesDelayedDecision; k++ {
 11677  			if (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48))).FRD_Q10 < RDmin_Q10 {
 11678  				RDmin_Q10 = (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48))).FRD_Q10
 11679  				Winner_ind = k
 11680  			}
 11681  		}
 11682  
 11683  		/* Increase RD values of expired states */
 11684  		Winner_rand_state = *(*int32)(unsafe.Pointer((psDelDec + uintptr(Winner_ind)*1456 /* &.RandState */) + uintptr(last_smple_idx)*4))
 11685  		for k = 0; k < nStatesDelayedDecision; k++ {
 11686  			if *(*int32)(unsafe.Pointer((psDelDec + uintptr(k)*1456 /* &.RandState */) + uintptr(last_smple_idx)*4)) != Winner_rand_state {
 11687  				(*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48))).FRD_Q10 = (((*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48))).FRD_Q10) + (int32(0x7FFFFFFF) >> 4))
 11688  				(*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48) + 1*24)).FRD_Q10 = (((*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48) + 1*24)).FRD_Q10) + (int32(0x7FFFFFFF) >> 4))
 11689  
 11690  			}
 11691  		}
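		/* a state whose buffered random seed differs from the winner's diverged from the winning excitation */
		/* path decisionDelay samples ago; adding ~0x07FFFFFF to its RD removes it from further competition  */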
 11692  
 11693  		/* Find worst in first set and best in second set */
 11694  		RDmax_Q10 = (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */))).FRD_Q10
 11695  		RDmin_Q10 = (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */) + 1*24)).FRD_Q10
 11696  		RDmax_ind = 0
 11697  		RDmin_ind = 0
 11698  		for k = 1; k < nStatesDelayedDecision; k++ {
 11699  			/* find worst in first set */
 11700  			if (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48))).FRD_Q10 > RDmax_Q10 {
 11701  				RDmax_Q10 = (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48))).FRD_Q10
 11702  				RDmax_ind = k
 11703  			}
 11704  			/* find best in second set */
 11705  			if (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48) + 1*24)).FRD_Q10 < RDmin_Q10 {
 11706  				RDmin_Q10 = (*NSQ_sample_struct)(unsafe.Pointer((bp /* &psSampleState */ + uintptr(k)*48) + 1*24)).FRD_Q10
 11707  				RDmin_ind = k
 11708  			}
 11709  		}
 11710  
 11711  		/* Replace a state if best from second set outperforms worst in first set */
 11712  		if RDmin_Q10 < RDmax_Q10 {
 11713  			SKP_Silk_copy_del_dec_state(tls, (psDelDec + uintptr(RDmax_ind)*1456), (psDelDec + uintptr(RDmin_ind)*1456), i)
 11714  			libc.Xmemcpy(tls, (bp /* &psSampleState */ + uintptr(RDmax_ind)*48), ((bp /* &psSampleState */ + uintptr(RDmin_ind)*48) + 1*24), uint32(unsafe.Sizeof(NSQ_sample_struct{})))
 11715  		}
 11716  
 11717  		/* Write samples from winner to output and long-term filter states */
 11718  		psDD = (psDelDec + uintptr(Winner_ind)*1456)
 11719  		if (subfr > 0) || (i >= decisionDelay) {
 11720  			*(*int8)(unsafe.Pointer(q + uintptr((i - decisionDelay)))) = (int8((*(*int32)(unsafe.Pointer((psDD + 128 /* &.Q_Q10 */) + uintptr(last_smple_idx)*4))) >> (10)))
 11721  			*(*int16)(unsafe.Pointer(xq + uintptr((i-decisionDelay))*2)) = func() int16 {
 11722  				if (func() int32 {
 11723  					if (10) == 1 {
 11724  						return (((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11725  							if (16) == 1 {
 11726  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11727  							}
 11728  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11729  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11730  							if (16) == 1 {
 11731  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11732  							}
 11733  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11734  						}()))) & 1))
 11735  					}
 11736  					return ((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11737  						if (16) == 1 {
 11738  							return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11739  						}
 11740  						return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11741  					}()))) >> ((10) - 1)) + 1) >> 1)
 11742  				}()) > 0x7FFF {
 11743  					return int16(0x7FFF)
 11744  				}
 11745  				return func() int16 {
 11746  					if (func() int32 {
 11747  						if (10) == 1 {
 11748  							return (((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11749  								if (16) == 1 {
 11750  									return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11751  								}
 11752  								return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11753  							}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11754  								if (16) == 1 {
 11755  									return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11756  								}
 11757  								return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11758  							}()))) & 1))
 11759  						}
 11760  						return ((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11761  							if (16) == 1 {
 11762  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11763  							}
 11764  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11765  						}()))) >> ((10) - 1)) + 1) >> 1)
 11766  					}()) < (int32(libc.Int16FromInt32(0x8000))) {
 11767  						return libc.Int16FromInt32(0x8000)
 11768  					}
 11769  					return func() int16 {
 11770  						if (10) == 1 {
 11771  							return (int16(((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11772  								if (16) == 1 {
 11773  									return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11774  								}
 11775  								return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11776  							}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11777  								if (16) == 1 {
 11778  									return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11779  								}
 11780  								return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11781  							}()))) & 1)))
 11782  						}
 11783  						return (int16((((((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(last_smple_idx)*4))) * (func() int32 {
 11784  							if (16) == 1 {
 11785  								return (((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) & 1))
 11786  							}
 11787  							return ((((*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(last_smple_idx)*4))) >> ((16) - 1)) + 1) >> 1)
 11788  						}()))) >> ((10) - 1)) + 1) >> 1))
 11789  					}()
 11790  				}()
 11791  			}()
 11792  			*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx-decisionDelay))*4)) = *(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(last_smple_idx)*4))
 11793  			*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx-decisionDelay))*4)) = *(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(last_smple_idx)*4))
 11794  		}
 11795  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx++
 11796  		(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx++
 11797  
 11798  		/* Update states */
 11799  		for k = 0; k < nStatesDelayedDecision; k++ {
 11800  			psDD = (psDelDec + uintptr(k)*1456)
 11801  			psSS = (bp /* &psSampleState */ + uintptr(k)*48)
 11802  			(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12 = (*NSQ_sample_struct)(unsafe.Pointer(psSS)).FLF_AR_Q12
 11803  			*(*int32)(unsafe.Pointer((psDD + 832 /* &.sLPC_Q14 */) + uintptr((32+i))*4)) = (*NSQ_sample_struct)(unsafe.Pointer(psSS)).Fxq_Q14
 11804  			*(*int32)(unsafe.Pointer((psDD + 256 /* &.Xq_Q10 */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4)) = (((*NSQ_sample_struct)(unsafe.Pointer(psSS)).Fxq_Q14) >> (4))
 11805  			*(*int32)(unsafe.Pointer((psDD + 128 /* &.Q_Q10 */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4)) = (*NSQ_sample_struct)(unsafe.Pointer(psSS)).FQ_Q10
 11806  			*(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4)) = (*NSQ_sample_struct)(unsafe.Pointer(psSS)).FLPC_exc_Q16
 11807  			*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4)) = (*NSQ_sample_struct)(unsafe.Pointer(psSS)).FsLTP_shp_Q10
 11808  			(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed = (((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed) + (((*NSQ_sample_struct)(unsafe.Pointer(psSS)).FQ_Q10) >> (10)))
 11809  			*(*int32)(unsafe.Pointer((psDD /* &.RandState */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4)) = (*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FSeed
 11810  			(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FRD_Q10 = (*NSQ_sample_struct)(unsafe.Pointer(psSS)).FRD_Q10
 11811  			*(*int32)(unsafe.Pointer((psDD + 640 /* &.Gain_Q16 */) + uintptr(*(*int32)(unsafe.Pointer(smpl_buf_idx)))*4)) = Gain_Q16
 11812  		}
 11813  	}
 11814  	/* Update LPC states */
 11815  	for k = 0; k < nStatesDelayedDecision; k++ {
 11816  		psDD = (psDelDec + uintptr(k)*1456)
 11817  		libc.Xmemcpy(tls, psDD+832 /* &.sLPC_Q14 */, ((psDD + 832 /* &.sLPC_Q14 */) + uintptr(length)*4), (uint32(32) * uint32(unsafe.Sizeof(int32(0)))))
 11818  	}
 11819  }
 11820  
 11821  func SKP_Silk_nsq_del_dec_scale_states(tls *libc.TLS, NSQ uintptr, psDelDec uintptr, x uintptr, x_sc_Q10 uintptr, subfr_length int32, sLTP uintptr, sLTP_Q16 uintptr, subfr int32, nStatesDelayedDecision int32, smpl_buf_idx int32, LTP_scale_Q14 int32, Gains_Q16 uintptr, pitchL uintptr) { /* SKP_Silk_NSQ_del_dec.c:603:17: */
 11822  	var i int32
 11823  	var k int32
 11824  	var lag int32
 11825  	var inv_gain_Q16 int32
 11826  	var gain_adj_Q16 int32
 11827  	var inv_gain_Q32 int32
 11828  	var psDD uintptr
 11829  
 11830  	inv_gain_Q16 = SKP_INVERSE32_varQ(tls, func() int32 {
 11831  		if (*(*int32)(unsafe.Pointer(Gains_Q16 + uintptr(subfr)*4))) > (1) {
 11832  			return *(*int32)(unsafe.Pointer(Gains_Q16 + uintptr(subfr)*4))
 11833  		}
 11834  		return 1
 11835  	}(), 32)
 11836  	inv_gain_Q16 = func() int32 {
 11837  		if (inv_gain_Q16) < (0x7FFF) {
 11838  			return inv_gain_Q16
 11839  		}
 11840  		return 0x7FFF
 11841  	}()
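	/* inv_gain_Q16 approximates 1 / Gains_Q16[ subfr ] in Q16 and is capped at 0x7FFF */
	/* so the 16-bit multiplies below cannot overflow                                  */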
 11842  	lag = *(*int32)(unsafe.Pointer(pitchL + uintptr(subfr)*4))
 11843  
 11844  	/* After rewhitening the LTP state is un-scaled, so scale with inv_gain_Q16 */
 11845  	if (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag != 0 {
 11846  		inv_gain_Q32 = ((inv_gain_Q16) << (16))
 11847  		if subfr == 0 {
 11848  			/* Do LTP downscaling */
 11849  			inv_gain_Q32 = (((((inv_gain_Q32) >> 16) * (int32(int16(LTP_scale_Q14)))) + ((((inv_gain_Q32) & 0x0000FFFF) * (int32(int16(LTP_scale_Q14)))) >> 16)) << (2))
 11850  		}
 11851  		for i = (((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx - lag) - (5 / 2)); i < (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx; i++ {
 11852  
 11853  			*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)) = ((((inv_gain_Q32) >> 16) * (int32(*(*int16)(unsafe.Pointer(sLTP + uintptr(i)*2))))) + ((((inv_gain_Q32) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(sLTP + uintptr(i)*2))))) >> 16))
 11854  		}
 11855  	}
 11856  
 11857  	/* Adjust for changing gain */
 11858  	if inv_gain_Q16 != (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Fprev_inv_gain_Q16 {
 11859  		gain_adj_Q16 = SKP_DIV32_varQ(tls, inv_gain_Q16, (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Fprev_inv_gain_Q16, 16)
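		/* gain_adj_Q16 is the Q16 ratio inv_gain / prev_inv_gain; the running Q-domain states below */
		/* are rescaled by it so the quantizer sees a continuous signal across the gain change       */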
 11860  
 11861  		/* Scale long-term shaping state */
 11862  		for i = ((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx - (subfr_length * 4)); i < (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_shp_buf_idx; i++ {
 11863  			*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11864  				if (16) == 1 {
 11865  					return (((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4))) & 1))
 11866  				}
 11867  				return ((((*(*int32)(unsafe.Pointer((NSQ + 1920 /* &.sLTP_shp_Q10 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11868  			}())))
 11869  		}
 11870  
 11871  		/* Scale long-term prediction state */
 11872  		if (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Frewhite_flag == 0 {
 11873  			for i = (((*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx - lag) - (5 / 2)); i < (*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).FsLTP_buf_idx; i++ {
 11874  				*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11875  					if (16) == 1 {
 11876  						return (((*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4))) & 1))
 11877  					}
 11878  					return ((((*(*int32)(unsafe.Pointer(sLTP_Q16 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11879  				}())))
 11880  			}
 11881  		}
 11882  
 11883  		for k = 0; k < nStatesDelayedDecision; k++ {
 11884  			psDD = (psDelDec + uintptr(k)*1456)
 11885  
 11886  			/* Scale scalar states */
 11887  			(*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12 = (((((gain_adj_Q16) >> 16) * (int32(int16((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12)))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12)))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11888  				if (16) == 1 {
 11889  					return ((((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12) >> 1) + (((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12) & 1))
 11890  				}
 11891  				return (((((*NSQ_del_dec_struct)(unsafe.Pointer(psDD)).FLF_AR_Q12) >> ((16) - 1)) + 1) >> 1)
 11892  			}())))
 11893  
 11894  			/* Scale short-term prediction and shaping states */
 11895  			for i = 0; i < 32; i++ {
 11896  				*(*int32)(unsafe.Pointer((psDD + 832 /* &.sLPC_Q14 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 832 /* &.sLPC_Q14 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 832 /* &.sLPC_Q14 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11897  					if (16) == 1 {
 11898  						return (((*(*int32)(unsafe.Pointer((psDD + 832 /* &.sLPC_Q14 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 832 /* &.sLPC_Q14 */) + uintptr(i)*4))) & 1))
 11899  					}
 11900  					return ((((*(*int32)(unsafe.Pointer((psDD + 832 /* &.sLPC_Q14 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11901  				}())))
 11902  			}
 11903  			for i = 0; i < 16; i++ {
 11904  				*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11905  					if (16) == 1 {
 11906  						return (((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr(i)*4))) & 1))
 11907  					}
 11908  					return ((((*(*int32)(unsafe.Pointer((psDD + 768 /* &.sAR2_Q14 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11909  				}())))
 11910  			}
 11911  			for i = 0; i < 32; i++ {
 11912  				*(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11913  					if (16) == 1 {
 11914  						return (((*(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(i)*4))) & 1))
 11915  					}
 11916  					return ((((*(*int32)(unsafe.Pointer((psDD + 384 /* &.Pred_Q16 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11917  				}())))
 11918  				*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(i)*4)) = (((((gain_adj_Q16) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(i)*4)))))) + ((((gain_adj_Q16) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(i)*4)))))) >> 16)) + ((gain_adj_Q16) * (func() int32 {
 11919  					if (16) == 1 {
 11920  						return (((*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(i)*4))) & 1))
 11921  					}
 11922  					return ((((*(*int32)(unsafe.Pointer((psDD + 512 /* &.Shape_Q10 */) + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 11923  				}())))
 11924  			}
 11925  		}
 11926  	}
 11927  
 11928  	/* Scale input */
 11929  	for i = 0; i < subfr_length; i++ {
 11930  		*(*int32)(unsafe.Pointer(x_sc_Q10 + uintptr(i)*4)) = (((int32(*(*int16)(unsafe.Pointer(x + uintptr(i)*2)))) * (int32(int16(inv_gain_Q16)))) >> (6))
 11931  	}
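	/* int16 input times the Q16 inverse gain gives Q16; the >> 6 leaves x_sc_Q10 in Q10 */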
 11932  
 11933  	/* save inv_gain */
 11934  
 11935  	(*SKP_Silk_nsq_state)(unsafe.Pointer(NSQ)).Fprev_inv_gain_Q16 = inv_gain_Q16
 11936  }
 11937  
 11938  func SKP_Silk_copy_del_dec_state(tls *libc.TLS, DD_dst uintptr, DD_src uintptr, LPC_state_idx int32) { /* SKP_Silk_NSQ_del_dec.c:686:17: */
 11939  	libc.Xmemcpy(tls, DD_dst /* &.RandState */, DD_src /* &.RandState */, uint32(unsafe.Sizeof([32]int32{})))
 11940  	libc.Xmemcpy(tls, DD_dst+128 /* &.Q_Q10 */, DD_src+128 /* &.Q_Q10 */, uint32(unsafe.Sizeof([32]int32{})))
 11941  	libc.Xmemcpy(tls, DD_dst+384 /* &.Pred_Q16 */, DD_src+384 /* &.Pred_Q16 */, uint32(unsafe.Sizeof([32]int32{})))
 11942  	libc.Xmemcpy(tls, DD_dst+512 /* &.Shape_Q10 */, DD_src+512 /* &.Shape_Q10 */, uint32(unsafe.Sizeof([32]int32{})))
 11943  	libc.Xmemcpy(tls, DD_dst+256 /* &.Xq_Q10 */, DD_src+256 /* &.Xq_Q10 */, uint32(unsafe.Sizeof([32]int32{})))
 11944  	libc.Xmemcpy(tls, DD_dst+768 /* &.sAR2_Q14 */, DD_src+768 /* &.sAR2_Q14 */, uint32(unsafe.Sizeof([16]int32{})))
 11945  	libc.Xmemcpy(tls, ((DD_dst + 832 /* &.sLPC_Q14 */) + uintptr(LPC_state_idx)*4), ((DD_src + 832 /* &.sLPC_Q14 */) + uintptr(LPC_state_idx)*4), (uint32(32) * uint32(unsafe.Sizeof(int32(0)))))
 11946  	(*NSQ_del_dec_struct)(unsafe.Pointer(DD_dst)).FLF_AR_Q12 = (*NSQ_del_dec_struct)(unsafe.Pointer(DD_src)).FLF_AR_Q12
 11947  	(*NSQ_del_dec_struct)(unsafe.Pointer(DD_dst)).FSeed = (*NSQ_del_dec_struct)(unsafe.Pointer(DD_src)).FSeed
 11948  	(*NSQ_del_dec_struct)(unsafe.Pointer(DD_dst)).FSeedInit = (*NSQ_del_dec_struct)(unsafe.Pointer(DD_src)).FSeedInit
 11949  	(*NSQ_del_dec_struct)(unsafe.Pointer(DD_dst)).FRD_Q10 = (*NSQ_del_dec_struct)(unsafe.Pointer(DD_src)).FRD_Q10
 11950  }
 11951  
 11952  /*************************************************************/
 11953  /*      FIXED POINT CORE PITCH ANALYSIS FUNCTION             */
 11954  /*************************************************************/
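// SKP_Silk_pitch_analysis_core runs the fixed-point pitch search in stages: the
// input is decimated to 4 kHz for a coarse normalized-correlation scan over the
// 2-18 ms lag range, the surviving candidates are refined against an 8 kHz
// signal, and a final lag/contour search is done at the full input rate. Per the
// SILK sources the return value is the voicing decision (0 when a pitch lag was
// found, nonzero for unvoiced).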
 11955  func SKP_Silk_pitch_analysis_core(tls *libc.TLS, signal uintptr, pitch_out uintptr, lagIndex uintptr, contourIndex uintptr, LTPCorr_Q15 uintptr, prevLag int32, search_thres1_Q16 int32, search_thres2_Q15 int32, Fs_kHz int32, complexity int32, forLJC int32) int32 { /* SKP_Silk_pitch_analysis_core.c:65:9: */
 11956  	bp := tls.Alloc(20836)
 11957  	defer tls.Free(20836)
 11958  
 11959  	// var signal_8kHz [480]int16 at bp+1796, 960
 11960  
 11961  	// var signal_4kHz [240]int16 at bp+2812, 480
 11962  
 11963  	// var scratch_mem [2880]int32 at bp+3876, 11520
 11964  
 11965  	var input_signal_ptr uintptr
 11966  	// var filt_state [7]int32 at bp+1768, 28
 11967  
 11968  	var i int32
 11969  	var k int32
 11970  	var d int32
 11971  	var j int32
 11972  	// var C [4][221]int16 at bp, 1768
 11973  
 11974  	var target_ptr uintptr
 11975  	var basis_ptr uintptr
 11976  	var cross_corr int32
 11977  	var normalizer int32
 11978  	var energy int32
 11979  	var shift int32
 11980  	var energy_basis int32
 11981  	var energy_target int32
 11982  	// var d_srch [24]int32 at bp+3292, 96
 11983  
 11984  	// var d_comp [221]int16 at bp+3388, 442
 11985  
 11986  	var Cmax int32
 11987  	var length_d_srch int32
 11988  	var length_d_comp int32
 11989  	var sum int32
 11990  	var threshold int32
 11991  	var temp32 int32
 11992  	var CBimax int32
 11993  	var CBimax_new int32
 11994  	var CBimax_old int32
 11995  	var lag int32
 11996  	var start_lag int32
 11997  	var end_lag int32
 11998  	var lag_new int32
 11999  	// var CC [11]int32 at bp+3832, 44
 12000  
 12001  	var CCmax int32
 12002  	var CCmax_b int32
 12003  	var CCmax_new_b int32
 12004  	var CCmax_new int32
 12005  	// var energies_st3 [4][34][5]int32 at bp+18116, 2720
 12006  
 12007  	// var crosscorr_st3 [4][34][5]int32 at bp+15396, 2720
 12008  
 12009  	var lag_counter int32
 12010  	var frame_length int32
 12011  	var frame_length_8kHz int32
 12012  	var frame_length_4kHz int32
 12013  	var max_sum_sq_length int32
 12014  	var sf_length int32
 12015  	var sf_length_8kHz int32
 12016  	var min_lag int32
 12017  	var min_lag_8kHz int32
 12018  	var min_lag_4kHz int32
 12019  	var max_lag int32
 12020  	var max_lag_8kHz int32
 12021  	var max_lag_4kHz int32
 12022  	var contour_bias int32
 12023  	var diff int32
 12024  	var lz int32
 12025  	var lshift int32
 12026  	var cbk_offset int32
 12027  	var cbk_size int32
 12028  	var nb_cbks_stage2 int32
 12029  	var delta_lag_log2_sqr_Q7 int32
 12030  	var lag_log2_Q7 int32
 12031  	var prevLag_log2_Q7 int32
 12032  	var prev_lag_bias_Q15 int32
 12033  	var corr_thres_Q15 int32
 12034  
 12035  	/* Check for valid sampling frequency */
 12036  
 12037  	/* Check for valid complexity setting */
 12038  
 12039  	/* Setup frame lengths max / min lag for the sampling frequency */
 12040  	frame_length = (40 * Fs_kHz)
 12041  	frame_length_4kHz = (40 * 4)
 12042  	frame_length_8kHz = (40 * 8)
 12043  	sf_length = ((frame_length) >> (3))
 12044  	sf_length_8kHz = ((frame_length_8kHz) >> (3))
 12045  	min_lag = (2 * Fs_kHz)
 12046  	min_lag_4kHz = (2 * 4)
 12047  	min_lag_8kHz = (2 * 8)
 12048  	max_lag = (18 * Fs_kHz)
 12049  	max_lag_4kHz = (18 * 4)
 12050  	max_lag_8kHz = (18 * 8)
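	/* 40 ms analysis frame; candidate lags span 2-18 ms, expressed in samples at the input rate */
	/* and at the 8 kHz and 4 kHz decimated rates                                                */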
 12051  
 12052  	libc.Xmemset(tls, bp /* &C[0] */, 0, ((uint32(unsafe.Sizeof(int16(0))) * uint32(4)) * (uint32((int32((18 * 24)) >> 1) + 5))))
 12053  
 12054  	/* Resample from input sampled at Fs_kHz to 8 kHz */
 12055  	if Fs_kHz == 16 {
 12056  		libc.Xmemset(tls, bp+1768 /* &filt_state[0] */, 0, (uint32(2) * uint32(unsafe.Sizeof(int32(0)))))
 12057  		SKP_Silk_resampler_down2(tls, bp+1768 /* &filt_state[0] */, bp+1796 /* &signal_8kHz[0] */, signal, frame_length)
 12058  	} else if Fs_kHz == 12 {
 12059  		// var R23 [6]int32 at bp+2756, 24
 12060  
 12061  		libc.Xmemset(tls, bp+2756 /* &R23[0] */, 0, (uint32(6) * uint32(unsafe.Sizeof(int32(0)))))
 12062  		SKP_Silk_resampler_down2_3(tls, bp+2756 /* &R23[0] */, bp+1796 /* &signal_8kHz[0] */, signal, (40 * 12))
 12063  	} else if Fs_kHz == 24 {
 12064  		// var filt_state_fix [8]int32 at bp+2780, 32
 12065  
 12066  		libc.Xmemset(tls, bp+2780 /* &filt_state_fix[0] */, 0, (uint32(8) * uint32(unsafe.Sizeof(int32(0)))))
 12067  		SKP_Silk_resampler_down3(tls, bp+2780 /* &filt_state_fix[0] */, bp+1796 /* &signal_8kHz[0] */, signal, (24 * 40))
 12068  	} else {
 12069  
 12070  		libc.Xmemcpy(tls, bp+1796 /* &signal_8kHz[0] */, signal, (uint32(frame_length_8kHz) * uint32(unsafe.Sizeof(int16(0)))))
 12071  	}
 12072  	/* Decimate again to 4 kHz */
 12073  	libc.Xmemset(tls, bp+1768 /* &filt_state[0] */, 0, (uint32(2) * uint32(unsafe.Sizeof(int32(0))))) /* Set state to zero */
 12074  	SKP_Silk_resampler_down2(tls, bp+1768 /* &filt_state[0] */, bp+2812 /* &signal_4kHz[0] */, bp+1796 /* &signal_8kHz[0] */, frame_length_8kHz)
 12075  
 12076  	/* Low-pass filter */
 12077  	for i = (frame_length_4kHz - 1); i > 0; i-- {
 12078  		*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr(i)*2)) = func() int16 {
 12079  			if ((int32(*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr(i)*2)))) + (int32(*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr((i-1))*2))))) > 0x7FFF {
 12080  				return int16(0x7FFF)
 12081  			}
 12082  			return func() int16 {
 12083  				if ((int32(*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr(i)*2)))) + (int32(*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr((i-1))*2))))) < (int32(libc.Int16FromInt32(0x8000))) {
 12084  					return libc.Int16FromInt32(0x8000)
 12085  				}
 12086  				return (int16((int32(*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr(i)*2)))) + (int32(*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr((i-1))*2))))))
 12087  			}()
 12088  		}()
 12089  	}
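	/* the loop above is an in-place 2-tap { 1, 1 } FIR smoother on the 4 kHz signal, saturating to int16 */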
 12090  
 12091  	/*******************************************************************************
 12092  	 ** Scale 4 kHz signal down to prevent correlation measures from overflowing
 12093  	 ** find scaling as max scaling for each 8kHz(?) subframe
 12094  	 *******************************************************************************/
 12095  
 12096  	/* Inner product is calculated with different lengths, so scale for the worst case */
 12097  	max_sum_sq_length = SKP_max_32(tls, sf_length_8kHz, ((frame_length_4kHz) >> (1)))
 12098  	shift = SKP_FIX_P_Ana_find_scaling(tls, bp+2812 /* &signal_4kHz[0] */, frame_length_4kHz, max_sum_sq_length)
 12099  	if shift > 0 {
 12100  		for i = 0; i < frame_length_4kHz; i++ {
 12101  			*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr(i)*2)) = (int16((int32(*(*int16)(unsafe.Pointer(bp + 2812 /* &signal_4kHz[0] */ + uintptr(i)*2)))) >> (shift)))
 12102  		}
 12103  	}
 12104  
 12105  	/******************************************************************************
 12106  	 * FIRST STAGE, operating in 4 khz
 12107  	 ******************************************************************************/
 12108  	target_ptr = (bp + 2812 /* &signal_4kHz */ + uintptr(((frame_length_4kHz)>>(1)))*2)
 12109  	for k = 0; k < 2; k++ {
 12110  		/* Check that we are within range of the array */
 12111  
 12112  		basis_ptr = (target_ptr - uintptr(min_lag_4kHz)*2)
 12113  
 12114  		/* Check that we are within range of the array */
 12115  
 12116  		normalizer = 0
 12117  		cross_corr = 0
 12118  		/* Calculate first vector products before loop */
 12119  		cross_corr = SKP_Silk_inner_prod_aligned(tls, target_ptr, basis_ptr, sf_length_8kHz)
 12120  		normalizer = SKP_Silk_inner_prod_aligned(tls, basis_ptr, basis_ptr, sf_length_8kHz)
 12121  		normalizer = func() int32 {
 12122  			if ((uint32((normalizer) + ((int32(int16(sf_length_8kHz))) * (int32(int16(4000)))))) & 0x80000000) == uint32(0) {
 12123  				return func() int32 {
 12124  					if ((uint32((normalizer) & ((int32(int16(sf_length_8kHz))) * (int32(int16(4000)))))) & 0x80000000) != uint32(0) {
 12125  						return libc.Int32FromUint32(0x80000000)
 12126  					}
 12127  					return ((normalizer) + ((int32(int16(sf_length_8kHz))) * (int32(int16(4000)))))
 12128  				}()
 12129  			}
 12130  			return func() int32 {
 12131  				if ((uint32((normalizer) | ((int32(int16(sf_length_8kHz))) * (int32(int16(4000)))))) & 0x80000000) == uint32(0) {
 12132  					return 0x7FFFFFFF
 12133  				}
 12134  				return ((normalizer) + ((int32(int16(sf_length_8kHz))) * (int32(int16(4000)))))
 12135  			}()
 12136  		}()
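        		// The branchy expression above is the expanded saturating 32-bit add:
        		// normalizer += sf_length_8kHz * 4000. The added term acts, in effect,
        		// as a small energy floor that keeps the following division
        		// cross_corr / (sqrt(normalizer) + 1) well behaved for near-silent input.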
 12137  
 12138  		temp32 = ((cross_corr) / (SKP_Silk_SQRT_APPROX(tls, normalizer) + 1))
 12139  		*(*int16)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*442) + uintptr(min_lag_4kHz)*2)) = func() int16 {
 12140  			if (temp32) > 0x7FFF {
 12141  				return int16(0x7FFF)
 12142  			}
 12143  			return func() int16 {
 12144  				if (temp32) < (int32(libc.Int16FromInt32(0x8000))) {
 12145  					return libc.Int16FromInt32(0x8000)
 12146  				}
 12147  				return int16(temp32)
 12148  			}()
 12149  		}() /* Q0 */
 12150  
 12151  		/* From now on normalizer is computed recursively */
 12152  		for d = (min_lag_4kHz + 1); d <= max_lag_4kHz; d++ {
 12153  			basis_ptr -= 2
 12154  
 12155  			/* Check that we are within range of the array */
 12156  
 12157  			cross_corr = SKP_Silk_inner_prod_aligned(tls, target_ptr, basis_ptr, sf_length_8kHz)
 12158  
 12159  			/* Add contribution of new sample and remove contribution from oldest sample */
 12160  			normalizer = normalizer + (((int32(*(*int16)(unsafe.Pointer(basis_ptr)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr))))) - ((int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(sf_length_8kHz)*2)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(sf_length_8kHz)*2))))))
 12161  
 12162  			temp32 = ((cross_corr) / (SKP_Silk_SQRT_APPROX(tls, normalizer) + 1))
 12163  			*(*int16)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*442) + uintptr(d)*2)) = func() int16 {
 12164  				if (temp32) > 0x7FFF {
 12165  					return int16(0x7FFF)
 12166  				}
 12167  				return func() int16 {
 12168  					if (temp32) < (int32(libc.Int16FromInt32(0x8000))) {
 12169  						return libc.Int16FromInt32(0x8000)
 12170  					}
 12171  					return int16(temp32)
 12172  				}()
 12173  			}() /* Q0 */
 12174  		}
 12175  		/* Update target pointer */
 12176  		target_ptr += 2 * (uintptr(sf_length_8kHz))
 12177  	}
 12178  
 12179  	/* Combine two subframes into single correlation measure and apply short-lag bias */
 12180  	for i = max_lag_4kHz; i >= min_lag_4kHz; i-- {
 12181  		sum = (int32(*(*int16)(unsafe.Pointer((bp /* &C[0] */) + uintptr(i)*2))) + int32(*(*int16)(unsafe.Pointer((bp /* &C[0] */ + 1*442) + uintptr(i)*2)))) /* Q0 */
 12182  
 12183  		sum = ((sum) >> (1)) /* Q-1 */
 12184  
 12185  		sum = ((sum) + ((((sum) >> 16) * (int32((int16((-i) << (4)))))) + ((((sum) & 0x0000FFFF) * (int32((int16((-i) << (4)))))) >> 16))) /* Q-1 */
 12186  
 12187  		*(*int16)(unsafe.Pointer((bp /* &C[0] */) + uintptr(i)*2)) = int16(sum) /* Q-1 */
 12188  	}
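        	// sum is averaged over the two halves (>> 1) and then scaled by roughly
        	// (1 - i/4096): the expression above adds sum * ((-i << 4) / 2^16), so the
        	// combined correlation for longer lags is attenuated slightly more. This is
        	// the short-lag bias mentioned in the comment above.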
 12189  
 12190  	/* Sort */
 12191  	length_d_srch = (4 + (2 * complexity))
 12192  
 12193  	SKP_Silk_insertion_sort_decreasing_int16(tls, ((bp /* &C */) + uintptr(min_lag_4kHz)*2), bp+3292 /* &d_srch[0] */, ((max_lag_4kHz - min_lag_4kHz) + 1), length_d_srch)
 12194  
 12195  	/* Escape if correlation is very low already here */
 12196  	target_ptr = (bp + 2812 /* &signal_4kHz */ + uintptr(((frame_length_4kHz)>>(1)))*2)
 12197  	energy = SKP_Silk_inner_prod_aligned(tls, target_ptr, target_ptr, ((frame_length_4kHz) >> (1)))
 12198  	energy = func() int32 {
 12199  		if ((uint32((energy) + (1000))) & 0x80000000) != 0 {
 12200  			return 0x7FFFFFFF
 12201  		}
 12202  		return ((energy) + (1000))
 12203  	}() /* Q0 */
 12204  	Cmax = int32(*(*int16)(unsafe.Pointer((bp /* &C[0] */) + uintptr(min_lag_4kHz)*2))) /* Q-1 */
 12205  	threshold = ((int32(int16(Cmax))) * (int32(int16(Cmax))))                           /* Q-2 */
 12206  	/* Compare in Q-2 domain */
 12207  	if ((energy) >> (4 + 2)) > threshold {
 12208  		libc.Xmemset(tls, pitch_out, 0, (uint32(4) * uint32(unsafe.Sizeof(int32(0)))))
 12209  		*(*int32)(unsafe.Pointer(LTPCorr_Q15)) = 0
 12210  		*(*int32)(unsafe.Pointer(lagIndex)) = 0
 12211  		*(*int32)(unsafe.Pointer(contourIndex)) = 0
 12212  		return 1
 12213  	}
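        	// Early escape: when energy >> 6 exceeds threshold = Cmax * Cmax (compared
        	// in the Q-2 domain), the strongest combined correlation is judged too weak
        	// for a pitch estimate and the function returns 1 (the "not voiced" result;
        	// the normal exit at the end returns 0 for voiced).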
 12214  
 12215  	threshold = ((((search_thres1_Q16) >> 16) * (int32(int16(Cmax)))) + ((((search_thres1_Q16) & 0x0000FFFF) * (int32(int16(Cmax)))) >> 16))
 12216  	for i = 0; i < length_d_srch; i++ {
 12217  		/* Convert to 8 kHz indices for the sorted correlation that exceeds the threshold */
 12218  		if int32(*(*int16)(unsafe.Pointer((bp /* &C[0] */) + uintptr((min_lag_4kHz+i))*2))) > threshold {
 12219  			*(*int32)(unsafe.Pointer(bp + 3292 /* &d_srch[0] */ + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer(bp + 3292 /* &d_srch[0] */ + uintptr(i)*4)) + min_lag_4kHz) << 1)
 12220  		} else {
 12221  			length_d_srch = i
 12222  			break
 12223  		}
 12224  	}
 12225  
 12226  	for i = (min_lag_8kHz - 5); i < (max_lag_8kHz + 5); i++ {
 12227  		*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr(i)*2)) = int16(0)
 12228  	}
 12229  	for i = 0; i < length_d_srch; i++ {
 12230  		*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr(*(*int32)(unsafe.Pointer(bp + 3292 /* &d_srch[0] */ + uintptr(i)*4)))*2)) = int16(1)
 12231  	}
 12232  
 12233  	/* Convolution */
 12234  	for i = (max_lag_8kHz + 3); i >= min_lag_8kHz; i-- {
 12235  		*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp */ + uintptr(i)*2)) += int16((int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr((i-1))*2))) + int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr((i-2))*2)))))
 12236  	}
 12237  
 12238  	length_d_srch = 0
 12239  	for i = min_lag_8kHz; i < (max_lag_8kHz + 1); i++ {
 12240  		if int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr((i+1))*2))) > 0 {
 12241  			*(*int32)(unsafe.Pointer(bp + 3292 /* &d_srch[0] */ + uintptr(length_d_srch)*4)) = i
 12242  			length_d_srch++
 12243  		}
 12244  	}
 12245  
 12246  	/* Convolution */
 12247  	for i = (max_lag_8kHz + 3); i >= min_lag_8kHz; i-- {
 12248  		*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp */ + uintptr(i)*2)) += int16(((int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr((i-1))*2))) + int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr((i-2))*2)))) + int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr((i-3))*2)))))
 12249  	}
 12250  
 12251  	length_d_comp = 0
 12252  	for i = min_lag_8kHz; i < (max_lag_8kHz + 4); i++ {
 12253  		if int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr(i)*2))) > 0 {
 12254  			*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr(length_d_comp)*2)) = (int16(i - 2))
 12255  			length_d_comp++
 12256  		}
 12257  	}
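        	// The two convolution passes above smear each surviving 4 kHz candidate
        	// over a few neighbouring 8 kHz lags, so d_comp ends up listing every lag
        	// in a small window around each candidate; the second stage only evaluates
        	// those lags.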
 12258  
 12259  	/**********************************************************************************
 12260  	 ** SECOND STAGE, operating at 8 kHz, on lag sections with high correlation
 12261  	 *************************************************************************************/
 12262  
 12263  	/******************************************************************************
 12264  	 ** Scale signal down to avoid overflow in the correlation measures
 12265  	 *******************************************************************************/
 12266  	/* find scaling as max scaling for each subframe */
 12267  	shift = SKP_FIX_P_Ana_find_scaling(tls, bp+1796 /* &signal_8kHz[0] */, frame_length_8kHz, sf_length_8kHz)
 12268  	if shift > 0 {
 12269  		for i = 0; i < frame_length_8kHz; i++ {
 12270  			*(*int16)(unsafe.Pointer(bp + 1796 /* &signal_8kHz[0] */ + uintptr(i)*2)) = (int16((int32(*(*int16)(unsafe.Pointer(bp + 1796 /* &signal_8kHz[0] */ + uintptr(i)*2)))) >> (shift)))
 12271  		}
 12272  	}
 12273  
 12274  	/*********************************************************************************
 12275  	 * Find energy of each subframe projected onto its history, for a range of delays
 12276  	 *********************************************************************************/
 12277  	libc.Xmemset(tls, bp /* &C[0] */, 0, ((uint32(4 * ((int32((18 * 24)) >> 1) + 5))) * uint32(unsafe.Sizeof(int16(0)))))
 12278  
 12279  	target_ptr = (bp + 1796 /* &signal_8kHz */ + uintptr(frame_length_4kHz)*2) /* point to middle of frame */
 12280  	for k = 0; k < 4; k++ {
 12281  
 12282  		/* Check that we are within range of the array */
 12283  
 12284  		energy_target = SKP_Silk_inner_prod_aligned(tls, target_ptr, target_ptr, sf_length_8kHz)
 12285  		// ToDo: Calculate 1 / energy_target here and save one division inside next for loop
 12286  		for j = 0; j < length_d_comp; j++ {
 12287  			d = int32(*(*int16)(unsafe.Pointer(bp + 3388 /* &d_comp[0] */ + uintptr(j)*2)))
 12288  			basis_ptr = (target_ptr - uintptr(d)*2)
 12289  
 12290  			/* Check that we are within range of the array */
 12291  
 12292  			cross_corr = SKP_Silk_inner_prod_aligned(tls, target_ptr, basis_ptr, sf_length_8kHz)
 12293  			energy_basis = SKP_Silk_inner_prod_aligned(tls, basis_ptr, basis_ptr, sf_length_8kHz)
 12294  			if cross_corr > 0 {
 12295  				energy = func() int32 {
 12296  					if (energy_target) > (energy_basis) {
 12297  						return energy_target
 12298  					}
 12299  					return energy_basis
 12300  				}() /* Find max to make sure first division < 1.0 */
 12301  				lz = SKP_Silk_CLZ32(tls, cross_corr)
 12302  				lshift = func() int32 {
 12303  					if (0) > (15) {
 12304  						return func() int32 {
 12305  							if (lz - 1) > (0) {
 12306  								return 0
 12307  							}
 12308  							return func() int32 {
 12309  								if (lz - 1) < (15) {
 12310  									return 15
 12311  								}
 12312  								return (lz - 1)
 12313  							}()
 12314  						}()
 12315  					}
 12316  					return func() int32 {
 12317  						if (lz - 1) > (15) {
 12318  							return 15
 12319  						}
 12320  						return func() int32 {
 12321  							if (lz - 1) < (0) {
 12322  								return 0
 12323  							}
 12324  							return (lz - 1)
 12325  						}()
 12326  					}()
 12327  				}()
 12328  				temp32 = (((cross_corr) << (lshift)) / (((energy) >> (15 - lshift)) + 1)) /* Q15 */
 12329  
 12330  				temp32 = ((((cross_corr) >> 16) * (int32(int16(temp32)))) + ((((cross_corr) & 0x0000FFFF) * (int32(int16(temp32)))) >> 16)) /* Q(-1), cc * ( cc / max(b, t) ) */
 12331  				temp32 = func() int32 {
 12332  					if ((uint32((temp32) + (temp32))) & 0x80000000) == uint32(0) {
 12333  						return func() int32 {
 12334  							if ((uint32((temp32) & (temp32))) & 0x80000000) != uint32(0) {
 12335  								return libc.Int32FromUint32(0x80000000)
 12336  							}
 12337  							return ((temp32) + (temp32))
 12338  						}()
 12339  					}
 12340  					return func() int32 {
 12341  						if ((uint32((temp32) | (temp32))) & 0x80000000) == uint32(0) {
 12342  							return 0x7FFFFFFF
 12343  						}
 12344  						return ((temp32) + (temp32))
 12345  					}()
 12346  				}() /* Q(0) */
 12347  				lz = SKP_Silk_CLZ32(tls, temp32)
 12348  				lshift = func() int32 {
 12349  					if (0) > (15) {
 12350  						return func() int32 {
 12351  							if (lz - 1) > (0) {
 12352  								return 0
 12353  							}
 12354  							return func() int32 {
 12355  								if (lz - 1) < (15) {
 12356  									return 15
 12357  								}
 12358  								return (lz - 1)
 12359  							}()
 12360  						}()
 12361  					}
 12362  					return func() int32 {
 12363  						if (lz - 1) > (15) {
 12364  							return 15
 12365  						}
 12366  						return func() int32 {
 12367  							if (lz - 1) < (0) {
 12368  								return 0
 12369  							}
 12370  							return (lz - 1)
 12371  						}()
 12372  					}()
 12373  				}()
 12374  				energy = func() int32 {
 12375  					if (energy_target) < (energy_basis) {
 12376  						return energy_target
 12377  					}
 12378  					return energy_basis
 12379  				}()
 12380  				*(*int16)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*442) + uintptr(d)*2)) = int16((((temp32) << (lshift)) / (((energy) >> (15 - lshift)) + 1))) // Q15
 12381  			} else {
 12382  				*(*int16)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*442) + uintptr(d)*2)) = int16(0)
 12383  			}
 12384  		}
 12385  		target_ptr += 2 * (uintptr(sf_length_8kHz))
 12386  	}
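        	// Up to rounding, each C[k][d] computed above holds
        	// cross_corr^2 / (energy_target * energy_basis) in Q15, i.e. the squared
        	// normalized correlation of subframe k with its history at lag d; lags with
        	// negative correlation are simply set to zero.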
 12387  
 12388  	/* search over lag range and lags codebook */
 12389  	/* scale factor for lag codebook, as a function of center lag */
 12390  
 12391  	CCmax = libc.Int32FromUint32(0x80000000)
 12392  	CCmax_b = libc.Int32FromUint32(0x80000000)
 12393  
 12394  	CBimax = 0 /* To avoid returning undefined lag values */
 12395  	lag = -1   /* To check if lag with strong enough correlation has been found */
 12396  
 12397  	if prevLag > 0 {
 12398  		if Fs_kHz == 12 {
 12399  			prevLag = (((prevLag) << (1)) / (3))
 12400  		} else if Fs_kHz == 16 {
 12401  			prevLag = ((prevLag) >> (1))
 12402  		} else if Fs_kHz == 24 {
 12403  			prevLag = ((prevLag) / (3))
 12404  		}
 12405  		prevLag_log2_Q7 = SKP_Silk_lin2log(tls, prevLag)
 12406  	} else {
 12407  		prevLag_log2_Q7 = 0
 12408  	}
 12409  
 12410  	corr_thres_Q15 = (((int32(int16(search_thres2_Q15))) * (int32(int16(search_thres2_Q15)))) >> (13))
 12411  
 12412  	/* If input is 8 kHz, use a larger codebook here because it is the last stage */
 12413  	if (Fs_kHz == 8) && (complexity > 0) {
 12414  		nb_cbks_stage2 = 11
 12415  	} else {
 12416  		nb_cbks_stage2 = 3
 12417  	}
 12418  
 12419  	for k = 0; k < length_d_srch; k++ {
 12420  		d = *(*int32)(unsafe.Pointer(bp + 3292 /* &d_srch[0] */ + uintptr(k)*4))
 12421  		for j = 0; j < nb_cbks_stage2; j++ {
 12422  			*(*int32)(unsafe.Pointer(bp + 3832 /* &CC[0] */ + uintptr(j)*4)) = 0
 12423  			for i = 0; i < 4; i++ {
 12424  				/* Try all codebooks */
 12425  				*(*int32)(unsafe.Pointer(bp + 3832 /* &CC[0] */ + uintptr(j)*4)) = (*(*int32)(unsafe.Pointer(bp + 3832 /* &CC[0] */ + uintptr(j)*4)) + int32(*(*int16)(unsafe.Pointer((bp /* &C[0] */ + uintptr(i)*442) + uintptr((d+int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage2)) + uintptr(i)*22) + uintptr(j)*2)))))*2))))
 12426  			}
 12427  		}
 12428  		/* Find best codebook */
 12429  		CCmax_new = libc.Int32FromUint32(0x80000000)
 12430  		CBimax_new = 0
 12431  		for i = 0; i < nb_cbks_stage2; i++ {
 12432  			if *(*int32)(unsafe.Pointer(bp + 3832 /* &CC[0] */ + uintptr(i)*4)) > CCmax_new {
 12433  				CCmax_new = *(*int32)(unsafe.Pointer(bp + 3832 /* &CC[0] */ + uintptr(i)*4))
 12434  				CBimax_new = i
 12435  			}
 12436  		}
 12437  
 12438  		/* Bias towards shorter lags */
 12439  		lag_log2_Q7 = SKP_Silk_lin2log(tls, d) /* Q7 */
 12440  
 12441  		if forLJC != 0 {
 12442  			CCmax_new_b = CCmax_new
 12443  		} else {
 12444  			CCmax_new_b = (CCmax_new - (((int32((int16(4 * 6554)))) * (int32(int16(lag_log2_Q7)))) >> (7))) /* Q15 */
 12445  		}
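        		// Unless forLJC is set, the candidate score is reduced by roughly
        		// 0.8 * log2(d) in Q15 (26216 ~= 4 * 0.2 in Q15), biasing the search
        		// towards shorter lags; CCmax_new itself is the sum of four Q15
        		// subframe correlations.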
 12446  
 12447  		/* Bias towards previous lag */
 12448  
 12449  		if prevLag > 0 {
 12450  			delta_lag_log2_sqr_Q7 = (lag_log2_Q7 - prevLag_log2_Q7)
 12451  
 12452  			delta_lag_log2_sqr_Q7 = (((int32(int16(delta_lag_log2_sqr_Q7))) * (int32(int16(delta_lag_log2_sqr_Q7)))) >> (7))
 12453  			prev_lag_bias_Q15 = (((int32((int16(4 * 6554)))) * (int32(int16(*(*int32)(unsafe.Pointer(LTPCorr_Q15)))))) >> (15)) /* Q15 */
 12454  			prev_lag_bias_Q15 = (((prev_lag_bias_Q15) * (delta_lag_log2_sqr_Q7)) / (delta_lag_log2_sqr_Q7 + (int32(1) << 6)))
 12455  			CCmax_new_b = CCmax_new_b - (prev_lag_bias_Q15) /* Q15 */
 12456  		}
 12457  
 12458  		if ((CCmax_new_b > CCmax_b) && (CCmax_new > corr_thres_Q15)) && (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage2))) + uintptr(CBimax_new)*2))) <= min_lag_8kHz) {
 12459  			CCmax_b = CCmax_new_b
 12460  			CCmax = CCmax_new
 12461  			lag = d
 12462  			CBimax = CBimax_new
 12463  		}
 12464  	}
 12465  
 12466  	if lag == -1 {
 12467  		/* No suitable candidate found */
 12468  		libc.Xmemset(tls, pitch_out, 0, (uint32(4) * uint32(unsafe.Sizeof(int32(0)))))
 12469  		*(*int32)(unsafe.Pointer(LTPCorr_Q15)) = 0
 12470  		*(*int32)(unsafe.Pointer(lagIndex)) = 0
 12471  		*(*int32)(unsafe.Pointer(contourIndex)) = 0
 12472  		return 1
 12473  	}
 12474  
 12475  	if Fs_kHz > 8 {
 12476  
 12477  		/******************************************************************************
 12478  		 ** Scale input signal down to avoid overflow in the correlation measures
 12479  		 *******************************************************************************/
 12480  		/* find scaling as max scaling for each subframe */
 12481  		shift = SKP_FIX_P_Ana_find_scaling(tls, signal, frame_length, sf_length)
 12482  		if shift > 0 {
 12483  			/* Move signal to scratch mem because the input signal should be unchanged */
 12484  			/* Reuse the 32 bit scratch mem vector, use a 16 bit pointer from now on */
 12485  			input_signal_ptr = bp + 3876 /* scratch_mem */
 12486  			for i = 0; i < frame_length; i++ {
 12487  				*(*int16)(unsafe.Pointer(input_signal_ptr + uintptr(i)*2)) = (int16((int32(*(*int16)(unsafe.Pointer(signal + uintptr(i)*2)))) >> (shift)))
 12488  			}
 12489  		} else {
 12490  			input_signal_ptr = signal
 12491  		}
 12492  		/*********************************************************************************/
 12493  
 12494  		/* Search in original signal */
 12495  
 12496  		CBimax_old = CBimax
 12497  		/* Compensate for decimation */
 12498  
 12499  		if Fs_kHz == 12 {
 12500  			lag = (((int32(int16(lag))) * (int32(int16(3)))) >> (1))
 12501  		} else if Fs_kHz == 16 {
 12502  			lag = ((lag) << (1))
 12503  		} else {
 12504  			lag = ((int32(int16(lag))) * (int32(int16(3))))
 12505  		}
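        		// Map the lag found on the 8 kHz signal back to the input rate:
        		// x1.5 for 12 kHz, x2 for 16 kHz and x3 for 24 kHz input.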
 12506  
 12507  		lag = func() int32 {
 12508  			if (min_lag) > (max_lag) {
 12509  				return func() int32 {
 12510  					if (lag) > (min_lag) {
 12511  						return min_lag
 12512  					}
 12513  					return func() int32 {
 12514  						if (lag) < (max_lag) {
 12515  							return max_lag
 12516  						}
 12517  						return lag
 12518  					}()
 12519  				}()
 12520  			}
 12521  			return func() int32 {
 12522  				if (lag) > (max_lag) {
 12523  					return max_lag
 12524  				}
 12525  				return func() int32 {
 12526  					if (lag) < (min_lag) {
 12527  						return min_lag
 12528  					}
 12529  					return lag
 12530  				}()
 12531  			}()
 12532  		}()
 12533  		start_lag = SKP_max_int(tls, (lag - 2), min_lag)
 12534  		end_lag = SKP_min_int(tls, (lag + 2), max_lag)
 12535  		lag_new = lag /* to avoid undefined lag */
 12536  		CBimax = 0    /* to avoid undefined lag */
 12537  
 12538  		*(*int32)(unsafe.Pointer(LTPCorr_Q15)) = SKP_Silk_SQRT_APPROX(tls, ((CCmax) << (13))) /* Output normalized correlation */
 12539  
 12540  		CCmax = libc.Int32FromUint32(0x80000000)
 12541  		/* pitch lags according to second stage */
 12542  		for k = 0; k < 4; k++ {
 12543  			*(*int32)(unsafe.Pointer(pitch_out + uintptr(k)*4)) = (lag + (2 * int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage2)) + uintptr(k)*22) + uintptr(CBimax_old)*2)))))
 12544  		}
 12545  		/* Calculate the correlations and energies needed in stage 3 */
 12546  		SKP_FIX_P_Ana_calc_corr_st3(tls, bp+15396 /* &crosscorr_st3[0] */, input_signal_ptr, start_lag, sf_length, complexity)
 12547  		SKP_FIX_P_Ana_calc_energy_st3(tls, bp+18116 /* &energies_st3[0] */, input_signal_ptr, start_lag, sf_length, complexity)
 12548  
 12549  		lag_counter = 0
 12550  
 12551  		contour_bias = ((52429) / (lag))
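        		// 52429 is roughly 0.05 in Q20, so contour_bias ~= 0.05 / lag (Q20).
        		// It is applied below (Q20 -> Q15) to penalize contour vectors whose
        		// index lies far from the centre of the stage-3 codebook.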
 12552  
 12553  		/* Set up codebook parameters according to complexity setting */
 12554  		cbk_size = int32(SKP_Silk_cbk_sizes_stage3[complexity])
 12555  		cbk_offset = int32(SKP_Silk_cbk_offsets_stage3[complexity])
 12556  
 12557  		for d = start_lag; d <= end_lag; d++ {
 12558  			for j = cbk_offset; j < (cbk_offset + cbk_size); j++ {
 12559  				cross_corr = 0
 12560  				energy = 0
 12561  				for k = 0; k < 4; k++ {
 12562  
 12563  					energy = energy + ((*(*int32)(unsafe.Pointer(((bp + 18116 /* &energies_st3[0] */ + uintptr(k)*680) + uintptr(j)*20) + uintptr(lag_counter)*4))) >> (2)) /* use mean, to avoid overflow */
 12564  
 12565  					cross_corr = cross_corr + ((*(*int32)(unsafe.Pointer(((bp + 15396 /* &crosscorr_st3[0] */ + uintptr(k)*680) + uintptr(j)*20) + uintptr(lag_counter)*4))) >> (2)) /* use mean, to avoid overflow */
 12566  				}
 12567  				if cross_corr > 0 {
 12568  					/* Divide cross_corr / energy and get result in Q15 */
 12569  					lz = SKP_Silk_CLZ32(tls, cross_corr)
 12570  					/* Divide with result in Q13, cross_corr could be larger than energy */
 12571  					lshift = func() int32 {
 12572  						if (0) > (13) {
 12573  							return func() int32 {
 12574  								if (lz - 1) > (0) {
 12575  									return 0
 12576  								}
 12577  								return func() int32 {
 12578  									if (lz - 1) < (13) {
 12579  										return 13
 12580  									}
 12581  									return (lz - 1)
 12582  								}()
 12583  							}()
 12584  						}
 12585  						return func() int32 {
 12586  							if (lz - 1) > (13) {
 12587  								return 13
 12588  							}
 12589  							return func() int32 {
 12590  								if (lz - 1) < (0) {
 12591  									return 0
 12592  								}
 12593  								return (lz - 1)
 12594  							}()
 12595  						}()
 12596  					}()
 12597  					CCmax_new = (((cross_corr) << (lshift)) / (((energy) >> (13 - lshift)) + 1))
 12598  					CCmax_new = func() int32 {
 12599  						if (CCmax_new) > 0x7FFF {
 12600  							return 0x7FFF
 12601  						}
 12602  						return func() int32 {
 12603  							if (CCmax_new) < (int32(libc.Int16FromInt32(0x8000))) {
 12604  								return int32(libc.Int16FromInt32(0x8000))
 12605  							}
 12606  							return CCmax_new
 12607  						}()
 12608  					}()
 12609  					CCmax_new = ((((cross_corr) >> 16) * (int32(int16(CCmax_new)))) + ((((cross_corr) & 0x0000FFFF) * (int32(int16(CCmax_new)))) >> 16))
 12610  					/* Saturate */
 12611  					if CCmax_new > (int32((0x7FFFFFFF)) >> (3)) {
 12612  						CCmax_new = 0x7FFFFFFF
 12613  					} else {
 12614  						CCmax_new = ((CCmax_new) << (3))
 12615  					}
 12616  					/* Reduce depending on flatness of contour */
 12617  					diff = (j - (int32((34)) >> (1)))
 12618  					diff = ((diff) * (diff))
 12619  					diff = (0x7FFF - (((contour_bias) * (diff)) >> (5))) /* Q20 -> Q15 */
 12620  
 12621  					CCmax_new = (((((CCmax_new) >> 16) * (int32(int16(diff)))) + ((((CCmax_new) & 0x0000FFFF) * (int32(int16(diff)))) >> 16)) << (1))
 12622  				} else {
 12623  					CCmax_new = 0
 12624  				}
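        				// At this point CCmax_new is roughly cross_corr^2 / energy (Q0),
        				// scaled down by ~(1 - 0.05 * (j - 17)^2 / lag) for contour
        				// vectors away from the codebook centre; negative correlations
        				// score zero.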
 12625  
 12626  				if (CCmax_new > CCmax) && ((d + int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage3))) + uintptr(j)*2)))) <= max_lag) {
 12627  					CCmax = CCmax_new
 12628  					lag_new = d
 12629  					CBimax = j
 12630  				}
 12631  			}
 12632  			lag_counter++
 12633  		}
 12634  
 12635  		for k = 0; k < 4; k++ {
 12636  			*(*int32)(unsafe.Pointer(pitch_out + uintptr(k)*4)) = (lag_new + int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage3)) + uintptr(k)*68) + uintptr(CBimax)*2))))
 12637  		}
 12638  		*(*int32)(unsafe.Pointer(lagIndex)) = (lag_new - min_lag)
 12639  		*(*int32)(unsafe.Pointer(contourIndex)) = CBimax
 12640  	} else {
 12641  		/* Save Lags and correlation */
 12642  		CCmax = func() int32 {
 12643  			if (CCmax) > (0) {
 12644  				return CCmax
 12645  			}
 12646  			return 0
 12647  		}()
 12648  		*(*int32)(unsafe.Pointer(LTPCorr_Q15)) = SKP_Silk_SQRT_APPROX(tls, ((CCmax) << (13))) /* Output normalized correlation */
 12649  		for k = 0; k < 4; k++ {
 12650  			*(*int32)(unsafe.Pointer(pitch_out + uintptr(k)*4)) = (lag + int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage2)) + uintptr(k)*22) + uintptr(CBimax)*2))))
 12651  		}
 12652  		*(*int32)(unsafe.Pointer(lagIndex)) = (lag - min_lag_8kHz)
 12653  		*(*int32)(unsafe.Pointer(contourIndex)) = CBimax
 12654  	}
 12655  
 12656  	/* return as voiced */
 12657  	return 0
 12658  }
 12659  
 12660  /*************************************************************************/
 12661  /* Calculates the correlations used in stage 3 search. In order to cover */
 12662  /* the whole lag codebook for all the searched offset lags (lag +- 2),   */
        /* correlations are computed over a correspondingly extended lag range.  */
 12663  /*************************************************************************/
 12664  func SKP_FIX_P_Ana_calc_corr_st3(tls *libc.TLS, cross_corr_st3 uintptr, signal uintptr, start_lag int32, sf_length int32, complexity int32) { /* SKP_Silk_pitch_analysis_core.c:569:6: */
 12665  	bp := tls.Alloc(88)
 12666  	defer tls.Free(88)
 12667  
 12668  	var target_ptr uintptr
 12669  	var basis_ptr uintptr
 12670  	var cross_corr int32
 12671  	var i int32
 12672  	var j int32
 12673  	var k int32
 12674  	var lag_counter int32
 12675  	var cbk_offset int32
 12676  	var cbk_size int32
 12677  	var delta int32
 12678  	var idx int32
 12679  	// var scratch_mem [22]int32 at bp, 88
 12680  
 12681  	cbk_offset = int32(SKP_Silk_cbk_offsets_stage3[complexity])
 12682  	cbk_size = int32(SKP_Silk_cbk_sizes_stage3[complexity])
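        	// cross_corr_st3 is addressed as a [4][34][5]int32 block below
        	// (k*680 + i*20 + j*4 bytes), i.e. [subframe][codebook vector][lag offset].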
 12683  
 12684  	target_ptr = (signal + uintptr(((sf_length)<<(2)))*2) /* Pointer to middle of frame */
 12685  	for k = 0; k < 4; k++ {
 12686  		lag_counter = 0
 12687  
 12688  		/* Calculate the correlations for each subframe */
 12689  		for j = int32(*(*int16)(unsafe.Pointer(((uintptr(unsafe.Pointer(&SKP_Silk_Lag_range_stage3)) + uintptr(complexity)*16) + uintptr(k)*4)))); j <= int32(*(*int16)(unsafe.Pointer(((uintptr(unsafe.Pointer(&SKP_Silk_Lag_range_stage3)) + uintptr(complexity)*16) + uintptr(k)*4) + 1*2))); j++ {
 12690  			basis_ptr = (target_ptr - uintptr((start_lag+j))*2)
 12691  			cross_corr = SKP_Silk_inner_prod_aligned(tls, target_ptr, basis_ptr, sf_length)
 12692  
 12693  			*(*int32)(unsafe.Pointer(bp /* &scratch_mem[0] */ + uintptr(lag_counter)*4)) = cross_corr
 12694  			lag_counter++
 12695  		}
 12696  
 12697  		delta = int32(*(*int16)(unsafe.Pointer(((uintptr(unsafe.Pointer(&SKP_Silk_Lag_range_stage3)) + uintptr(complexity)*16) + uintptr(k)*4))))
 12698  		for i = cbk_offset; i < (cbk_offset + cbk_size); i++ {
 12699  			/* Fill out the 3 dim array that stores the correlations for */
 12700  			/* each code_book vector for each start lag */
 12701  			idx = (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage3)) + uintptr(k)*68) + uintptr(i)*2))) - delta)
 12702  			for j = 0; j < 5; j++ {
 12703  
 12704  				*(*int32)(unsafe.Pointer(((cross_corr_st3 + uintptr(k)*680) + uintptr(i)*20) + uintptr(j)*4)) = *(*int32)(unsafe.Pointer(bp /* &scratch_mem[0] */ + uintptr((idx+j))*4))
 12705  			}
 12706  		}
 12707  		target_ptr += 2 * (uintptr(sf_length))
 12708  	}
 12709  }
 12710  
 12711  /********************************************************************/
 12712  /* Calculate the energies used in the stage 3 search. The energies are */
 12713  /* calculated recursively.                                              */
 12714  /********************************************************************/
 12715  func SKP_FIX_P_Ana_calc_energy_st3(tls *libc.TLS, energies_st3 uintptr, signal uintptr, start_lag int32, sf_length int32, complexity int32) { /* SKP_Silk_pitch_analysis_core.c:621:6: */
 12716  	bp := tls.Alloc(88)
 12717  	defer tls.Free(88)
 12718  
 12719  	var target_ptr uintptr
 12720  	var basis_ptr uintptr
 12721  	var energy int32
 12722  	var k int32
 12723  	var i int32
 12724  	var j int32
 12725  	var lag_counter int32
 12726  	var cbk_offset int32
 12727  	var cbk_size int32
 12728  	var delta int32
 12729  	var idx int32
 12730  	// var scratch_mem [22]int32 at bp, 88
 12731  
 12732  	cbk_offset = int32(SKP_Silk_cbk_offsets_stage3[complexity])
 12733  	cbk_size = int32(SKP_Silk_cbk_sizes_stage3[complexity])
 12734  
 12735  	target_ptr = (signal + uintptr(((sf_length)<<(2)))*2)
 12736  	for k = 0; k < 4; k++ {
 12737  		lag_counter = 0
 12738  
 12739  		/* Calculate the energy for first lag */
 12740  		basis_ptr = (target_ptr - uintptr((start_lag+int32(*(*int16)(unsafe.Pointer(((uintptr(unsafe.Pointer(&SKP_Silk_Lag_range_stage3)) + uintptr(complexity)*16) + uintptr(k)*4))))))*2)
 12741  		energy = SKP_Silk_inner_prod_aligned(tls, basis_ptr, basis_ptr, sf_length)
 12742  
 12743  		*(*int32)(unsafe.Pointer(bp /* &scratch_mem[0] */ + uintptr(lag_counter)*4)) = energy
 12744  		lag_counter++
 12745  
 12746  		for i = 1; i < ((int32(*(*int16)(unsafe.Pointer(((uintptr(unsafe.Pointer(&SKP_Silk_Lag_range_stage3)) + uintptr(complexity)*16) + uintptr(k)*4) + 1*2))) - int32(*(*int16)(unsafe.Pointer(((uintptr(unsafe.Pointer(&SKP_Silk_Lag_range_stage3)) + uintptr(complexity)*16) + uintptr(k)*4))))) + 1); i++ {
 12747  			/* remove part outside new window */
 12748  			energy = energy - ((int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr((sf_length-i))*2)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr((sf_length-i))*2)))))
 12749  
 12750  			/* add part that comes into window */
 12751  			energy = func() int32 {
 12752  				if ((uint32((energy) + ((int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2))))))) & 0x80000000) == uint32(0) {
 12753  					return func() int32 {
 12754  						if ((uint32((energy) & ((int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2))))))) & 0x80000000) != uint32(0) {
 12755  							return libc.Int32FromUint32(0x80000000)
 12756  						}
 12757  						return ((energy) + ((int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2))))))
 12758  					}()
 12759  				}
 12760  				return func() int32 {
 12761  					if ((uint32((energy) | ((int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2))))))) & 0x80000000) == uint32(0) {
 12762  						return 0x7FFFFFFF
 12763  					}
 12764  					return ((energy) + ((int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2)))) * (int32(*(*int16)(unsafe.Pointer(basis_ptr + uintptr(-i)*2))))))
 12765  				}()
 12766  			}()
 12767  
 12768  			*(*int32)(unsafe.Pointer(bp /* &scratch_mem[0] */ + uintptr(lag_counter)*4)) = energy
 12769  			lag_counter++
 12770  		}
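        		// Each successive energy is derived from the previous one: the square of
        		// the sample leaving the window is subtracted and the square of the
        		// sample entering it is added with 32-bit saturation (the expanded
        		// saturating-add pattern above).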
 12771  
 12772  		delta = int32(*(*int16)(unsafe.Pointer(((uintptr(unsafe.Pointer(&SKP_Silk_Lag_range_stage3)) + uintptr(complexity)*16) + uintptr(k)*4))))
 12773  		for i = cbk_offset; i < (cbk_offset + cbk_size); i++ {
 12774  			/* Fill out the 3 dim array that stores the energies for        */
 12775  			/* each code_book vector for each start lag                        */
 12776  			idx = (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_CB_lags_stage3)) + uintptr(k)*68) + uintptr(i)*2))) - delta)
 12777  			for j = 0; j < 5; j++ {
 12778  
 12779  				*(*int32)(unsafe.Pointer(((energies_st3 + uintptr(k)*680) + uintptr(i)*20) + uintptr(j)*4)) = *(*int32)(unsafe.Pointer(bp /* &scratch_mem[0] */ + uintptr((idx+j))*4))
 12780  
 12781  			}
 12782  		}
 12783  		target_ptr += 2 * (uintptr(sf_length))
 12784  	}
 12785  }
 12786  
 12787  func SKP_FIX_P_Ana_find_scaling(tls *libc.TLS, signal uintptr, signal_length int32, sum_sqr_len int32) int32 { /* SKP_Silk_pitch_analysis_core.c:681:11: */
 12788  	var nbits int32
 12789  	var x_max int32
 12790  
 12791  	x_max = int32(SKP_Silk_int16_array_maxabs(tls, signal, signal_length))
 12792  
 12793  	if x_max < 0x7FFF {
 12794  		/* Number of bits needed for the sum of the squares */
 12795  		nbits = (32 - SKP_Silk_CLZ32(tls, ((int32(int16(x_max)))*(int32(int16(x_max))))))
 12796  	} else {
 12797  		/* Here we don't know if x_max should have been SKP_int16_MAX + 1, so we expect the worst case */
 12798  		nbits = 30
 12799  	}
 12800  	nbits = nbits + (17 - SKP_Silk_CLZ16(tls, int16(sum_sqr_len)))
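        	// nbits is now an upper bound on the bit width of the sum of squares
        	// (bit width of x_max^2 plus bit width of sum_sqr_len, plus one bit of
        	// margin); the shift returned below keeps that bound under 31 bits.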
 12801  
 12802  	/* Without a guarantee of saturation, we need to keep the 31st bit free */
 12803  	if nbits < 31 {
 12804  		return 0
 12805  	} else {
 12806  		return (nbits - 30)
 12807  	}
 12808  	return int32(0)
 12809  }
 12810  
 12811  /***********************************************************************
 12812  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
 12813  Redistribution and use in source and binary forms, with or without
 12814  modification, (subject to the limitations in the disclaimer below)
 12815  are permitted provided that the following conditions are met:
 12816  - Redistributions of source code must retain the above copyright notice,
 12817  this list of conditions and the following disclaimer.
 12818  - Redistributions in binary form must reproduce the above copyright
 12819  notice, this list of conditions and the following disclaimer in the
 12820  documentation and/or other materials provided with the distribution.
 12821  - Neither the name of Skype Limited, nor the names of specific
 12822  contributors, may be used to endorse or promote products derived from
 12823  this software without specific prior written permission.
 12824  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
 12825  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 12826  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
 12827  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 12828  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 12829  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 12830  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 12831  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 12832  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 12833  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 12834  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 12835  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 12836  ***********************************************************************/
 12837  
 12838  /************************************************************/
 12839  /* Definitions For Fix pitch estimator                      */
 12840  /************************************************************/
 12841  
 12842  /********************************************************/
 12843  /* Auto Generated File from generate_pitch_est_tables.m */
 12844  /********************************************************/
 12845  
 12846  var SKP_Silk_CB_lags_stage2 = [4][11]int16{
 12847  	{int16(0), int16(2), int16(-1), int16(-1), int16(-1), int16(0), int16(0), int16(1), int16(1), int16(0), int16(1)},
 12848  	{int16(0), int16(1), int16(0), int16(0), int16(0), int16(0), int16(0), int16(1), int16(0), int16(0), int16(0)},
 12849  	{int16(0), int16(0), int16(1), int16(0), int16(0), int16(0), int16(1), int16(0), int16(0), int16(0), int16(0)},
 12850  	{int16(0), int16(-1), int16(2), int16(1), int16(0), int16(1), int16(1), int16(0), int16(0), int16(-1), int16(-1)},
 12851  } /* SKP_Silk_pitch_est_tables.c:35:17 */
 12852  
 12853  var SKP_Silk_CB_lags_stage3 = [4][34]int16{
 12854  	{int16(-9), int16(-7), int16(-6), int16(-5), int16(-5), int16(-4), int16(-4), int16(-3), int16(-3), int16(-2), int16(-2), int16(-2), int16(-1), int16(-1), int16(-1), int16(0), int16(0), int16(0), int16(1), int16(1), int16(0), int16(1), int16(2), int16(2), int16(2), int16(3), int16(3), int16(4), int16(4), int16(5), int16(6), int16(5), int16(6), int16(8)},
 12855  	{int16(-3), int16(-2), int16(-2), int16(-2), int16(-1), int16(-1), int16(-1), int16(-1), int16(-1), int16(0), int16(0), int16(-1), int16(0), int16(0), int16(0), int16(0), int16(0), int16(0), int16(1), int16(0), int16(0), int16(0), int16(1), int16(1), int16(0), int16(1), int16(1), int16(2), int16(1), int16(2), int16(2), int16(2), int16(2), int16(3)},
 12856  	{int16(3), int16(3), int16(2), int16(2), int16(2), int16(2), int16(1), int16(2), int16(1), int16(1), int16(0), int16(1), int16(1), int16(0), int16(0), int16(0), int16(1), int16(0), int16(0), int16(0), int16(0), int16(0), int16(0), int16(-1), int16(0), int16(0), int16(-1), int16(-1), int16(-1), int16(-1), int16(-1), int16(-2), int16(-2), int16(-2)},
 12857  	{int16(9), int16(8), int16(6), int16(5), int16(6), int16(5), int16(4), int16(4), int16(3), int16(3), int16(2), int16(2), int16(2), int16(1), int16(0), int16(1), int16(1), int16(0), int16(0), int16(0), int16(-1), int16(-1), int16(-1), int16(-2), int16(-2), int16(-2), int16(-3), int16(-3), int16(-4), int16(-4), int16(-5), int16(-5), int16(-6), int16(-7)},
 12858  } /* SKP_Silk_pitch_est_tables.c:43:17 */
 12859  
 12860  var SKP_Silk_Lag_range_stage3 = [3][4][2]int16{
 12861  	{
 12862  		/* Lags to search for low number of stage3 cbks */
 12863  		{int16(-2), int16(6)},
 12864  		{int16(-1), int16(5)},
 12865  		{int16(-1), int16(5)},
 12866  		{int16(-2), int16(7)},
 12867  	},
 12868  	/* Lags to search for middle number of stage3 cbks */
 12869  	{
 12870  		{int16(-4), int16(8)},
 12871  		{int16(-1), int16(6)},
 12872  		{int16(-1), int16(6)},
 12873  		{int16(-4), int16(9)},
 12874  	},
 12875  	/* Lags to search for max number of stage3 cbks */
 12876  	{
 12877  		{int16(-9), int16(12)},
 12878  		{int16(-3), int16(7)},
 12879  		{int16(-2), int16(7)},
 12880  		{int16(-7), int16(13)},
 12881  	},
 12882  } /* SKP_Silk_pitch_est_tables.c:51:17 */
 12883  
 12884  var SKP_Silk_cbk_sizes_stage3 = [3]int16{
 12885  	int16(16),
 12886  	int16(24),
 12887  	int16(34),
 12888  } /* SKP_Silk_pitch_est_tables.c:76:17 */
 12889  
 12890  var SKP_Silk_cbk_offsets_stage3 = [3]int16{
 12891  	(int16(int32((34 - 16)) >> 1)),
 12892  	(int16(int32((34 - 24)) >> 1)),
 12893  	int16(0),
 12894  } /* SKP_Silk_pitch_est_tables.c:83:17 */
 12895  
 12896  /***********************************************************************
 12897  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
 12898  Redistribution and use in source and binary forms, with or without
 12899  modification, (subject to the limitations in the disclaimer below)
 12900  are permitted provided that the following conditions are met:
 12901  - Redistributions of source code must retain the above copyright notice,
 12902  this list of conditions and the following disclaimer.
 12903  - Redistributions in binary form must reproduce the above copyright
 12904  notice, this list of conditions and the following disclaimer in the
 12905  documentation and/or other materials provided with the distribution.
 12906  - Neither the name of Skype Limited, nor the names of specific
 12907  contributors, may be used to endorse or promote products derived from
 12908  this software without specific prior written permission.
 12909  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
 12910  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 12911  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
 12912  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 12913  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 12914  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 12915  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 12916  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 12917  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 12918  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 12919  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 12920  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 12921  ***********************************************************************/
 12922  
 12923  var HARM_ATT_Q15 = [2]int16{int16(32440), int16(31130)}              /* SKP_Silk_PLC.c:32:24 */ /* 0.99, 0.95 */
 12924  var PLC_RAND_ATTENUATE_V_Q15 = [2]int16{int16(31130), int16(26214)}  /* SKP_Silk_PLC.c:33:24 */ /* 0.95, 0.8 */
 12925  var PLC_RAND_ATTENUATE_UV_Q15 = [2]int16{int16(32440), int16(29491)} /* SKP_Silk_PLC.c:34:24 */ /* 0.99, 0.9 */
 12926  
 12928  
 12929  func SKP_Silk_PLC_Reset(tls *libc.TLS, psDec uintptr) { /* SKP_Silk_PLC.c:36:6: */
 12930  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsPLC.FpitchL_Q8 = (((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length) >> (1))
 12931  }
 12932  
 12933  func SKP_Silk_PLC(tls *libc.TLS, psDec uintptr, psDecCtrl uintptr, signal uintptr, length int32, lost int32) { /* SKP_Silk_PLC.c:43:6: */
 12934  	/* PLC control function */
 12935  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz != (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsPLC.Ffs_kHz {
 12936  		SKP_Silk_PLC_Reset(tls, psDec)
 12937  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsPLC.Ffs_kHz = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz
 12938  	}
 12939  
 12940  	if lost != 0 {
 12941  		/****************************/
 12942  		/* Generate Signal          */
 12943  		/****************************/
 12944  		SKP_Silk_PLC_conceal(tls, psDec, psDecCtrl, signal, length)
 12945  
 12946  		(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt++
 12947  	} else {
 12948  		/****************************/
 12949  		/* Update state             */
 12950  		/****************************/
 12951  		SKP_Silk_PLC_update(tls, psDec, psDecCtrl, signal, length)
 12952  	}
 12953  }
 12954  
 12955  /**************************************************/
 12956  /* Update state of PLC                            */
 12957  /**************************************************/
 12958  func SKP_Silk_PLC_update(tls *libc.TLS, psDec uintptr, psDecCtrl uintptr, signal uintptr, length int32) { /* SKP_Silk_PLC.c:75:6: */
 12959  	var LTP_Gain_Q14 int32
 12960  	var temp_LTP_Gain_Q14 int32
 12961  	var i int32
 12962  	var j int32
 12963  	var psPLC uintptr
 12964  
 12965  	psPLC = (psDec + 13588 /* &.sPLC */)
 12966  
 12967  	/* Update parameters used in case of packet loss */
 12968  	(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_sigtype = (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype
 12969  	LTP_Gain_Q14 = 0
 12970  	if (*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).Fsigtype == 0 {
 12971  		/* Find the parameters for the last subframe which contains a pitch pulse */
 12972  		for j = 0; (j * (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length) < *(*int32)(unsafe.Pointer((psDecCtrl /* &.pitchL */) + 3*4)); j++ {
 12973  			temp_LTP_Gain_Q14 = 0
 12974  			for i = 0; i < 5; i++ {
 12975  				temp_LTP_Gain_Q14 = temp_LTP_Gain_Q14 + (int32(*(*int16)(unsafe.Pointer((psDecCtrl + 100 /* &.LTPCoef_Q14 */) + uintptr(((((4-1)-j)*5)+i))*2))))
 12976  			}
 12977  			if temp_LTP_Gain_Q14 > LTP_Gain_Q14 {
 12978  				LTP_Gain_Q14 = temp_LTP_Gain_Q14
 12979  				libc.Xmemcpy(tls, psPLC+4 /* &.LTPCoef_Q14 */, ((psDecCtrl + 100 /* &.LTPCoef_Q14 */) + uintptr(((int32((int16((4 - 1) - j))))*(int32(int16(5)))))*2), (uint32(5) * uint32(unsafe.Sizeof(int16(0)))))
 12980  
 12981  				(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8 = ((*(*int32)(unsafe.Pointer((psDecCtrl /* &.pitchL */) + uintptr(((4-1)-j))*4))) << (8))
 12982  			}
 12983  		}
 12984  
 12985  		libc.Xmemset(tls, psPLC+4 /* &.LTPCoef_Q14 */, 0, (uint32(5) * uint32(unsafe.Sizeof(int16(0)))))
 12986  		*(*int16)(unsafe.Pointer((psPLC + 4 /* &.LTPCoef_Q14 */) + 2*2)) = int16(LTP_Gain_Q14)
 12987  
 12988  		/* Limit LT coefs */
 12989  		if LTP_Gain_Q14 < 11469 {
 12990  			var scale_Q10 int32
 12991  			var tmp int32
 12992  
 12993  			tmp = (int32((11469)) << (10))
 12994  			scale_Q10 = ((tmp) / (func() int32 {
 12995  				if (LTP_Gain_Q14) > (1) {
 12996  					return LTP_Gain_Q14
 12997  				}
 12998  				return 1
 12999  			}()))
 13000  			for i = 0; i < 5; i++ {
 13001  				*(*int16)(unsafe.Pointer((psPLC + 4 /* &.LTPCoef_Q14 */) + uintptr(i)*2)) = (int16(((int32(*(*int16)(unsafe.Pointer((psPLC + 4 /* &.LTPCoef_Q14 */) + uintptr(i)*2)))) * (int32(int16(scale_Q10)))) >> (10)))
 13002  			}
 13003  		} else if LTP_Gain_Q14 > 15565 {
 13004  			var scale_Q14 int32
 13005  			var tmp int32
 13006  
 13007  			tmp = (int32((15565)) << (14))
 13008  			scale_Q14 = ((tmp) / (func() int32 {
 13009  				if (LTP_Gain_Q14) > (1) {
 13010  					return LTP_Gain_Q14
 13011  				}
 13012  				return 1
 13013  			}()))
 13014  			for i = 0; i < 5; i++ {
 13015  				*(*int16)(unsafe.Pointer((psPLC + 4 /* &.LTPCoef_Q14 */) + uintptr(i)*2)) = (int16(((int32(*(*int16)(unsafe.Pointer((psPLC + 4 /* &.LTPCoef_Q14 */) + uintptr(i)*2)))) * (int32(int16(scale_Q14)))) >> (14)))
 13016  			}
 13017  		}
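        		// 11469 and 15565 are roughly 0.7 and 0.95 in Q14: the concealment LTP
        		// coefficients are rescaled so that their summed gain stays inside that
        		// range.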
 13018  	} else {
 13019  		(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8 = (((int32(int16((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz))) * (int32(int16(18)))) << (8))
 13020  		libc.Xmemset(tls, psPLC+4 /* &.LTPCoef_Q14 */, 0, (uint32(5) * uint32(unsafe.Sizeof(int16(0)))))
 13021  	}
 13022  
 13023  	/* Save LPC coefficients */
 13024  	libc.Xmemcpy(tls, psPLC+14 /* &.prevLPC_Q12 */, ((psDecCtrl + 36 /* &.PredCoef_Q12 */) + 1*32), (uint32((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) * uint32(unsafe.Sizeof(int16(0)))))
 13025  	(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FprevLTP_scale_Q14 = int16((*SKP_Silk_decoder_control)(unsafe.Pointer(psDecCtrl)).FLTP_scale_Q14)
 13026  
 13027  	/* Save Gains */
 13028  	libc.Xmemcpy(tls, psPLC+72 /* &.prevGain_Q16 */, psDecCtrl+16 /* &.Gains_Q16 */, (uint32(4) * uint32(unsafe.Sizeof(int32(0)))))
 13029  }
 13030  
 13031  func SKP_Silk_PLC_conceal(tls *libc.TLS, psDec uintptr, psDecCtrl uintptr, signal uintptr, length int32) { /* SKP_Silk_PLC.c:146:6: */
 13032  	bp := tls.Alloc(2932)
 13033  	defer tls.Free(2932)
 13034  
 13035  	var i int32
 13036  	var j int32
 13037  	var k int32
 13038  	var B_Q14 uintptr
 13039  	// var exc_buf [480]int16 at bp, 960
 13040  
 13041  	var exc_buf_ptr uintptr
 13042  	var rand_scale_Q14 int16
 13043  	// var A_Q12_tmp struct {_ [0]uint32;Fas_int16 [16]int16;} at bp+2900, 32
 13044  
 13045  	var rand_seed int32
 13046  	var harm_Gain_Q15 int32
 13047  	var rand_Gain_Q15 int32
 13048  	var lag int32
 13049  	var idx int32
 13050  	var sLTP_buf_idx int32
 13051  	// var shift1 int32 at bp+964, 4
 13052  
 13053  	// var shift2 int32 at bp+972, 4
 13054  
 13055  	// var energy1 int32 at bp+960, 4
 13056  
 13057  	// var energy2 int32 at bp+968, 4
 13058  
 13059  	var rand_ptr uintptr
 13060  	var pred_lag_ptr uintptr
 13061  	// var sig_Q10 [480]int32 at bp+980, 1920
 13062  
 13063  	var sig_Q10_ptr uintptr
 13064  	var LPC_exc_Q10 int32
 13065  	var LPC_pred_Q10 int32
 13066  	var LTP_pred_Q14 int32
 13067  	var psPLC uintptr
 13068  	psPLC = (psDec + 13588 /* &.sPLC */)
 13069  
 13070  	/* Update LTP buffer */
 13071  	libc.Xmemcpy(tls, psDec+1048 /* &.sLTP_Q16 */, ((psDec + 1048 /* &.sLTP_Q16 */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length)*4), (uint32((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length) * uint32(unsafe.Sizeof(int32(0)))))
 13072  
 13073  	/* LPC concealment. Apply BWE to previous LPC */
 13074  	SKP_Silk_bwexpander(tls, psPLC+14 /* &.prevLPC_Q12 */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order, 64880)
 13075  
 13076  	/* Find random noise component */
 13077  	/* Scale previous excitation signal */
 13078  	exc_buf_ptr = bp /* &exc_buf[0] */
 13079  	for k = (int32(4) >> 1); k < 4; k++ {
 13080  		for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length; i++ {
 13081  			*(*int16)(unsafe.Pointer(exc_buf_ptr + uintptr(i)*2)) = (int16((((((*(*int32)(unsafe.Pointer((psDec + 5432 /* &.exc_Q10 */) + uintptr((i+(k*(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)))*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer((psDec + 5432 /* &.exc_Q10 */) + uintptr((i+(k*(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)))*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + uintptr(k)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer((psDec + 5432 /* &.exc_Q10 */) + uintptr((i+(k*(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)))*4))) * (func() int32 {
 13082  				if (16) == 1 {
 13083  					return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + uintptr(k)*4))) & 1))
 13084  				}
 13085  				return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + uintptr(k)*4))) >> ((16) - 1)) + 1) >> 1)
 13086  			}()))) >> (10)))
 13087  		}
 13088  		exc_buf_ptr += 2 * (uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length))
 13089  	}
 13090  	/* Find the subframe with the lowest energy of the last two and use that as the random noise generator */
 13091  	SKP_Silk_sum_sqr_shift(tls, bp+960 /* &energy1 */, bp+964 /* &shift1 */, bp /* &exc_buf[0] */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)
 13092  	SKP_Silk_sum_sqr_shift(tls, bp+968 /* &energy2 */, bp+972 /* &shift2 */, (bp /* &exc_buf */ + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)*2), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)
 13093  
 13094  	if ((*(*int32)(unsafe.Pointer(bp + 960 /* energy1 */))) >> (*(*int32)(unsafe.Pointer(bp + 972 /* shift2 */)))) < ((*(*int32)(unsafe.Pointer(bp + 968 /* energy2 */))) >> (*(*int32)(unsafe.Pointer(bp + 964 /* shift1 */)))) {
 13095  		/* First sub-frame has lowest energy */
 13096  		rand_ptr = ((psDec + 5432 /* &.exc_Q10 */) + uintptr(SKP_max_int(tls, 0, ((3*(*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)-128)))*4)
 13097  	} else {
 13098  		/* Second sub-frame has lowest energy */
 13099  		rand_ptr = ((psDec + 5432 /* &.exc_Q10 */) + uintptr(SKP_max_int(tls, 0, ((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length-128)))*4)
 13100  	}
 13101  
 13102  	/* Setup Gain to random noise component */
 13103  	B_Q14 = psPLC + 4 /* &.LTPCoef_Q14 */
 13104  	rand_scale_Q14 = (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FrandScale_Q14
 13105  
 13106  	/* Setup attenuation gains */
 13107  	harm_Gain_Q15 = int32(HARM_ATT_Q15[SKP_min_int(tls, (2-1), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt)])
 13108  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_sigtype == 0 {
 13109  		rand_Gain_Q15 = int32(PLC_RAND_ATTENUATE_V_Q15[SKP_min_int(tls, (2-1), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt)])
 13110  	} else {
 13111  		rand_Gain_Q15 = int32(PLC_RAND_ATTENUATE_UV_Q15[SKP_min_int(tls, (2-1), (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt)])
 13112  	}
 13113  
 13114  	/* First Lost frame */
 13115  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt == 0 {
 13116  		rand_scale_Q14 = (int16(int32(1) << 14))
 13117  
 13118  		/* Reduce random noise Gain for voiced frames */
 13119  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_sigtype == 0 {
 13120  			for i = 0; i < 5; i++ {
 13121  				rand_scale_Q14 = int16(int32(rand_scale_Q14) - (int32(*(*int16)(unsafe.Pointer(B_Q14 + uintptr(i)*2)))))
 13122  			}
 13123  			rand_scale_Q14 = SKP_max_16(tls, int16(3277), rand_scale_Q14) /* 0.2 */
 13124  			rand_scale_Q14 = (int16(((int32(rand_scale_Q14)) * (int32((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FprevLTP_scale_Q14))) >> (14)))
 13125  		}
 13126  
 13127  		/* Reduce random noise for unvoiced frames with high LPC gain */
 13128  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fprev_sigtype == 1 {
 13129  			// var invGain_Q30 int32 at bp+976, 4
 13130  
 13131  			var down_scale_Q30 int32
 13132  
 13133  			SKP_Silk_LPC_inverse_pred_gain(tls, bp+976 /* &invGain_Q30 */, psPLC+14 /* &.prevLPC_Q12 */, (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order)
 13134  
 13135  			down_scale_Q30 = SKP_min_32(tls, (int32((int32(1) << 30)) >> (3)), *(*int32)(unsafe.Pointer(bp + 976 /* invGain_Q30 */)))
 13136  			down_scale_Q30 = SKP_max_32(tls, (int32((int32(1) << 30)) >> (8)), down_scale_Q30)
 13137  			down_scale_Q30 = ((down_scale_Q30) << (3))
 13138  
 13139  			rand_Gain_Q15 = (((((down_scale_Q30) >> 16) * (int32(int16(rand_Gain_Q15)))) + ((((down_scale_Q30) & 0x0000FFFF) * (int32(int16(rand_Gain_Q15)))) >> 16)) >> (14))
 13140  		}
 13141  	}
 13142  
 13143  	rand_seed = (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Frand_seed
 13144  	lag = func() int32 {
 13145  		if (8) == 1 {
 13146  			return ((((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) >> 1) + (((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) & 1))
 13147  		}
 13148  		return (((((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) >> ((8) - 1)) + 1) >> 1)
 13149  	}()
 13150  	sLTP_buf_idx = (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length
 13151  
 13152  	/***************************/
 13153  	/* LTP synthesis filtering */
 13154  	/***************************/
 13155  	sig_Q10_ptr = bp + 980 /* &sig_Q10[0] */
 13156  	for k = 0; k < 4; k++ {
 13157  		/* Setup pointer */
 13158  		pred_lag_ptr = ((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(((sLTP_buf_idx-lag)+(5/2)))*4)
 13159  		for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length; i++ {
 13160  			rand_seed = (int32((uint32(907633515)) + ((uint32(rand_seed)) * (uint32(196314165)))))
 13161  			idx = (((rand_seed) >> (25)) & (128 - 1))
 13162  
 13163  			/* Unrolled loop */
 13164  			LTP_pred_Q14 = ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14))))) >> 16))
 13165  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 1*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-1)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 1*2))))) >> 16)))
 13166  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 2*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-2)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 2*2))))) >> 16)))
 13167  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 3*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-3)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 3*2))))) >> 16)))
 13168  			LTP_pred_Q14 = ((LTP_pred_Q14) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 4*2))))) + ((((*(*int32)(unsafe.Pointer(pred_lag_ptr + libc.UintptrFromInt32(-4)*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + 4*2))))) >> 16)))
 13169  			pred_lag_ptr += 4
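        			// The five lines above are the unrolled 5-tap LTP predictor:
        			// LTP_pred_Q14 = sum over the five taps of sLTP_Q16[...] * B_Q14[m] / 2^16,
        			// i.e. one SMLAWB-style multiply per tap, giving the harmonic part
        			// of the excitation in Q14.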
 13170  
 13171  			/* Generate LPC residual */
 13172  			LPC_exc_Q10 = (((((*(*int32)(unsafe.Pointer(rand_ptr + uintptr(idx)*4))) >> 16) * (int32(rand_scale_Q14))) + ((((*(*int32)(unsafe.Pointer(rand_ptr + uintptr(idx)*4))) & 0x0000FFFF) * (int32(rand_scale_Q14))) >> 16)) << (2)) /* Random noise part */
 13173  			LPC_exc_Q10 = ((LPC_exc_Q10) + (func() int32 {
 13174  				if (4) == 1 {
 13175  					return (((LTP_pred_Q14) >> 1) + ((LTP_pred_Q14) & 1))
 13176  				}
 13177  				return ((((LTP_pred_Q14) >> ((4) - 1)) + 1) >> 1)
 13178  			}())) /* Harmonic part */
 13179  
 13180  			/* Update states */
 13181  			*(*int32)(unsafe.Pointer((psDec + 1048 /* &.sLTP_Q16 */) + uintptr(sLTP_buf_idx)*4)) = ((LPC_exc_Q10) << (6))
 13182  			sLTP_buf_idx++
 13183  
 13184  			/* Save LPC residual */
 13185  			*(*int32)(unsafe.Pointer(sig_Q10_ptr + uintptr(i)*4)) = LPC_exc_Q10
 13186  		}
 13187  		sig_Q10_ptr += 4 * (uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length))
 13188  		/* Gradually reduce LTP gain */
 13189  		for j = 0; j < 5; j++ {
 13190  			*(*int16)(unsafe.Pointer(B_Q14 + uintptr(j)*2)) = (int16(((int32(int16(harm_Gain_Q15))) * (int32(*(*int16)(unsafe.Pointer(B_Q14 + uintptr(j)*2))))) >> (15)))
 13191  		}
 13192  		/* Gradually reduce excitation gain */
 13193  		rand_scale_Q14 = (int16(((int32(rand_scale_Q14)) * (int32(int16(rand_Gain_Q15)))) >> (15)))
 13194  
 13195  		/* Slowly increase pitch lag */
 13196  		*(*int32)(unsafe.Pointer(psPLC /* &.pitchL_Q8 */)) += (((((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) >> 16) * (int32(int16(655)))) + (((((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) & 0x0000FFFF) * (int32(int16(655)))) >> 16))
 13197  		(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8 = SKP_min_32(tls, (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8, (((int32(int16(18))) * (int32(int16((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Ffs_kHz)))) << (8)))
 13198  		lag = func() int32 {
 13199  			if (8) == 1 {
 13200  				return ((((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) >> 1) + (((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) & 1))
 13201  			}
 13202  			return (((((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FpitchL_Q8) >> ((8) - 1)) + 1) >> 1)
 13203  		}()
 13204  	}
 13205  
 13206  	/***************************/
 13207  	/* LPC synthesis filtering */
 13208  	/***************************/
 13209  	sig_Q10_ptr = bp + 980 /* &sig_Q10[0] */
 13210  	/* Preload LPC coefficients to array on stack. Gives a small performance gain */
 13211  	libc.Xmemcpy(tls, bp+2900 /* &A_Q12_tmp */ /* &.as_int16 */, psPLC+14 /* &.prevLPC_Q12 */, (uint32((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order) * uint32(unsafe.Sizeof(int16(0)))))
 13212  	/* check that unrolling works: the first 10 taps are unrolled below, so LPC_order is assumed to be at least 10 */
 13213  	for k = 0; k < 4; k++ {
 13214  		for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length; i++ {
 13215  			/* partly unrolled */
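        			// Short-term LPC prediction in Q10: the first 10 taps are unrolled here and the
        			// remaining taps up to LPC_order are handled by the loop below, all using the
        			// same 32x16 fractional multiply-accumulate pattern as above.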
 13216  			LPC_pred_Q10 = ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-1))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */)))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-1))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */)))))) >> 16))
 13217  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-2))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 1*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-2))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 1*2))))) >> 16)))
 13218  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-3))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 2*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-3))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 2*2))))) >> 16)))
 13219  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-4))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 3*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-4))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 3*2))))) >> 16)))
 13220  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-5))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 4*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-5))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 4*2))))) >> 16)))
 13221  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-6))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 5*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-6))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 5*2))))) >> 16)))
 13222  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-7))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 6*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-7))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 6*2))))) >> 16)))
 13223  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-8))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 7*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-8))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 7*2))))) >> 16)))
 13224  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-9))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 8*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-9))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 8*2))))) >> 16)))
 13225  			LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-10))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 9*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr(((16+i)-10))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + 9*2))))) >> 16)))
 13226  
 13227  			for j = 10; j < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FLPC_order; j++ {
 13228  				LPC_pred_Q10 = ((LPC_pred_Q10) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr((((16+i)-j)-1))*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + uintptr(j)*2))))) + ((((*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr((((16+i)-j)-1))*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer((bp + 2900 /* &A_Q12_tmp */ /* &.as_int16 */) + uintptr(j)*2))))) >> 16)))
 13229  			}
 13230  			/* Add prediction to LPC residual */
 13231  			*(*int32)(unsafe.Pointer(sig_Q10_ptr + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer(sig_Q10_ptr + uintptr(i)*4))) + (LPC_pred_Q10))
 13232  
 13233  			/* Update states */
 13234  			*(*int32)(unsafe.Pointer((psDec + 4888 /* &.sLPC_Q14 */) + uintptr((16+i))*4)) = ((*(*int32)(unsafe.Pointer(sig_Q10_ptr + uintptr(i)*4))) << (4))
 13235  		}
 13236  		sig_Q10_ptr += 4 * (uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length))
 13237  		/* Update LPC filter state */
 13238  		libc.Xmemcpy(tls, psDec+4888 /* &.sLPC_Q14 */, ((psDec + 4888 /* &.sLPC_Q14 */) + uintptr((*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fsubfr_length)*4), (uint32(16) * uint32(unsafe.Sizeof(int32(0)))))
 13239  	}
 13240  
 13241  	/* Scale with Gain */
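        	// The nested funcs below expand a saturating gain scaling: each Q10 sample is multiplied
        	// by prevGain_Q16[3] (a full 32x32 fractional multiply), rounded down from Q10 to Q0,
        	// and saturated to the int16 output range.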
 13242  	for i = 0; i < (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).Fframe_length; i++ {
 13243  		*(*int16)(unsafe.Pointer(signal + uintptr(i)*2)) = func() int16 {
 13244  			if (func() int32 {
 13245  				if (10) == 1 {
 13246  					return (((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13247  						if (16) == 1 {
 13248  							return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13249  						}
 13250  						return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13251  					}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13252  						if (16) == 1 {
 13253  							return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13254  						}
 13255  						return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13256  					}()))) & 1))
 13257  				}
 13258  				return ((((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13259  					if (16) == 1 {
 13260  						return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13261  					}
 13262  					return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13263  				}()))) >> ((10) - 1)) + 1) >> 1)
 13264  			}()) > 0x7FFF {
 13265  				return int16(0x7FFF)
 13266  			}
 13267  			return func() int16 {
 13268  				if (func() int32 {
 13269  					if (10) == 1 {
 13270  						return (((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13271  							if (16) == 1 {
 13272  								return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13273  							}
 13274  							return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13275  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13276  							if (16) == 1 {
 13277  								return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13278  							}
 13279  							return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13280  						}()))) & 1))
 13281  					}
 13282  					return ((((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13283  						if (16) == 1 {
 13284  							return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13285  						}
 13286  						return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13287  					}()))) >> ((10) - 1)) + 1) >> 1)
 13288  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 13289  					return libc.Int16FromInt32(0x8000)
 13290  				}
 13291  				return func() int16 {
 13292  					if (10) == 1 {
 13293  						return (int16(((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13294  							if (16) == 1 {
 13295  								return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13296  							}
 13297  							return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13298  						}()))) >> 1) + ((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13299  							if (16) == 1 {
 13300  								return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13301  							}
 13302  							return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13303  						}()))) & 1)))
 13304  					}
 13305  					return (int16((((((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 980 /* &sig_Q10[0] */ + uintptr(i)*4))) * (func() int32 {
 13306  						if (16) == 1 {
 13307  							return (((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) & 1))
 13308  						}
 13309  						return ((((*(*int32)(unsafe.Pointer((psPLC + 72 /* &.prevGain_Q16 */) + 3*4))) >> ((16) - 1)) + 1) >> 1)
 13310  					}()))) >> ((10) - 1)) + 1) >> 1))
 13311  				}()
 13312  			}()
 13313  		}()
 13314  	}
 13315  
 13316  	/**************************************/
 13317  	/* Update states                      */
 13318  	/**************************************/
 13319  	(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Frand_seed = rand_seed
 13320  	(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).FrandScale_Q14 = rand_scale_Q14
 13321  	for i = 0; i < 4; i++ {
 13322  		*(*int32)(unsafe.Pointer((psDecCtrl /* &.pitchL */) + uintptr(i)*4)) = lag
 13323  	}
 13324  }
 13325  
 13326  /* Glues concealed frames with newly received good frames            */
 13327  func SKP_Silk_PLC_glue_frames(tls *libc.TLS, psDec uintptr, psDecCtrl uintptr, signal uintptr, length int32) { /* SKP_Silk_PLC.c:333:6: */
 13328  	bp := tls.Alloc(8)
 13329  	defer tls.Free(8)
 13330  
 13331  	var i int32
 13332  	// var energy_shift int32 at bp+4, 4
 13333  
 13334  	// var energy int32 at bp, 4
 13335  
 13336  	var psPLC uintptr
 13337  	psPLC = (psDec + 13588 /* &.sPLC */)
 13338  
 13339  	if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FlossCnt != 0 {
 13340  		/* Calculate energy in concealed residual */
 13341  		SKP_Silk_sum_sqr_shift(tls, (psPLC + 60 /* &.conc_energy */), (psPLC + 64 /* &.conc_energy_shift */), signal, length)
 13342  
 13343  		(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Flast_frame_lost = 1
 13344  	} else {
 13345  		if (*SKP_Silk_decoder_state)(unsafe.Pointer(psDec)).FsPLC.Flast_frame_lost != 0 {
 13346  			/* Calculate residual in decoded signal if last frame was lost */
 13347  			SKP_Silk_sum_sqr_shift(tls, bp /* &energy */, bp+4 /* &energy_shift */, signal, length)
 13348  
 13349  			/* Normalize energies */
 13350  			if *(*int32)(unsafe.Pointer(bp + 4 /* energy_shift */)) > (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy_shift {
 13351  				(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy = (((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy) >> (*(*int32)(unsafe.Pointer(bp + 4 /* energy_shift */)) - (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy_shift))
 13352  			} else if *(*int32)(unsafe.Pointer(bp + 4 /* energy_shift */)) < (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy_shift {
 13353  				*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) >> ((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy_shift - *(*int32)(unsafe.Pointer(bp + 4 /* energy_shift */))))
 13354  			}
 13355  
 13356  			/* Fade in the energy difference */
 13357  			if *(*int32)(unsafe.Pointer(bp /* energy */)) > (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy {
 13358  				var frac_Q24 int32
 13359  				var LZ int32
 13360  				var gain_Q12 int32
 13361  				var slope_Q12 int32
 13362  
 13363  				LZ = SKP_Silk_CLZ32(tls, (*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy)
 13364  				LZ = (LZ - 1)
 13365  				(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy = (((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy) << (LZ))
 13366  				*(*int32)(unsafe.Pointer(bp /* energy */)) = ((*(*int32)(unsafe.Pointer(bp /* energy */))) >> (SKP_max_32(tls, (24 - LZ), 0)))
 13367  
 13368  				frac_Q24 = (((*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Fconc_energy) / (func() int32 {
 13369  					if (*(*int32)(unsafe.Pointer(bp /* energy */))) > (1) {
 13370  						return *(*int32)(unsafe.Pointer(bp /* energy */))
 13371  					}
 13372  					return 1
 13373  				}()))
 13374  
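        				// frac_Q24 ~ conc_energy / energy in Q24, so gain_Q12 = sqrt(frac) is a Q12
        				// attenuation applied at the start of the frame and ramped linearly back up
        				// to unity (1 << 12) over the length of the frame.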
 13375  				gain_Q12 = SKP_Silk_SQRT_APPROX(tls, frac_Q24)
 13376  				slope_Q12 = (((int32(1) << 12) - gain_Q12) / (length))
 13377  
 13378  				for i = 0; i < length; i++ {
 13379  					*(*int16)(unsafe.Pointer(signal + uintptr(i)*2)) = (int16(((gain_Q12) * (int32(*(*int16)(unsafe.Pointer(signal + uintptr(i)*2))))) >> (12)))
 13380  					gain_Q12 = gain_Q12 + (slope_Q12)
 13381  					gain_Q12 = func() int32 {
 13382  						if (gain_Q12) < (int32(1) << 12) {
 13383  							return gain_Q12
 13384  						}
 13385  						return (int32(1) << 12)
 13386  					}()
 13387  				}
 13388  			}
 13389  		}
 13390  		(*SKP_Silk_PLC_struct)(unsafe.Pointer(psPLC)).Flast_frame_lost = 0
 13391  
 13392  	}
 13393  }
 13394  
 13395  func SKP_Silk_warped_LPC_analysis_filter_FIX(tls *libc.TLS, state uintptr, res uintptr, coef_Q13 uintptr, input uintptr, lambda_Q16 int16, length int32, order int32) { /* SKP_Silk_prefilter_FIX.c:42:6: */
 13396  	var n int32
 13397  	var i int32
 13398  	var acc_Q11 int32
 13399  	var tmp1 int32
 13400  	var tmp2 int32
 13401  
 13402  	/* Order must be even */
 13403  
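        	// Warped LPC analysis: a chain of first-order allpass sections with warping coefficient
        	// lambda_Q16; each section state is weighted by coef_Q13 and accumulated into acc_Q11,
        	// and the rounded prediction is subtracted from the input to form the residual.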
 13404  	for n = 0; n < length; n++ {
 13405  		/* Output of lowpass section */
 13406  		tmp2 = ((*(*int32)(unsafe.Pointer(state))) + ((((*(*int32)(unsafe.Pointer(state + 1*4))) >> 16) * (int32(lambda_Q16))) + ((((*(*int32)(unsafe.Pointer(state + 1*4))) & 0x0000FFFF) * (int32(lambda_Q16))) >> 16)))
 13407  		*(*int32)(unsafe.Pointer(state)) = ((int32(*(*int16)(unsafe.Pointer(input + uintptr(n)*2)))) << (14))
 13408  		/* Output of allpass section */
 13409  		tmp1 = ((*(*int32)(unsafe.Pointer(state + 1*4))) + ((((*(*int32)(unsafe.Pointer(state + 2*4)) - tmp2) >> 16) * (int32(lambda_Q16))) + ((((*(*int32)(unsafe.Pointer(state + 2*4)) - tmp2) & 0x0000FFFF) * (int32(lambda_Q16))) >> 16)))
 13410  		*(*int32)(unsafe.Pointer(state + 1*4)) = tmp2
 13411  		acc_Q11 = ((((tmp2) >> 16) * (int32(*(*int16)(unsafe.Pointer(coef_Q13))))) + ((((tmp2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(coef_Q13))))) >> 16))
 13412  		/* Loop over allpass sections */
 13413  		for i = 2; i < order; i = i + (2) {
 13414  			/* Output of allpass section */
 13415  			tmp2 = ((*(*int32)(unsafe.Pointer(state + uintptr(i)*4))) + ((((*(*int32)(unsafe.Pointer(state + uintptr((i+1))*4)) - tmp1) >> 16) * (int32(lambda_Q16))) + ((((*(*int32)(unsafe.Pointer(state + uintptr((i+1))*4)) - tmp1) & 0x0000FFFF) * (int32(lambda_Q16))) >> 16)))
 13416  			*(*int32)(unsafe.Pointer(state + uintptr(i)*4)) = tmp1
 13417  			acc_Q11 = ((acc_Q11) + ((((tmp1) >> 16) * (int32(*(*int16)(unsafe.Pointer(coef_Q13 + uintptr((i-1))*2))))) + ((((tmp1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(coef_Q13 + uintptr((i-1))*2))))) >> 16)))
 13418  			/* Output of allpass section */
 13419  			tmp1 = ((*(*int32)(unsafe.Pointer(state + uintptr((i+1))*4))) + ((((*(*int32)(unsafe.Pointer(state + uintptr((i+2))*4)) - tmp2) >> 16) * (int32(lambda_Q16))) + ((((*(*int32)(unsafe.Pointer(state + uintptr((i+2))*4)) - tmp2) & 0x0000FFFF) * (int32(lambda_Q16))) >> 16)))
 13420  			*(*int32)(unsafe.Pointer(state + uintptr((i+1))*4)) = tmp2
 13421  			acc_Q11 = ((acc_Q11) + ((((tmp2) >> 16) * (int32(*(*int16)(unsafe.Pointer(coef_Q13 + uintptr(i)*2))))) + ((((tmp2) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(coef_Q13 + uintptr(i)*2))))) >> 16)))
 13422  		}
 13423  		*(*int32)(unsafe.Pointer(state + uintptr(order)*4)) = tmp1
 13424  		acc_Q11 = ((acc_Q11) + ((((tmp1) >> 16) * (int32(*(*int16)(unsafe.Pointer(coef_Q13 + uintptr((order-1))*2))))) + ((((tmp1) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(coef_Q13 + uintptr((order-1))*2))))) >> 16)))
 13425  		*(*int16)(unsafe.Pointer(res + uintptr(n)*2)) = func() int16 {
 13426  			if (int32(*(*int16)(unsafe.Pointer(input + uintptr(n)*2))) - (func() int32 {
 13427  				if (11) == 1 {
 13428  					return (((acc_Q11) >> 1) + ((acc_Q11) & 1))
 13429  				}
 13430  				return ((((acc_Q11) >> ((11) - 1)) + 1) >> 1)
 13431  			}())) > 0x7FFF {
 13432  				return int16(0x7FFF)
 13433  			}
 13434  			return func() int16 {
 13435  				if (int32(*(*int16)(unsafe.Pointer(input + uintptr(n)*2))) - (func() int32 {
 13436  					if (11) == 1 {
 13437  						return (((acc_Q11) >> 1) + ((acc_Q11) & 1))
 13438  					}
 13439  					return ((((acc_Q11) >> ((11) - 1)) + 1) >> 1)
 13440  				}())) < (int32(libc.Int16FromInt32(0x8000))) {
 13441  					return libc.Int16FromInt32(0x8000)
 13442  				}
 13443  				return (int16(int32(*(*int16)(unsafe.Pointer(input + uintptr(n)*2))) - (func() int32 {
 13444  					if (11) == 1 {
 13445  						return (((acc_Q11) >> 1) + ((acc_Q11) & 1))
 13446  					}
 13447  					return ((((acc_Q11) >> ((11) - 1)) + 1) >> 1)
 13448  				}())))
 13449  			}()
 13450  		}()
 13451  	}
 13452  }
 13453  
 13454  func SKP_Silk_prefilter_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr, xw uintptr, x uintptr) { /* SKP_Silk_prefilter_FIX.c:83:6: */
 13455  	bp := tls.Alloc(756)
 13456  	defer tls.Free(756)
 13457  
 13458  	var P uintptr = (psEnc + 19556 /* &.sPrefilt */)
 13459  	var j int32
 13460  	var k int32
 13461  	var lag int32
 13462  	var tmp_32 int32
 13463  	var AR1_shp_Q13 uintptr
 13464  	var px uintptr
 13465  	var pxw uintptr
 13466  	var HarmShapeGain_Q12 int32
 13467  	var Tilt_Q14 int32
 13468  	var HarmShapeFIRPacked_Q12 int32
 13469  	var LF_shp_Q14 int32
 13470  	// var x_filt_Q12 [120]int32 at bp+276, 480
 13471  
 13472  	// var st_res [136]int16 at bp, 272
 13473  
 13474  	// var B_Q12 [2]int16 at bp+272, 4
 13475  
 13476  	/* Setup pointers */
 13477  	px = x
 13478  	pxw = xw
 13479  	lag = (*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FlagPrev
 13480  	for k = 0; k < 4; k++ {
 13481  		/* Update Variables that change per sub frame */
 13482  		if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
 13483  			lag = *(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 108 /* &.pitchL */) + uintptr(k)*4))
 13484  		}
 13485  
 13486  		/* Noise shape parameters */
 13487  		HarmShapeGain_Q12 = ((((*(*int32)(unsafe.Pointer((psEncCtrl + 572 /* &.HarmShapeGain_Q14 */) + uintptr(k)*4))) >> 16) * (int32((int16(16384 - *(*int32)(unsafe.Pointer((psEncCtrl + 540 /* &.HarmBoost_Q14 */) + uintptr(k)*4))))))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 572 /* &.HarmShapeGain_Q14 */) + uintptr(k)*4))) & 0x0000FFFF) * (int32((int16(16384 - *(*int32)(unsafe.Pointer((psEncCtrl + 540 /* &.HarmBoost_Q14 */) + uintptr(k)*4))))))) >> 16))
 13488  
 13489  		HarmShapeFIRPacked_Q12 = ((HarmShapeGain_Q12) >> (2))
 13490  		HarmShapeFIRPacked_Q12 = HarmShapeFIRPacked_Q12 | (((HarmShapeGain_Q12) >> (1)) << (16))
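        		// HarmShapeFIRPacked_Q12 packs the 3-tap harmonic shaping filter: the two outer taps
        		// (gain / 4) in the low 16 bits and the centre tap (gain / 2) in the high 16 bits;
        		// SKP_Silk_prefilt_FIX unpacks it again when filtering around the pitch lag.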
 13491  		Tilt_Q14 = *(*int32)(unsafe.Pointer((psEncCtrl + 556 /* &.Tilt_Q14 */) + uintptr(k)*4))
 13492  		LF_shp_Q14 = *(*int32)(unsafe.Pointer((psEncCtrl + 508 /* &.LF_shp_Q14 */) + uintptr(k)*4))
 13493  		AR1_shp_Q13 = ((psEncCtrl + 252 /* &.AR1_Q13 */) + uintptr((k*16))*2)
 13494  
 13495  		/* Short term FIR filtering */
 13496  		SKP_Silk_warped_LPC_analysis_filter_FIX(tls, P+1024 /* &.sAR_shp */, bp /* &st_res[0] */, AR1_shp_Q13, px,
 13497  			int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fwarping_Q16), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FshapingLPCOrder)
 13498  
 13499  		/* reduce (mainly) low frequencies during harmonic emphasis */
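        		// Two-tap emphasis filter B_Q12: tap 0 is GainsPre_Q14 rounded to Q12; tap 1 is a
        		// negative tap built from HarmBoost, HarmShapeGain and coding quality, scaled by
        		// -GainsPre and saturated to 16 bits, attenuating low frequencies during harmonic emphasis.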
 13500  		*(*int16)(unsafe.Pointer(bp + 272 /* &B_Q12[0] */)) = func() int16 {
 13501  			if (2) == 1 {
 13502  				return (int16(((*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4))) & 1)))
 13503  			}
 13504  			return (int16((((*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4))) >> ((2) - 1)) + 1) >> 1))
 13505  		}()
 13506  		tmp_32 = ((SKP_FIX_CONST(tls, 0.05, 26)) + ((int32(int16(*(*int32)(unsafe.Pointer((psEncCtrl + 540 /* &.HarmBoost_Q14 */) + uintptr(k)*4))))) * (int32(int16(HarmShapeGain_Q12)))))                                                                                       /* Q26 */
 13507  		tmp_32 = ((tmp_32) + ((int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14))) * (int32(int16(SKP_FIX_CONST(tls, 0.1, 12))))))                                                                                                      /* Q26 */
 13508  		tmp_32 = ((((tmp_32) >> 16) * (int32(int16(-*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4)))))) + ((((tmp_32) & 0x0000FFFF) * (int32(int16(-*(*int32)(unsafe.Pointer((psEncCtrl + 524 /* &.GainsPre_Q14 */) + uintptr(k)*4)))))) >> 16)) /* Q24 */
 13509  		tmp_32 = func() int32 {
 13510  			if (12) == 1 {
 13511  				return (((tmp_32) >> 1) + ((tmp_32) & 1))
 13512  			}
 13513  			return ((((tmp_32) >> ((12) - 1)) + 1) >> 1)
 13514  		}() /* Q12 */
 13515  		*(*int16)(unsafe.Pointer(bp + 272 /* &B_Q12[0] */ + 1*2)) = func() int16 {
 13516  			if (tmp_32) > 0x7FFF {
 13517  				return int16(0x7FFF)
 13518  			}
 13519  			return func() int16 {
 13520  				if (tmp_32) < (int32(libc.Int16FromInt32(0x8000))) {
 13521  					return libc.Int16FromInt32(0x8000)
 13522  				}
 13523  				return int16(tmp_32)
 13524  			}()
 13525  		}()
 13526  
 13527  		*(*int32)(unsafe.Pointer(bp + 276 /* &x_filt_Q12[0] */)) = (((int32(*(*int16)(unsafe.Pointer(bp /* &st_res[0] */)))) * (int32(*(*int16)(unsafe.Pointer(bp + 272 /* &B_Q12[0] */))))) + ((int32(int16((*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsHarmHP))) * (int32(*(*int16)(unsafe.Pointer(bp + 272 /* &B_Q12[0] */ + 1*2))))))
 13528  		for j = 1; j < (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length; j++ {
 13529  			*(*int32)(unsafe.Pointer(bp + 276 /* &x_filt_Q12[0] */ + uintptr(j)*4)) = (((int32(*(*int16)(unsafe.Pointer(bp /* &st_res[0] */ + uintptr(j)*2)))) * (int32(*(*int16)(unsafe.Pointer(bp + 272 /* &B_Q12[0] */))))) + ((int32(*(*int16)(unsafe.Pointer(bp /* &st_res[0] */ + uintptr((j-1))*2)))) * (int32(*(*int16)(unsafe.Pointer(bp + 272 /* &B_Q12[0] */ + 1*2))))))
 13530  		}
 13531  		(*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsHarmHP = int32(*(*int16)(unsafe.Pointer(bp /* &st_res[0] */ + uintptr(((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length-1))*2)))
 13532  
 13533  		SKP_Silk_prefilt_FIX(tls, P, bp+276 /* &x_filt_Q12[0] */, pxw, HarmShapeFIRPacked_Q12, Tilt_Q14,
 13534  			LF_shp_Q14, lag, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length)
 13535  
 13536  		px += 2 * (uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length))
 13537  		pxw += 2 * (uintptr((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length))
 13538  	}
 13539  
 13540  	(*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FlagPrev = *(*int32)(unsafe.Pointer((psEncCtrl /* &.sCmn */ + 108 /* &.pitchL */) + 3*4))
 13541  }
 13542  
 13543  /* SKP_Silk_prefilter. Prefilter for finding Quantizer input signal                           */
 13544  func SKP_Silk_prefilt_FIX(tls *libc.TLS, P uintptr, st_res_Q12 uintptr, xw uintptr, HarmShapeFIRPacked_Q12 int32, Tilt_Q14 int32, LF_shp_Q14 int32, lag int32, length int32) { /* SKP_Silk_prefilter_FIX.c:150:17: */
 13545  	var i int32
 13546  	var idx int32
 13547  	var LTP_shp_buf_idx int32
 13548  	var n_LTP_Q12 int32
 13549  	var n_Tilt_Q10 int32
 13550  	var n_LF_Q10 int32
 13551  	var sLF_MA_shp_Q12 int32
 13552  	var sLF_AR_shp_Q12 int32
 13553  	var LTP_shp_buf uintptr
 13554  
 13555  	/* Use local temp variables instead of the struct fields, to speed things up */
 13556  	LTP_shp_buf = P /* &.sLTP_shp */
 13557  	LTP_shp_buf_idx = (*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsLTP_shp_buf_idx
 13558  	sLF_AR_shp_Q12 = (*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsLF_AR_shp_Q12
 13559  	sLF_MA_shp_Q12 = (*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsLF_MA_shp_Q12
 13560  
 13561  	for i = 0; i < length; i++ {
 13562  		if lag > 0 {
 13563  			/* unrolled loop */
 13564  
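        			// 3-tap harmonic shaping FIR around the pitch lag: the outer taps use the low
        			// 16 bits of HarmShapeFIRPacked_Q12, the centre tap its high 16 bits; the buffer
        			// index wraps modulo the 512-sample shaping buffer.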
 13565  			idx = (lag + LTP_shp_buf_idx)
 13566  			n_LTP_Q12 = ((int32(*(*int16)(unsafe.Pointer(LTP_shp_buf + uintptr((((idx-(3/2))-1)&(512-1)))*2)))) * (int32(int16(HarmShapeFIRPacked_Q12))))
 13567  			n_LTP_Q12 = ((n_LTP_Q12) + ((int32(*(*int16)(unsafe.Pointer(LTP_shp_buf + uintptr(((idx-(3/2))&(512-1)))*2)))) * ((HarmShapeFIRPacked_Q12) >> 16)))
 13568  			n_LTP_Q12 = ((n_LTP_Q12) + ((int32(*(*int16)(unsafe.Pointer(LTP_shp_buf + uintptr((((idx-(3/2))+1)&(512-1)))*2)))) * (int32(int16(HarmShapeFIRPacked_Q12)))))
 13569  		} else {
 13570  			n_LTP_Q12 = 0
 13571  		}
 13572  
 13573  		n_Tilt_Q10 = ((((sLF_AR_shp_Q12) >> 16) * (int32(int16(Tilt_Q14)))) + ((((sLF_AR_shp_Q12) & 0x0000FFFF) * (int32(int16(Tilt_Q14)))) >> 16))
 13574  		n_LF_Q10 = (((((sLF_AR_shp_Q12) >> 16) * ((LF_shp_Q14) >> 16)) + ((((sLF_AR_shp_Q12) & 0x0000FFFF) * ((LF_shp_Q14) >> 16)) >> 16)) + ((((sLF_MA_shp_Q12) >> 16) * (int32(int16(LF_shp_Q14)))) + ((((sLF_MA_shp_Q12) & 0x0000FFFF) * (int32(int16(LF_shp_Q14)))) >> 16)))
 13575  
 13576  		sLF_AR_shp_Q12 = ((*(*int32)(unsafe.Pointer(st_res_Q12 + uintptr(i)*4))) - ((n_Tilt_Q10) << (2)))
 13577  		sLF_MA_shp_Q12 = ((sLF_AR_shp_Q12) - ((n_LF_Q10) << (2)))
 13578  
 13579  		LTP_shp_buf_idx = ((LTP_shp_buf_idx - 1) & (512 - 1))
 13580  		*(*int16)(unsafe.Pointer(LTP_shp_buf + uintptr(LTP_shp_buf_idx)*2)) = func() int16 {
 13581  			if (func() int32 {
 13582  				if (12) == 1 {
 13583  					return (((sLF_MA_shp_Q12) >> 1) + ((sLF_MA_shp_Q12) & 1))
 13584  				}
 13585  				return ((((sLF_MA_shp_Q12) >> ((12) - 1)) + 1) >> 1)
 13586  			}()) > 0x7FFF {
 13587  				return int16(0x7FFF)
 13588  			}
 13589  			return func() int16 {
 13590  				if (func() int32 {
 13591  					if (12) == 1 {
 13592  						return (((sLF_MA_shp_Q12) >> 1) + ((sLF_MA_shp_Q12) & 1))
 13593  					}
 13594  					return ((((sLF_MA_shp_Q12) >> ((12) - 1)) + 1) >> 1)
 13595  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 13596  					return libc.Int16FromInt32(0x8000)
 13597  				}
 13598  				return func() int16 {
 13599  					if (12) == 1 {
 13600  						return (int16(((sLF_MA_shp_Q12) >> 1) + ((sLF_MA_shp_Q12) & 1)))
 13601  					}
 13602  					return (int16((((sLF_MA_shp_Q12) >> ((12) - 1)) + 1) >> 1))
 13603  				}()
 13604  			}()
 13605  		}()
 13606  
 13607  		*(*int16)(unsafe.Pointer(xw + uintptr(i)*2)) = func() int16 {
 13608  			if (func() int32 {
 13609  				if (12) == 1 {
 13610  					return ((((sLF_MA_shp_Q12) - (n_LTP_Q12)) >> 1) + (((sLF_MA_shp_Q12) - (n_LTP_Q12)) & 1))
 13611  				}
 13612  				return (((((sLF_MA_shp_Q12) - (n_LTP_Q12)) >> ((12) - 1)) + 1) >> 1)
 13613  			}()) > 0x7FFF {
 13614  				return int16(0x7FFF)
 13615  			}
 13616  			return func() int16 {
 13617  				if (func() int32 {
 13618  					if (12) == 1 {
 13619  						return ((((sLF_MA_shp_Q12) - (n_LTP_Q12)) >> 1) + (((sLF_MA_shp_Q12) - (n_LTP_Q12)) & 1))
 13620  					}
 13621  					return (((((sLF_MA_shp_Q12) - (n_LTP_Q12)) >> ((12) - 1)) + 1) >> 1)
 13622  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 13623  					return libc.Int16FromInt32(0x8000)
 13624  				}
 13625  				return func() int16 {
 13626  					if (12) == 1 {
 13627  						return (int16((((sLF_MA_shp_Q12) - (n_LTP_Q12)) >> 1) + (((sLF_MA_shp_Q12) - (n_LTP_Q12)) & 1)))
 13628  					}
 13629  					return (int16(((((sLF_MA_shp_Q12) - (n_LTP_Q12)) >> ((12) - 1)) + 1) >> 1))
 13630  				}()
 13631  			}()
 13632  		}()
 13633  	}
 13634  
 13635  	/* Copy temp variable back to state */
 13636  	(*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsLF_AR_shp_Q12 = sLF_AR_shp_Q12
 13637  	(*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsLF_MA_shp_Q12 = sLF_MA_shp_Q12
 13638  	(*SKP_Silk_prefilter_state_FIX)(unsafe.Pointer(P)).FsLTP_shp_buf_idx = LTP_shp_buf_idx
 13639  }
 13640  
 13641  /***********************************************************************
 13642  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
 13643  Redistribution and use in source and binary forms, with or without
 13644  modification, (subject to the limitations in the disclaimer below)
 13645  are permitted provided that the following conditions are met:
 13646  - Redistributions of source code must retain the above copyright notice,
 13647  this list of conditions and the following disclaimer.
 13648  - Redistributions in binary form must reproduce the above copyright
 13649  notice, this list of conditions and the following disclaimer in the
 13650  documentation and/or other materials provided with the distribution.
 13651  - Neither the name of Skype Limited, nor the names of specific
 13652  contributors, may be used to endorse or promote products derived from
 13653  this software without specific prior written permission.
 13654  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
 13655  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 13656  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
 13657  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 13658  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 13659  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 13660  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 13661  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 13662  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 13663  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 13664  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 13665  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 13666  ***********************************************************************/
 13667  
 13668  /*******************/
 13669  /* Pitch estimator */
 13670  /*******************/
 13671  
 13672  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
 13673  
 13674  /* Bandwidth expansion for whitening filter in pitch analysis */
 13675  
 13676  /* Threshold used by pitch estimator for early escape */
 13677  
 13678  /*********************/
 13679  /* Linear prediction */
 13680  /*********************/
 13681  
 13682  /* LPC analysis defines: regularization and bandwidth expansion */
 13683  
 13684  /* LTP analysis defines */
 13685  
 13686  /* LTP quantization settings */
 13687  
 13688  /***********************/
 13689  /* High pass filtering */
 13690  /***********************/
 13691  
 13692  /* Smoothing parameters for low end of pitch frequency range estimation */
 13693  
 13694  /* Min and max values for low end of pitch frequency range estimation */
 13695  
 13696  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
 13697  
 13698  /***********/
 13699  /* Various */
 13700  /***********/
 13701  
 13702  /* Required speech activity for counting frame as active */
 13703  
 13704  /* Speech Activity LBRR enable threshold (needs tuning) */
 13705  
 13706  /*************************/
 13707  /* Perceptual parameters */
 13708  /*************************/
 13709  
 13710  /* reduction in coding SNR during low speech activity */
 13711  
 13712  /* factor for reducing quantization noise during voiced speech */
 13713  
 13714  /* factor for reducing quantization noise for unvoiced sparse signals */
 13715  
 13716  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
 13717  
 13718  /* warping control */
 13719  
 13720  /* fraction added to first autocorrelation value */
 13721  
 13722  /* noise shaping filter chirp factor */
 13723  
 13724  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
 13725  
 13726  /* gain reduction for fricatives */
 13727  
 13728  /* extra harmonic boosting (signal shaping) at low bitrates */
 13729  
 13730  /* extra harmonic boosting (signal shaping) for noisy input signals */
 13731  
 13732  /* harmonic noise shaping */
 13733  
 13734  /* extra harmonic noise shaping for high bitrates or noisy input */
 13735  
 13736  /* parameter for shaping noise towards higher frequencies */
 13737  
 13738  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
 13739  
 13740  /* parameter for applying a high-pass tilt to the input signal */
 13741  
 13742  /* parameter for extra high-pass tilt to the input signal at high rates */
 13743  
 13744  /* parameter for reducing noise at the very low frequencies */
 13745  
 13746  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
 13747  
 13748  /* noise floor to put a lower limit on the quantization step size */
 13749  
 13750  /* noise floor relative to active speech gain level */
 13751  
 13752  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
 13753  
 13754  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
 13755  
 13756  /* parameters defining the R/D tradeoff in the residual quantizer */
 13757  
 13758  /* Processing of gains */
 13759  func SKP_Silk_process_gains_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr) { /* SKP_Silk_process_gains_FIX.c:32:6: */
 13760  	var psShapeSt uintptr = (psEnc + 19540 /* &.sShape */)
 13761  	var k int32
 13762  	var s_Q16 int32
 13763  	var InvMaxSqrVal_Q16 int32
 13764  	var gain int32
 13765  	var gain_squared int32
 13766  	var ResNrg int32
 13767  	var ResNrgPart int32
 13768  	var quant_offset_Q10 int32
 13769  
 13770  	/* Gain reduction when LTP coding gain is high */
 13771  	if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
 13772  		/*s = -0.5f * SKP_sigmoid( 0.25f * ( psEncCtrl->LTPredCodGain - 12.0f ) ); */
 13773  		s_Q16 = -SKP_Silk_sigm_Q15(tls, func() int32 {
 13774  			if (4) == 1 {
 13775  				return ((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7 - SKP_FIX_CONST(tls, 12.0, 7)) >> 1) + (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7 - SKP_FIX_CONST(tls, 12.0, 7)) & 1))
 13776  			}
 13777  			return (((((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7 - SKP_FIX_CONST(tls, 12.0, 7)) >> ((4) - 1)) + 1) >> 1)
 13778  		}())
 13779  		for k = 0; k < 4; k++ {
 13780  			*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) >> 16) * (int32(int16(s_Q16)))) + ((((*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(s_Q16)))) >> 16)))
 13781  		}
 13782  	}
 13783  
 13784  	/* Limit the quantized signal */
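        	// InvMaxSqrVal_Q16 = 2^(0.33 * (70 dB - current SNR)) divided by the subframe length,
        	// i.e. roughly the inverse of the largest allowed squared sample value for the SNR target.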
 13785  	InvMaxSqrVal_Q16 = ((SKP_Silk_log2lin(tls, ((((SKP_FIX_CONST(tls, 70.0, 7) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7) >> 16) * (int32(int16(SKP_FIX_CONST(tls, 0.33, 16))))) + ((((SKP_FIX_CONST(tls, 70.0, 7) - (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcurrent_SNR_dB_Q7) & 0x0000FFFF) * (int32(int16(SKP_FIX_CONST(tls, 0.33, 16))))) >> 16)))) / ((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Fsubfr_length))
 13786  
 13787  	for k = 0; k < 4; k++ {
 13788  		/* Soft limit on ratio residual energy and squared gains */
 13789  		ResNrg = *(*int32)(unsafe.Pointer((psEncCtrl + 640 /* &.ResNrg */) + uintptr(k)*4))
 13790  		ResNrgPart = (((((ResNrg) >> 16) * (int32(int16(InvMaxSqrVal_Q16)))) + ((((ResNrg) & 0x0000FFFF) * (int32(int16(InvMaxSqrVal_Q16)))) >> 16)) + ((ResNrg) * (func() int32 {
 13791  			if (16) == 1 {
 13792  				return (((InvMaxSqrVal_Q16) >> 1) + ((InvMaxSqrVal_Q16) & 1))
 13793  			}
 13794  			return ((((InvMaxSqrVal_Q16) >> ((16) - 1)) + 1) >> 1)
 13795  		}())))
 13796  		if *(*int32)(unsafe.Pointer((psEncCtrl + 656 /* &.ResNrgQ */) + uintptr(k)*4)) > 0 {
 13797  			if *(*int32)(unsafe.Pointer((psEncCtrl + 656 /* &.ResNrgQ */) + uintptr(k)*4)) < 32 {
 13798  				ResNrgPart = func() int32 {
 13799  					if (*(*int32)(unsafe.Pointer((psEncCtrl + 656 /* &.ResNrgQ */) + uintptr(k)*4))) == 1 {
 13800  						return (((ResNrgPart) >> 1) + ((ResNrgPart) & 1))
 13801  					}
 13802  					return ((((ResNrgPart) >> ((*(*int32)(unsafe.Pointer((psEncCtrl + 656 /* &.ResNrgQ */) + uintptr(k)*4))) - 1)) + 1) >> 1)
 13803  				}()
 13804  			} else {
 13805  				ResNrgPart = 0
 13806  			}
 13807  		} else if *(*int32)(unsafe.Pointer((psEncCtrl + 656 /* &.ResNrgQ */) + uintptr(k)*4)) != 0 {
 13808  			if ResNrgPart > (int32((0x7FFFFFFF)) >> (-*(*int32)(unsafe.Pointer((psEncCtrl + 656 /* &.ResNrgQ */) + uintptr(k)*4)))) {
 13809  				ResNrgPart = 0x7FFFFFFF
 13810  			} else {
 13811  				ResNrgPart = ((ResNrgPart) << (-*(*int32)(unsafe.Pointer((psEncCtrl + 656 /* &.ResNrgQ */) + uintptr(k)*4))))
 13812  			}
 13813  		}
 13814  		gain = *(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4))
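        		// Saturating signed 32-bit add of ResNrgPart and the top 32 bits of gain*gain; the
        		// branches below clamp to INT32_MIN / INT32_MAX when the addition overflows.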
 13815  		gain_squared = func() int32 {
 13816  			if ((uint32((ResNrgPart) + (int32(((int64_t(gain)) * (int64_t(gain))) >> (32))))) & 0x80000000) == uint32(0) {
 13817  				return func() int32 {
 13818  					if ((uint32((ResNrgPart) & (int32(((int64_t(gain)) * (int64_t(gain))) >> (32))))) & 0x80000000) != uint32(0) {
 13819  						return libc.Int32FromUint32(0x80000000)
 13820  					}
 13821  					return ((ResNrgPart) + (int32(((int64_t(gain)) * (int64_t(gain))) >> (32))))
 13822  				}()
 13823  			}
 13824  			return func() int32 {
 13825  				if ((uint32((ResNrgPart) | (int32(((int64_t(gain)) * (int64_t(gain))) >> (32))))) & 0x80000000) == uint32(0) {
 13826  					return 0x7FFFFFFF
 13827  				}
 13828  				return ((ResNrgPart) + (int32(((int64_t(gain)) * (int64_t(gain))) >> (32))))
 13829  			}()
 13830  		}()
 13831  		if gain_squared < 0x7FFF {
 13832  			/* recalculate with higher precision */
 13833  			gain_squared = ((((ResNrgPart) << (16)) + ((((gain) >> 16) * (int32(int16(gain)))) + ((((gain) & 0x0000FFFF) * (int32(int16(gain)))) >> 16))) + ((gain) * (func() int32 {
 13834  				if (16) == 1 {
 13835  					return (((gain) >> 1) + ((gain) & 1))
 13836  				}
 13837  				return ((((gain) >> ((16) - 1)) + 1) >> 1)
 13838  			}())))
 13839  
 13840  			gain = SKP_Silk_SQRT_APPROX(tls, gain_squared) /* Q8   */
 13841  			*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = ((func() int32 {
 13842  				if (int32((libc.Int32FromUint32(0x80000000))) >> (8)) > (int32((0x7FFFFFFF)) >> (8)) {
 13843  					return func() int32 {
 13844  						if (gain) > (int32((libc.Int32FromUint32(0x80000000))) >> (8)) {
 13845  							return (int32((libc.Int32FromUint32(0x80000000))) >> (8))
 13846  						}
 13847  						return func() int32 {
 13848  							if (gain) < (int32((0x7FFFFFFF)) >> (8)) {
 13849  								return (int32((0x7FFFFFFF)) >> (8))
 13850  							}
 13851  							return gain
 13852  						}()
 13853  					}()
 13854  				}
 13855  				return func() int32 {
 13856  					if (gain) > (int32((0x7FFFFFFF)) >> (8)) {
 13857  						return (int32((0x7FFFFFFF)) >> (8))
 13858  					}
 13859  					return func() int32 {
 13860  						if (gain) < (int32((libc.Int32FromUint32(0x80000000))) >> (8)) {
 13861  							return (int32((libc.Int32FromUint32(0x80000000))) >> (8))
 13862  						}
 13863  						return gain
 13864  					}()
 13865  				}()
 13866  			}()) << (8)) /* Q16  */
 13867  		} else {
 13868  			gain = SKP_Silk_SQRT_APPROX(tls, gain_squared) /* Q0   */
 13869  			*(*int32)(unsafe.Pointer((psEncCtrl + 128 /* &.Gains_Q16 */) + uintptr(k)*4)) = ((func() int32 {
 13870  				if (int32((libc.Int32FromUint32(0x80000000))) >> (16)) > (int32((0x7FFFFFFF)) >> (16)) {
 13871  					return func() int32 {
 13872  						if (gain) > (int32((libc.Int32FromUint32(0x80000000))) >> (16)) {
 13873  							return (int32((libc.Int32FromUint32(0x80000000))) >> (16))
 13874  						}
 13875  						return func() int32 {
 13876  							if (gain) < (int32((0x7FFFFFFF)) >> (16)) {
 13877  								return (int32((0x7FFFFFFF)) >> (16))
 13878  							}
 13879  							return gain
 13880  						}()
 13881  					}()
 13882  				}
 13883  				return func() int32 {
 13884  					if (gain) > (int32((0x7FFFFFFF)) >> (16)) {
 13885  						return (int32((0x7FFFFFFF)) >> (16))
 13886  					}
 13887  					return func() int32 {
 13888  						if (gain) < (int32((libc.Int32FromUint32(0x80000000))) >> (16)) {
 13889  							return (int32((libc.Int32FromUint32(0x80000000))) >> (16))
 13890  						}
 13891  						return gain
 13892  					}()
 13893  				}()
 13894  			}()) << (16)) /* Q16  */
 13895  		}
 13896  	}
 13897  
 13898  	/* Noise shaping quantization */
 13899  	SKP_Silk_gains_quant(tls, psEncCtrl /* &.sCmn */ +72 /* &.GainsIndices */, psEncCtrl+128, /* &.Gains_Q16 */
 13900  		(psShapeSt /* &.LastGainIndex */), (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnFramesInPayloadBuf)
 13901  	/* Set quantizer offset for voiced signals. Larger offset when LTP coding gain is low or tilt is high (i.e. low-pass) */
 13902  	if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
 13903  		if ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLTPredCodGain_Q7 + (((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_tilt_Q15) >> (8))) > SKP_FIX_CONST(tls, 1.0, 7) {
 13904  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FQuantOffsetType = 0
 13905  		} else {
 13906  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FQuantOffsetType = 1
 13907  		}
 13908  	}
 13909  
 13910  	/* Quantizer boundary adjustment */
 13911  	quant_offset_Q10 = int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_Quantization_Offsets_Q10)) + uintptr((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype)*4) + uintptr((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FQuantOffsetType)*2)))
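        	// Lambda_Q10, the rate/distortion tradeoff for the residual quantizer, works out to roughly
        	// 1.2 - 0.05*nStatesDelayedDecision - 0.3*speech_activity - 0.2*input_quality
        	// - 0.1*coding_quality + 1.5*quant_offset, with every term folded back to Q10.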
 13912  	(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FLambda_Q10 = (((((SKP_FIX_CONST(tls, 1.2, 10) +
 13913  		((int32(int16(SKP_FIX_CONST(tls, float64(-0.05), 10)))) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FnStatesDelayedDecision))))) +
 13914  		((((SKP_FIX_CONST(tls, float64(-0.3), 18)) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) + ((((SKP_FIX_CONST(tls, float64(-0.3), 18)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) >> 16))) +
 13915  		((((SKP_FIX_CONST(tls, float64(-0.2), 12)) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14)))) + ((((SKP_FIX_CONST(tls, float64(-0.2), 12)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Finput_quality_Q14)))) >> 16))) +
 13916  		((((SKP_FIX_CONST(tls, float64(-0.1), 12)) >> 16) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14)))) + ((((SKP_FIX_CONST(tls, float64(-0.1), 12)) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fcoding_quality_Q14)))) >> 16))) +
 13917  		((((SKP_FIX_CONST(tls, 1.5, 16)) >> 16) * (int32(int16(quant_offset_Q10)))) + ((((SKP_FIX_CONST(tls, 1.5, 16)) & 0x0000FFFF) * (int32(int16(quant_offset_Q10)))) >> 16)))
 13918  
 13919  }
 13920  
 13921  /* Limit, stabilize, convert and quantize NLSFs.    */
 13922  func SKP_Silk_process_NLSFs_FIX(tls *libc.TLS, psEnc uintptr, psEncCtrl uintptr, pNLSF_Q15 uintptr) { /* SKP_Silk_process_NLSFs_FIX.c:31:6: */
 13923  	bp := tls.Alloc(192)
 13924  	defer tls.Free(192)
 13925  
 13926  	var doInterpolate int32
 13927  	// var pNLSFW_Q6 [16]int32 at bp, 64
 13928  
 13929  	var NLSF_mu_Q15 int32
 13930  	var NLSF_mu_fluc_red_Q16 int32
 13931  	var i_sqr_Q15 int32
 13932  	var psNLSF_CB uintptr
 13933  
 13934  	/* Used only for NLSF interpolation */
 13935  	// var pNLSF0_temp_Q15 [16]int32 at bp+64, 64
 13936  
 13937  	// var pNLSFW0_temp_Q6 [16]int32 at bp+128, 64
 13938  
 13939  	var i int32
 13940  
 13941  	/***********************/
 13942  	/* Calculate mu values */
 13943  	/***********************/
 13944  	if (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype == 0 {
 13945  		/* NLSF_mu           = 0.002f - 0.001f * psEnc->speech_activity; */
 13946  		/* NLSF_mu_fluc_red  = 0.1f   - 0.05f  * psEnc->speech_activity; */
 13947  		NLSF_mu_Q15 = ((66) + (((int32((-8388)) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) + ((((-8388) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) >> 16)))
 13948  		NLSF_mu_fluc_red_Q16 = ((6554) + (((int32((-838848)) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) + ((((-838848) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) >> 16)))
 13949  	} else {
 13950  		/* NLSF_mu           = 0.005f - 0.004f * psEnc->speech_activity; */
 13951  		/* NLSF_mu_fluc_red  = 0.2f   - 0.1f   * psEnc->speech_activity - 0.1f * psEncCtrl->sparseness; */
 13952  		NLSF_mu_Q15 = ((164) + (((int32((-33554)) >> 16) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) + ((((-33554) & 0x0000FFFF) * (int32(int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8)))) >> 16)))
 13953  		NLSF_mu_fluc_red_Q16 = ((13107) + (((int32((-1677696)) >> 16) * (int32((int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8 + (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) + ((((-1677696) & 0x0000FFFF) * (int32((int16((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).Fspeech_activity_Q8 + (*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).Fsparseness_Q8))))) >> 16)))
 13954  	}
 13955  
 13956  	NLSF_mu_Q15 = func() int32 {
 13957  		if (NLSF_mu_Q15) > (1) {
 13958  			return NLSF_mu_Q15
 13959  		}
 13960  		return 1
 13961  	}()
 13962  
 13963  	/* Calculate NLSF weights */
 13964  
 13965  	SKP_Silk_NLSF_VQ_weights_laroia(tls, bp /* &pNLSFW_Q6[0] */, pNLSF_Q15, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
 13966  
 13967  	/* Update NLSF weights for interpolated NLSFs */
 13968  	doInterpolate = (libc.Bool32(((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FuseInterpolatedNLSFs == 1) && ((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FNLSFInterpCoef_Q2 < (int32(1) << 2))))
 13969  	if doInterpolate != 0 {
 13970  
 13971  		/* Calculate the interpolated NLSF vector for the first half */
 13972  		SKP_Silk_interpolate(tls, bp+64 /* &pNLSF0_temp_Q15[0] */, psEnc+20672 /* &.sPred */ +12 /* &.prev_NLSFq_Q15 */, pNLSF_Q15,
 13973  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FNLSFInterpCoef_Q2, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
 13974  
 13975  		/* Calculate first half NLSF weights for the interpolated NLSFs */
 13976  
 13977  		SKP_Silk_NLSF_VQ_weights_laroia(tls, bp+128 /* &pNLSFW0_temp_Q6[0] */, bp+64 /* &pNLSF0_temp_Q15[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
 13978  
 13979  		/* Update NLSF weights with contribution from first half */
 13980  		i_sqr_Q15 = (((int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FNLSFInterpCoef_Q2))) * (int32(int16((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FNLSFInterpCoef_Q2)))) << (11))
 13981  		for i = 0; i < (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder; i++ {
 13982  			*(*int32)(unsafe.Pointer(bp /* &pNLSFW_Q6[0] */ + uintptr(i)*4)) = (((*(*int32)(unsafe.Pointer(bp /* &pNLSFW_Q6[0] */ + uintptr(i)*4))) >> (1)) + ((((*(*int32)(unsafe.Pointer(bp + 128 /* &pNLSFW0_temp_Q6[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(i_sqr_Q15)))) + ((((*(*int32)(unsafe.Pointer(bp + 128 /* &pNLSFW0_temp_Q6[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(i_sqr_Q15)))) >> 16)))
 13983  
 13984  		}
 13985  	}
 13986  
 13987  	/* Set pointer to the NLSF codebook for the current signal type and LPC order */
 13988  	psNLSF_CB = *(*uintptr)(unsafe.Pointer((psEnc /* &.sCmn */ + 16248 /* &.psNLSF_CB */) + uintptr((*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.Fsigtype)*4))
 13989  
 13990  	/* Quantize NLSF parameters given the trained NLSF codebooks */
 13991  
 13992  	SKP_Silk_NLSF_MSVQ_encode_FIX(tls, psEncCtrl /* &.sCmn */ +28 /* &.NLSFIndices */, pNLSF_Q15, psNLSF_CB,
 13993  		psEnc+20672 /* &.sPred */ +12 /* &.prev_NLSFq_Q15 */, bp /* &pNLSFW_Q6[0] */, NLSF_mu_Q15, NLSF_mu_fluc_red_Q16,
 13994  		(*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FNLSF_MSVQ_Survivors, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.Ffirst_frame_after_reset)
 13995  
 13996  	/* Convert quantized NLSFs back to LPC coefficients */
 13997  	SKP_Silk_NLSF2A_stable(tls, ((psEncCtrl + 144 /* &.PredCoef_Q12 */) + 1*32), pNLSF_Q15, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
 13998  
 13999  	if doInterpolate != 0 {
 14000  		/* Calculate the interpolated, quantized LSF vector for the first half */
 14001  		SKP_Silk_interpolate(tls, bp+64 /* &pNLSF0_temp_Q15[0] */, psEnc+20672 /* &.sPred */ +12 /* &.prev_NLSFq_Q15 */, pNLSF_Q15,
 14002  			(*SKP_Silk_encoder_control_FIX)(unsafe.Pointer(psEncCtrl)).FsCmn.FNLSFInterpCoef_Q2, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
 14003  
 14004  		/* Convert back to LPC coefficients */
 14005  		SKP_Silk_NLSF2A_stable(tls, (psEncCtrl + 144 /* &.PredCoef_Q12 */), bp+64 /* &pNLSF0_temp_Q15[0] */, (*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder)
 14006  
 14007  	} else {
 14008  		/* Copy LPC coefficients for first half from second half */
 14009  		libc.Xmemcpy(tls, (psEncCtrl + 144 /* &.PredCoef_Q12 */), ((psEncCtrl + 144 /* &.PredCoef_Q12 */) + 1*32), (uint32((*SKP_Silk_encoder_state_FIX)(unsafe.Pointer(psEnc)).FsCmn.FpredictLPCOrder) * uint32(unsafe.Sizeof(int16(0)))))
 14010  	}
 14011  }
 14012  
 14013  func SKP_Silk_quant_LTP_gains_FIX(tls *libc.TLS, B_Q14 uintptr, cbk_index uintptr, periodicity_index uintptr, W_Q18 uintptr, mu_Q8 int32, lowComplexity int32) { /* SKP_Silk_quant_LTP_gains_FIX.c:30:6: */
 14014  	bp := tls.Alloc(20)
 14015  	defer tls.Free(20)
 14016  
 14017  	var j int32
 14018  	var k int32
 14019  	// var temp_idx [4]int32 at bp, 16
 14020  
 14021  	var cbk_size int32
 14022  	var cl_ptr uintptr
 14023  	var cbk_ptr_Q14 uintptr
 14024  	var b_Q14_ptr uintptr
 14025  	var W_Q18_ptr uintptr
 14026  	// var rate_dist_subfr int32 at bp+16, 4
 14027  
 14028  	var rate_dist int32
 14029  	var min_rate_dist int32
 14030  
 14031  	/***************************************************/
 14032  	/* iterate over different codebooks with different */
 14033  	/* rates/distortions, and choose best              */
 14034  	/***************************************************/
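	/* For each of the three LTP codebooks the four 5-tap subframe gain vectors are        */
	/* quantized with SKP_Silk_VQ_WMat_EC_FIX, and the weighted quantization error plus    */
	/* mu_Q8 times the rate is accumulated (with saturation) into rate_dist. The codebook  */
	/* with the smallest total is kept; in low-complexity mode the search stops early once */
	/* the total falls below SKP_Silk_LTP_gain_middle_avg_RD_Q14.                          */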
 14035  	min_rate_dist = 0x7FFFFFFF
 14036  	for k = 0; k < 3; k++ {
 14037  		cl_ptr = SKP_Silk_LTP_gain_BITS_Q6_ptrs[k]
 14038  		cbk_ptr_Q14 = SKP_Silk_LTP_vq_ptrs_Q14[k]
 14039  		cbk_size = SKP_Silk_LTP_vq_sizes[k]
 14040  
 14041  		/* Set up pointer to the first subframe */
 14042  		W_Q18_ptr = W_Q18
 14043  		b_Q14_ptr = B_Q14
 14044  
 14045  		rate_dist = 0
 14046  		for j = 0; j < 4; j++ {
 14047  
 14048  			SKP_Silk_VQ_WMat_EC_FIX(tls,
 14049  				(bp /* &temp_idx */ + uintptr(j)*4), /* O    index of best codebook vector                           */
 14050  				bp+16,                               /* &rate_dist_subfr */ /* O    best weighted quantization error + mu * rate            */
 14051  				b_Q14_ptr,                           /* I    input vector to be quantized                            */
 14052  				W_Q18_ptr,                           /* I    weighting matrix                                        */
 14053  				cbk_ptr_Q14,                         /* I    codebook                                                */
 14054  				cl_ptr,                              /* I    code length for each codebook vector                    */
 14055  				mu_Q8,                               /* I    tradeoff between weighted error and rate                */
 14056  				cbk_size)
 14057  
 14058  			rate_dist = func() int32 {
 14059  				if ((uint32((rate_dist) + (*(*int32)(unsafe.Pointer(bp + 16 /* rate_dist_subfr */))))) & 0x80000000) != 0 {
 14060  					return 0x7FFFFFFF
 14061  				}
 14062  				return ((rate_dist) + (*(*int32)(unsafe.Pointer(bp + 16 /* rate_dist_subfr */))))
 14063  			}()
 14064  
 14065  			b_Q14_ptr += 2 * (uintptr(5))
 14066  			W_Q18_ptr += 4 * (uintptr(5 * 5))
 14067  		}
 14068  
 14069  		/* Cap rate_dist so that a codebook is always selected */
 14070  		rate_dist = func() int32 {
 14071  			if (0x7FFFFFFF - 1) < (rate_dist) {
 14072  				return (0x7FFFFFFF - 1)
 14073  			}
 14074  			return rate_dist
 14075  		}()
 14076  
 14077  		if rate_dist < min_rate_dist {
 14078  			min_rate_dist = rate_dist
 14079  			libc.Xmemcpy(tls, cbk_index, bp /* &temp_idx[0] */, (uint32(4) * uint32(unsafe.Sizeof(int32(0)))))
 14080  			*(*int32)(unsafe.Pointer(periodicity_index)) = k
 14081  		}
 14082  
 14083  		/* Break early in low-complexity mode if rate distortion is below threshold */
 14084  		if (lowComplexity != 0) && (rate_dist < SKP_Silk_LTP_gain_middle_avg_RD_Q14) {
 14085  			break
 14086  		}
 14087  	}
 14088  
 14089  	cbk_ptr_Q14 = SKP_Silk_LTP_vq_ptrs_Q14[*(*int32)(unsafe.Pointer(periodicity_index))]
 14090  	for j = 0; j < 4; j++ {
 14091  		for k = 0; k < 5; k++ {
 14092  			*(*int16)(unsafe.Pointer(B_Q14 + uintptr(((j*5)+k))*2)) = *(*int16)(unsafe.Pointer(cbk_ptr_Q14 + uintptr(((k)+((*(*int32)(unsafe.Pointer(cbk_index + uintptr(j)*4)))*(5))))*2))
 14093  		}
 14094  	}
 14095  }
 14096  
 14097  /* Range encoder for one symbol */
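/* The entries of prob[] are cumulative symbol probabilities in Q16. Encoding symbol      */
/* `data` narrows the coder interval as                                                   */
/*   base_Q32 += range_Q16 * prob[data]                                                   */
/*   range     = range_Q16 * (prob[data + 1] - prob[data])                                */
/* A carry out of base_Q32 is propagated into the bytes already written, and the range    */
/* is then renormalized by emitting zero, one or two bytes of base_Q32.                   */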
 14098  func SKP_Silk_range_encoder(tls *libc.TLS, psRC uintptr, data int32, prob uintptr) { /* SKP_Silk_range_coder.c:31:6: */
 14099  	var low_Q16 uint32
 14100  	var high_Q16 uint32
 14101  	var base_tmp uint32
 14102  	var range_Q32 uint32
 14103  
 14104  	/* Copy structure data */
 14105  	var base_Q32 uint32 = (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Fbase_Q32
 14106  	var range_Q16 uint32 = (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Frange_Q16
 14107  	var bufferIx int32 = (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx
 14108  	var buffer uintptr = psRC + 20 /* &.buffer */
 14109  
 14110  	if (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror != 0 {
 14111  		return
 14112  	}
 14113  
 14114  	/* Update interval */
 14115  	low_Q16 = uint32(*(*uint16)(unsafe.Pointer(prob + uintptr(data)*2)))
 14116  	high_Q16 = uint32(*(*uint16)(unsafe.Pointer(prob + uintptr((data+1))*2)))
 14117  	base_tmp = base_Q32 /* save current base, to test for carry */
 14118  	base_Q32 = base_Q32 + ((range_Q16) * (low_Q16))
 14119  	range_Q32 = ((range_Q16) * (high_Q16 - low_Q16))
 14120  
 14121  	/* Check for carry */
 14122  	if base_Q32 < base_tmp {
 14123  		/* Propagate carry in buffer */
 14124  		var bufferIx_tmp int32 = bufferIx
 14125  		for (int32(libc.PreIncUint8(&*(*uint8)(unsafe.Pointer(buffer + uintptr(libc.PreDecInt32(&bufferIx_tmp, 1)))), 1))) == 0 {
 14126  		}
 14127  	}
 14128  
 14129  	/* Check normalization */
 14130  	if (range_Q32 & 0xFF000000) != 0 {
 14131  		/* No normalization */
 14132  		range_Q16 = ((range_Q32) >> (16))
 14133  	} else {
 14134  		if (range_Q32 & 0xFFFF0000) != 0 {
 14135  			/* Normalize with an 8-bit shift */
 14136  			range_Q16 = ((range_Q32) >> (8))
 14137  		} else {
 14138  			/* Normalize with a 16-bit shift */
 14139  			range_Q16 = range_Q32
 14140  			/* Make sure not to write beyond buffer */
 14141  			if bufferIx >= (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14142  				(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -1
 14143  				return
 14144  			}
 14145  			/* Write one byte to buffer */
 14146  			*(*uint8)(unsafe.Pointer(buffer + uintptr(libc.PostIncInt32(&bufferIx, 1)))) = (uint8((base_Q32) >> (24)))
 14147  			base_Q32 = ((base_Q32) << (8))
 14148  		}
 14149  		/* Make sure not to write beyond buffer */
 14150  		if bufferIx >= (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14151  			(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -1
 14152  			return
 14153  		}
 14154  		/* Write one byte to buffer */
 14155  		*(*uint8)(unsafe.Pointer(buffer + uintptr(libc.PostIncInt32(&bufferIx, 1)))) = (uint8((base_Q32) >> (24)))
 14156  		base_Q32 = ((base_Q32) << (8))
 14157  	}
 14158  
 14159  	/* Copy structure data back */
 14160  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Fbase_Q32 = base_Q32
 14161  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Frange_Q16 = range_Q16
 14162  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx = bufferIx
 14163  }
 14164  
 14165  /* Range encoder for multiple symbols */
 14166  func SKP_Silk_range_encoder_multi(tls *libc.TLS, psRC uintptr, data uintptr, prob uintptr, nSymbols int32) { /* SKP_Silk_range_coder.c:101:6: */
 14167  	var k int32
 14168  	for k = 0; k < nSymbols; k++ {
 14169  		SKP_Silk_range_encoder(tls, psRC, *(*int32)(unsafe.Pointer(data + uintptr(k)*4)), *(*uintptr)(unsafe.Pointer(prob + uintptr(k)*4)))
 14170  	}
 14171  }
 14172  
 14173  /* Range decoder for one symbol */
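/* Mirrors the encoder: starting from the hint probIx, the decoder searches for the       */
/* symbol k whose cumulative interval contains the current base, i.e.                     */
/*   range_Q16 * prob[k] <= base_Q32 < range_Q16 * prob[k + 1]                            */
/* then subtracts range_Q16 * prob[k] from base_Q32, rescales the range, and refills      */
/* base_Q32 from the buffer while renormalizing.                                          */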
 14174  func SKP_Silk_range_decoder(tls *libc.TLS, data uintptr, psRC uintptr, prob uintptr, probIx int32) { /* SKP_Silk_range_coder.c:115:6: */
 14175  	var low_Q16 uint32
 14176  	var high_Q16 uint32
 14177  	var base_tmp uint32
 14178  	var range_Q32 uint32
 14179  
 14180  	/* Copy structure data */
 14181  	var base_Q32 uint32 = (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Fbase_Q32
 14182  	var range_Q16 uint32 = (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Frange_Q16
 14183  	var bufferIx int32 = (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx
 14184  	var buffer uintptr = ((psRC + 20 /* &.buffer */) + 4)
 14185  
 14186  	if (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror != 0 {
 14187  		/* Set output to zero */
 14188  		*(*int32)(unsafe.Pointer(data)) = 0
 14189  		return
 14190  	}
 14191  
 14192  	high_Q16 = uint32(*(*uint16)(unsafe.Pointer(prob + uintptr(probIx)*2)))
 14193  	base_tmp = ((range_Q16) * (high_Q16))
 14194  	if base_tmp > base_Q32 {
 14195  		for 1 != 0 {
 14196  			low_Q16 = uint32(*(*uint16)(unsafe.Pointer(prob + uintptr(libc.PreDecInt32(&probIx, 1))*2)))
 14197  			base_tmp = ((range_Q16) * (low_Q16))
 14198  			if base_tmp <= base_Q32 {
 14199  				break
 14200  			}
 14201  			high_Q16 = low_Q16
 14202  			/* Test for out of range */
 14203  			if high_Q16 == uint32(0) {
 14204  				(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -2
 14205  				/* Set output to zero */
 14206  				*(*int32)(unsafe.Pointer(data)) = 0
 14207  				return
 14208  			}
 14209  		}
 14210  	} else {
 14211  		for 1 != 0 {
 14212  			low_Q16 = high_Q16
 14213  			high_Q16 = uint32(*(*uint16)(unsafe.Pointer(prob + uintptr(libc.PreIncInt32(&probIx, 1))*2)))
 14214  			base_tmp = ((range_Q16) * (high_Q16))
 14215  			if base_tmp > base_Q32 {
 14216  				probIx--
 14217  				break
 14218  			}
 14219  			/* Test for out of range */
 14220  			if high_Q16 == uint32(0xFFFF) {
 14221  				(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -2
 14222  				/* Set output to zero */
 14223  				*(*int32)(unsafe.Pointer(data)) = 0
 14224  				return
 14225  			}
 14226  		}
 14227  	}
 14228  	*(*int32)(unsafe.Pointer(data)) = probIx
 14229  	base_Q32 = base_Q32 - ((range_Q16) * (low_Q16))
 14230  	range_Q32 = ((range_Q16) * (high_Q16 - low_Q16))
 14231  
 14232  	/* Check normalization */
 14233  	if (range_Q32 & 0xFF000000) != 0 {
 14234  		/* No normalization */
 14235  		range_Q16 = ((range_Q32) >> (16))
 14236  	} else {
 14237  		if (range_Q32 & 0xFFFF0000) != 0 {
 14238  			/* Normalize with an 8-bit shift */
 14239  			range_Q16 = ((range_Q32) >> (8))
 14240  			/* Check for errors */
 14241  			if ((base_Q32) >> (24)) != 0 {
 14242  				(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -3
 14243  				/* Set output to zero */
 14244  				*(*int32)(unsafe.Pointer(data)) = 0
 14245  				return
 14246  			}
 14247  		} else {
 14248  			/* Normalize with a 16-bit shift */
 14249  			range_Q16 = range_Q32
 14250  			/* Check for errors */
 14251  			if ((base_Q32) >> (16)) != 0 {
 14252  				(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -3
 14253  				/* Set output to zero */
 14254  				*(*int32)(unsafe.Pointer(data)) = 0
 14255  				return
 14256  			}
 14257  			/* Update base */
 14258  			base_Q32 = ((base_Q32) << (8))
 14259  			/* Make sure not to read beyond buffer */
 14260  			if bufferIx < (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14261  				/* Read one byte from buffer */
 14262  				base_Q32 = base_Q32 | (uint32(*(*uint8)(unsafe.Pointer(buffer + uintptr(libc.PostIncInt32(&bufferIx, 1))))))
 14263  			}
 14264  		}
 14265  		/* Update base */
 14266  		base_Q32 = ((base_Q32) << (8))
 14267  		/* Make sure not to read beyond buffer */
 14268  		if bufferIx < (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14269  			/* Read one byte from buffer */
 14270  			base_Q32 = base_Q32 | (uint32(*(*uint8)(unsafe.Pointer(buffer + uintptr(libc.PostIncInt32(&bufferIx, 1))))))
 14271  		}
 14272  	}
 14273  
 14274  	/* Check for zero interval length */
 14275  	if range_Q16 == uint32(0) {
 14276  		(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -4
 14277  		/* Set output to zero */
 14278  		*(*int32)(unsafe.Pointer(data)) = 0
 14279  		return
 14280  	}
 14281  
 14282  	/* Copy structure data back */
 14283  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Fbase_Q32 = base_Q32
 14284  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Frange_Q16 = range_Q16
 14285  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx = bufferIx
 14286  }
 14287  
 14288  /* Range decoder for multiple symbols */
 14289  func SKP_Silk_range_decoder_multi(tls *libc.TLS, data uintptr, psRC uintptr, prob uintptr, probStartIx uintptr, nSymbols int32) { /* SKP_Silk_range_coder.c:234:6: */
 14290  	var k int32
 14291  	for k = 0; k < nSymbols; k++ {
 14292  		SKP_Silk_range_decoder(tls, (data + uintptr(k)*4), psRC, *(*uintptr)(unsafe.Pointer(prob + uintptr(k)*4)), *(*int32)(unsafe.Pointer(probStartIx + uintptr(k)*4)))
 14293  	}
 14294  }
 14295  
 14296  /* Initialize range encoder */
 14297  func SKP_Silk_range_enc_init(tls *libc.TLS, psRC uintptr) { /* SKP_Silk_range_coder.c:249:6: */
 14298  	/* Initialize structure */
 14299  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength = 1024
 14300  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Frange_Q16 = uint32(0x0000FFFF)
 14301  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx = 0
 14302  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Fbase_Q32 = uint32(0)
 14303  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = 0
 14304  }
 14305  
 14306  /* Initialize range decoder */
 14307  func SKP_Silk_range_dec_init(tls *libc.TLS, psRC uintptr, buffer uintptr, bufferLength int32) { /* SKP_Silk_range_coder.c:262:6: */
 14308  	/* check input */
 14309  	if (bufferLength > 1024) || (bufferLength < 0) {
 14310  		(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -8
 14311  		return
 14312  	}
 14313  	/* Initialize structure */
 14314  	/* Copy to internal buffer */
 14315  	libc.Xmemcpy(tls, psRC+20 /* &.buffer */, buffer, (uint32(bufferLength) * uint32(unsafe.Sizeof(uint8(0)))))
 14316  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength = bufferLength
 14317  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx = 0
 14318  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Fbase_Q32 = (((((uint32(*(*uint8)(unsafe.Pointer(buffer)))) << (24)) | ((uint32(*(*uint8)(unsafe.Pointer(buffer + 1)))) << (16))) | ((uint32(*(*uint8)(unsafe.Pointer(buffer + 2)))) << (8))) | uint32(*(*uint8)(unsafe.Pointer(buffer + 3))))
 14319  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Frange_Q16 = uint32(0x0000FFFF)
 14320  	(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = 0
 14321  }
 14322  
 14323  /* Determine length of bitstream */
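/* nBits = 8 * bufferIx + CLZ32(range_Q16 - 1) - 14. For instance, right after            */
/* SKP_Silk_range_enc_init (bufferIx == 0, range_Q16 == 0xFFFF) this gives                */
/* 16 - 14 = 2 bits, i.e. nBytes == 1.                                                    */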
 14324  func SKP_Silk_range_coder_get_length(tls *libc.TLS, psRC uintptr, nBytes uintptr) int32 { /* SKP_Silk_range_coder.c:288:9: */
 14325  	var nBits int32
 14326  
 14327  	/* Number of bits in stream */
 14328  	nBits = (((((*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx) << (3)) + SKP_Silk_CLZ32(tls, (int32((*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Frange_Q16-uint32(1))))) - 14)
 14329  
 14330  	*(*int32)(unsafe.Pointer(nBytes)) = ((nBits + 7) >> (3))
 14331  
 14332  	/* Return number of bits in bitstream */
 14333  	return nBits
 14334  }
 14335  
 14336  /* Write shortest uniquely decodable stream to buffer, and determine its length */
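/* bits_to_store = bits_in_stream - 8 * bufferIx is the number of extra bits (1..9) of    */
/* base still needed for unique decodability. base_Q24 is rounded up to that resolution   */
/* (propagating any carry into earlier bytes), its top one or two bytes are appended,     */
/* and the unused bits of the last byte are set to 1 so that                              */
/* SKP_Silk_range_coder_check_after_decoding can verify them.                             */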
 14337  func SKP_Silk_range_enc_wrap_up(tls *libc.TLS, psRC uintptr) { /* SKP_Silk_range_coder.c:305:6: */
 14338  	bp := tls.Alloc(4)
 14339  	defer tls.Free(4)
 14340  
 14341  	var bufferIx_tmp int32
 14342  	var bits_to_store int32
 14343  	var bits_in_stream int32
 14344  	// var nBytes int32 at bp, 4
 14345  
 14346  	var mask int32
 14347  	var base_Q24 uint32
 14348  
 14349  	/* Lower limit of interval, shifted 8 bits to the right */
 14350  	base_Q24 = (((*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Fbase_Q32) >> (8))
 14351  
 14352  	bits_in_stream = SKP_Silk_range_coder_get_length(tls, psRC, bp /* &nBytes */)
 14353  
 14354  	/* Number of additional bits (1..9) required to be stored to stream */
 14355  	bits_to_store = (bits_in_stream - (((*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx) << (3)))
 14356  	/* Round up to required resolution */
 14357  	base_Q24 = base_Q24 + (uint32(int32((0x00800000)) >> (bits_to_store - 1)))
 14358  	base_Q24 = base_Q24 & (uint32((0xFFFFFFFF)) << (24 - bits_to_store))
 14359  
 14360  	/* Check for carry */
 14361  	if (base_Q24 & uint32(0x01000000)) != 0 {
 14362  		/* Propagate carry in buffer */
 14363  		bufferIx_tmp = (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx
 14364  		for (int32(libc.PreIncUint8(&(*(*uint8)(unsafe.Pointer((psRC + 20 /* &.buffer */) + uintptr(libc.PreDecInt32(&bufferIx_tmp, 1))))), 1))) == 0 {
 14365  		}
 14366  	}
 14367  
 14368  	/* Store to stream, making sure not to write beyond buffer */
 14369  	if (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx < (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14370  		*(*uint8)(unsafe.Pointer((psRC + 20 /* &.buffer */) + uintptr(libc.PostIncInt32(&(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx, 1)))) = (uint8((base_Q24) >> (16)))
 14371  		if bits_to_store > 8 {
 14372  			if (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx < (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14373  				*(*uint8)(unsafe.Pointer((psRC + 20 /* &.buffer */) + uintptr(libc.PostIncInt32(&(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferIx, 1)))) = (uint8((base_Q24) >> (8)))
 14374  			}
 14375  		}
 14376  	}
 14377  
 14378  	/* Fill up any remaining bits in the last byte with 1s */
 14379  	if (bits_in_stream & 7) != 0 {
 14380  		mask = (int32((0xFF)) >> (bits_in_stream & 7))
 14381  		if (*(*int32)(unsafe.Pointer(bp /* nBytes */)) - 1) < (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14382  			*(*uint8)(unsafe.Pointer((psRC + 20 /* &.buffer */) + uintptr((*(*int32)(unsafe.Pointer(bp /* nBytes */)) - 1)))) |= uint8((mask))
 14383  		}
 14384  	}
 14385  }
 14386  
 14387  /* Check that any remaining bits in the last byte are set to 1 */
 14388  func SKP_Silk_range_coder_check_after_decoding(tls *libc.TLS, psRC uintptr) { /* SKP_Silk_range_coder.c:350:6: */
 14389  	bp := tls.Alloc(4)
 14390  	defer tls.Free(4)
 14391  
 14392  	var bits_in_stream int32
 14393  	// var nBytes int32 at bp, 4
 14394  
 14395  	var mask int32
 14396  
 14397  	bits_in_stream = SKP_Silk_range_coder_get_length(tls, psRC, bp /* &nBytes */)
 14398  
 14399  	/* Make sure not to read beyond buffer */
 14400  	if (*(*int32)(unsafe.Pointer(bp /* nBytes */)) - 1) >= (*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).FbufferLength {
 14401  		(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -5
 14402  		return
 14403  	}
 14404  
 14405  	/* Test any remaining bits in last byte */
 14406  	if (bits_in_stream & 7) != 0 {
 14407  		mask = (int32((0xFF)) >> (bits_in_stream & 7))
 14408  		if (int32(*(*uint8)(unsafe.Pointer((psRC + 20 /* &.buffer */) + uintptr((*(*int32)(unsafe.Pointer(bp /* nBytes */)) - 1))))) & mask) != mask {
 14409  			(*SKP_Silk_range_coder_state)(unsafe.Pointer(psRC)).Ferror = -5
 14410  			return
 14411  		}
 14412  	}
 14413  }
 14414  
 14415  /* Add noise to matrix diagonal */
 14416  func SKP_Silk_regularize_correlations_FIX(tls *libc.TLS, XX uintptr, xx uintptr, noise int32, D int32) { /* SKP_Silk_regularize_correlations_FIX.c:31:6: */
 14417  	var i int32
 14418  	for i = 0; i < D; i++ {
 14419  		*(*int32)(unsafe.Pointer(((XX) + uintptr((((i)*(D))+(i)))*4))) = ((*(*int32)(unsafe.Pointer(((XX) + uintptr((((i)*(D))+(i)))*4)))) + (noise))
 14420  	}
 14421  	*(*int32)(unsafe.Pointer(xx)) += (noise)
 14422  }
 14423  
 14424  /* Greatest common divisor */
 14425  func gcd(tls *libc.TLS, a int32, b int32) int32 { /* SKP_Silk_resampler.c:66:18: */
 14426  	var tmp int32
 14427  	for b > 0 {
 14428  		tmp = (a - (b * ((a) / (b))))
 14429  		a = b
 14430  		b = tmp
 14431  	}
 14432  	return a
 14433  }
 14434  
 14435  /* Initialize/reset the resampler state for a given pair of input/output sampling rates */
 14436  func SKP_Silk_resampler_init(tls *libc.TLS, S uintptr, Fs_Hz_in int32, Fs_Hz_out int32) int32 { /* SKP_Silk_resampler.c:81:9: */
 14437  	var cycleLen int32
 14438  	var cyclesPerBatch int32
 14439  	var up2 int32 = 0
 14440  	var down2 int32 = 0
 14441  
 14442  	/* Clear state */
 14443  	libc.Xmemset(tls, S, 0, uint32(unsafe.Sizeof(SKP_Silk_resampler_state_struct{})))
 14444  
 14445  	/* Input checking */
 14446  	if (((Fs_Hz_in < 8000) || (Fs_Hz_in > 192000)) || (Fs_Hz_out < 8000)) || (Fs_Hz_out > 192000) {
 14447  
 14448  		return -1
 14449  	}
 14450  
 14451  	/* Determine pre-downsampling and post-upsampling */
 14452  	if Fs_Hz_in > 96000 {
 14453  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers = 2
 14454  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fdown_pre_function = *(*uintptr)(unsafe.Pointer(&struct {
 14455  			f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14456  		}{SKP_Silk_resampler_private_down4}))
 14457  	} else if Fs_Hz_in > 48000 {
 14458  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers = 1
 14459  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fdown_pre_function = *(*uintptr)(unsafe.Pointer(&struct {
 14460  			f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14461  		}{SKP_Silk_resampler_down2}))
 14462  	} else {
 14463  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers = 0
 14464  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fdown_pre_function = uintptr(0)
 14465  	}
 14466  
 14467  	if Fs_Hz_out > 96000 {
 14468  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers = 2
 14469  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fup_post_function = *(*uintptr)(unsafe.Pointer(&struct {
 14470  			f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14471  		}{SKP_Silk_resampler_private_up4}))
 14472  	} else if Fs_Hz_out > 48000 {
 14473  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers = 1
 14474  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fup_post_function = *(*uintptr)(unsafe.Pointer(&struct {
 14475  			f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14476  		}{SKP_Silk_resampler_up2}))
 14477  	} else {
 14478  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers = 0
 14479  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fup_post_function = uintptr(0)
 14480  	}
 14481  
 14482  	if ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers + (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers) > 0 {
 14483  		/* Ratio of output/input samples */
 14484  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fratio_Q16 = ((((Fs_Hz_out) << (13)) / (Fs_Hz_in)) << (3))
 14485  		/* Make sure the ratio is rounded up */
 14486  		for ((((((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fratio_Q16) >> 16) * (int32(int16(Fs_Hz_in)))) + (((((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fratio_Q16) & 0x0000FFFF) * (int32(int16(Fs_Hz_in)))) >> 16)) + (((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fratio_Q16) * (func() int32 {
 14487  			if (16) == 1 {
 14488  				return (((Fs_Hz_in) >> 1) + ((Fs_Hz_in) & 1))
 14489  			}
 14490  			return ((((Fs_Hz_in) >> ((16) - 1)) + 1) >> 1)
 14491  		}()))) < Fs_Hz_out {
 14492  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fratio_Q16++
 14493  		}
 14494  
 14495  		/* Batch size is 10 ms */
 14496  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSizePrePost = ((Fs_Hz_in) / (100))
 14497  
 14498  		/* Convert sampling rates to the values after pre-downsampling and before post-upsampling */
 14499  		Fs_Hz_in = ((Fs_Hz_in) >> ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers))
 14500  		Fs_Hz_out = ((Fs_Hz_out) >> ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers))
 14501  	}
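	/* Example for this branch: Fs_Hz_in = 192000, Fs_Hz_out = 48000 gives                   */
	/* ratio_Q16 = ((48000 << 13) / 192000) << 3 = 16384, i.e. 0.25 in Q16 (the << 13 / << 3 */
	/* split keeps the intermediate product within 32 bits). SMULWW(16384, 192000) = 48000,  */
	/* so the round-up loop does not fire, and batchSizePrePost = 1920 (10 ms at 192 kHz).   */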
 14502  
 14503  	/* Number of samples processed per batch */
 14504  	/* First, try 10 ms frames */
 14505  	(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize = ((Fs_Hz_in) / (100))
 14506  	if ((((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize) * (100)) != Fs_Hz_in) || ((Fs_Hz_in % 100) != 0) {
 14507  		/* No integer number of input or output samples with 10 ms frames, use greatest common divisor */
 14508  		cycleLen = ((Fs_Hz_in) / (gcd(tls, Fs_Hz_in, Fs_Hz_out)))
 14509  		cyclesPerBatch = ((480) / (cycleLen))
 14510  		if cyclesPerBatch == 0 {
 14511  			/* cycleLen too big, let's just use the maximum batch size. Some distortion will result. */
 14512  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize = 480
 14513  
 14514  		} else {
 14515  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize = ((cyclesPerBatch) * (cycleLen))
 14516  		}
 14517  	}
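	/* Example: Fs_Hz_in = 22050, Fs_Hz_out = 16000 is not a whole number of samples per     */
	/* 10 ms, so the gcd path is taken: gcd(22050, 16000) = 50, cycleLen = 22050 / 50 = 441, */
	/* cyclesPerBatch = 480 / 441 = 1, and the batch size becomes 441 input samples.         */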
 14518  
 14519  	/* Find resampler with the right sampling ratio */
 14520  	if Fs_Hz_out > Fs_Hz_in {
 14521  		/* Upsample */
 14522  		if Fs_Hz_out == ((Fs_Hz_in) * (2)) { /* Fs_out : Fs_in = 2 : 1 */
 14523  			/* Special case: directly use 2x upsampler */
 14524  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14525  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14526  			}{SKP_Silk_resampler_private_up2_HQ_wrapper}))
 14527  		} else {
 14528  			/* Default resampler */
 14529  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14530  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14531  			}{SKP_Silk_resampler_private_IIR_FIR}))
 14532  			up2 = 1
 14533  			if Fs_Hz_in > 24000 {
 14534  				/* Low-quality all-pass upsampler */
 14535  				(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fup2_function = *(*uintptr)(unsafe.Pointer(&struct {
 14536  					f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14537  				}{SKP_Silk_resampler_up2}))
 14538  			} else {
 14539  				/* High-quality all-pass upsampler */
 14540  				(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fup2_function = *(*uintptr)(unsafe.Pointer(&struct {
 14541  					f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14542  				}{SKP_Silk_resampler_private_up2_HQ}))
 14543  			}
 14544  		}
 14545  	} else if Fs_Hz_out < Fs_Hz_in {
 14546  		/* Downsample */
 14547  		if ((Fs_Hz_out) * (4)) == ((Fs_Hz_in) * (3)) { /* Fs_out : Fs_in = 3 : 4 */
 14548  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs = 3
 14549  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_3_4_COEFS))
 14550  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14551  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14552  			}{SKP_Silk_resampler_private_down_FIR}))
 14553  		} else if ((Fs_Hz_out) * (3)) == ((Fs_Hz_in) * (2)) { /* Fs_out : Fs_in = 2 : 3 */
 14554  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs = 2
 14555  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_2_3_COEFS))
 14556  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14557  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14558  			}{SKP_Silk_resampler_private_down_FIR}))
 14559  		} else if ((Fs_Hz_out) * (2)) == Fs_Hz_in { /* Fs_out : Fs_in = 1 : 2 */
 14560  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs = 1
 14561  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_1_2_COEFS))
 14562  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14563  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14564  			}{SKP_Silk_resampler_private_down_FIR}))
 14565  		} else if ((Fs_Hz_out) * (8)) == ((Fs_Hz_in) * (3)) { /* Fs_out : Fs_in = 3 : 8 */
 14566  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs = 3
 14567  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_3_8_COEFS))
 14568  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14569  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14570  			}{SKP_Silk_resampler_private_down_FIR}))
 14571  		} else if ((Fs_Hz_out) * (3)) == Fs_Hz_in { /* Fs_out : Fs_in = 1 : 3 */
 14572  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs = 1
 14573  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_1_3_COEFS))
 14574  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14575  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14576  			}{SKP_Silk_resampler_private_down_FIR}))
 14577  		} else if ((Fs_Hz_out) * (4)) == Fs_Hz_in { /* Fs_out : Fs_in = 1 : 4 */
 14578  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs = 1
 14579  			down2 = 1
 14580  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_1_2_COEFS))
 14581  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14582  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14583  			}{SKP_Silk_resampler_private_down_FIR}))
 14584  		} else if ((Fs_Hz_out) * (6)) == Fs_Hz_in { /* Fs_out : Fs_in = 1 : 6 */
 14585  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs = 1
 14586  			down2 = 1
 14587  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_1_3_COEFS))
 14588  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14589  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14590  			}{SKP_Silk_resampler_private_down_FIR}))
 14591  		} else if ((Fs_Hz_out) * (441)) == ((Fs_Hz_in) * (80)) { /* Fs_out : Fs_in = 80 : 441 */
 14592  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_80_441_ARMA4_COEFS))
 14593  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14594  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14595  			}{SKP_Silk_resampler_private_IIR_FIR}))
 14596  		} else if ((Fs_Hz_out) * (441)) == ((Fs_Hz_in) * (120)) { /* Fs_out : Fs_in = 120 : 441 */
 14597  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_120_441_ARMA4_COEFS))
 14598  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14599  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14600  			}{SKP_Silk_resampler_private_IIR_FIR}))
 14601  		} else if ((Fs_Hz_out) * (441)) == ((Fs_Hz_in) * (160)) { /* Fs_out : Fs_in = 160 : 441 */
 14602  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_160_441_ARMA4_COEFS))
 14603  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14604  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14605  			}{SKP_Silk_resampler_private_IIR_FIR}))
 14606  		} else if ((Fs_Hz_out) * (441)) == ((Fs_Hz_in) * (240)) { /* Fs_out : Fs_in = 240 : 441 */
 14607  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_240_441_ARMA4_COEFS))
 14608  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14609  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14610  			}{SKP_Silk_resampler_private_IIR_FIR}))
 14611  		} else if ((Fs_Hz_out) * (441)) == ((Fs_Hz_in) * (320)) { /* Fs_out : Fs_in = 320 : 441 */
 14612  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs = uintptr(unsafe.Pointer(&SKP_Silk_Resampler_320_441_ARMA4_COEFS))
 14613  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14614  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14615  			}{SKP_Silk_resampler_private_IIR_FIR}))
 14616  		} else {
 14617  			/* Default resampler */
 14618  			(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14619  				f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14620  			}{SKP_Silk_resampler_private_IIR_FIR}))
 14621  			up2 = 1
 14622  			if Fs_Hz_in > 24000 {
 14623  				/* Low-quality all-pass upsampler */
 14624  				(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fup2_function = *(*uintptr)(unsafe.Pointer(&struct {
 14625  					f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14626  				}{SKP_Silk_resampler_up2}))
 14627  			} else {
 14628  				/* High-quality all-pass upsampler */
 14629  				(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fup2_function = *(*uintptr)(unsafe.Pointer(&struct {
 14630  					f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14631  				}{SKP_Silk_resampler_private_up2_HQ}))
 14632  			}
 14633  		}
 14634  	} else {
 14635  		/* Input and output sampling rates are equal: copy */
 14636  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fresampler_function = *(*uintptr)(unsafe.Pointer(&struct {
 14637  			f func(*libc.TLS, uintptr, uintptr, uintptr, int32)
 14638  		}{SKP_Silk_resampler_private_copy}))
 14639  	}
 14640  
 14641  	(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x = (up2 | down2)
 14642  
 14643  	/* Ratio of input/output samples */
 14644  	(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FinvRatio_Q16 = ((((Fs_Hz_in) << ((14 + up2) - down2)) / (Fs_Hz_out)) << (2))
 14645  	/* Make sure the ratio is rounded up */
 14646  	for ((((((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FinvRatio_Q16) >> 16) * (int32((int16((Fs_Hz_out) << (down2)))))) + (((((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FinvRatio_Q16) & 0x0000FFFF) * (int32((int16((Fs_Hz_out) << (down2)))))) >> 16)) + (((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FinvRatio_Q16) * (func() int32 {
 14647  		if (16) == 1 {
 14648  			return ((((Fs_Hz_out) << (down2)) >> 1) + (((Fs_Hz_out) << (down2)) & 1))
 14649  		}
 14650  		return (((((Fs_Hz_out) << (down2)) >> ((16) - 1)) + 1) >> 1)
 14651  	}()))) < ((Fs_Hz_in) << (up2)) {
 14652  		(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FinvRatio_Q16++
 14653  	}
 14654  
 14655  	(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fmagic_number = 123456789
 14656  
 14657  	return 0
 14658  }
 14659  
 14660  /* Clear the states of all resampling filters, without resetting sampling rate ratio */
 14661  func SKP_Silk_resampler_clear(tls *libc.TLS, S uintptr) int32 { /* SKP_Silk_resampler.c:255:9: */
 14662  	/* Clear state */
 14663  	libc.Xmemset(tls, S+88 /* &.sDown2 */, 0, uint32(unsafe.Sizeof([2]int32{})))
 14664  	libc.Xmemset(tls, S /* &.sIIR */, 0, uint32(unsafe.Sizeof([6]int32{})))
 14665  	libc.Xmemset(tls, S+24 /* &.sFIR */, 0, uint32(unsafe.Sizeof([16]int32{})))
 14666  	libc.Xmemset(tls, S+124 /* &.sDownPre */, 0, uint32(unsafe.Sizeof([2]int32{})))
 14667  	libc.Xmemset(tls, S+132 /* &.sUpPost */, 0, uint32(unsafe.Sizeof([2]int32{})))
 14668  	return 0
 14669  }
 14670  
 14671  /* Resampler: convert from one sampling rate to another                                 */
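/* When pre-downsamplers or post-upsamplers are configured (input or output above 48 kHz), */
/* the signal is processed in batches of batchSizePrePost input samples: optional          */
/* pre-downsampling into a scratch buffer, the core resampler_function, then optional      */
/* post-upsampling, with the per-batch output count taken as SMULWW(ratio_Q16, nSamplesIn).*/
/* Otherwise the core resampler_function is applied to the whole input in one call.        */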
 14672  func SKP_Silk_resampler(tls *libc.TLS, S uintptr, out uintptr, in uintptr, inLen int32) int32 { /* SKP_Silk_resampler.c:271:9: */
 14673  	bp := tls.Alloc(1920)
 14674  	defer tls.Free(1920)
 14675  
 14676  	/* Verify that state was initialized and has not been corrupted */
 14677  	if (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fmagic_number != 123456789 {
 14678  
 14679  		return -1
 14680  	}
 14681  
 14682  	if ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers + (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers) > 0 {
 14683  		/* The input and/or output sampling rate is above 48000 Hz */
 14684  		var nSamplesIn int32
 14685  		var nSamplesOut int32
 14686  		// var in_buf [480]int16 at bp, 960
 14687  
 14688  		// var out_buf [480]int16 at bp+960, 960
 14689  
 14690  		for inLen > 0 {
 14691  			/* Number of input and output samples to process */
 14692  			nSamplesIn = func() int32 {
 14693  				if (inLen) < ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSizePrePost) {
 14694  					return inLen
 14695  				}
 14696  				return (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSizePrePost
 14697  			}()
 14698  			nSamplesOut = (((((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fratio_Q16) >> 16) * (int32(int16(nSamplesIn)))) + (((((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Fratio_Q16) & 0x0000FFFF) * (int32(int16(nSamplesIn)))) >> 16))
 14699  
 14700  			if (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers > 0 {
 14701  				(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 140 /* &.down_pre_function */))))(tls, S+124 /* &.sDownPre */, bp /* &in_buf[0] */, in, nSamplesIn)
 14702  				if (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers > 0 {
 14703  					(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 96 /* &.resampler_function */))))(tls, S, bp+960 /* &out_buf[0] */, bp /* &in_buf[0] */, ((nSamplesIn) >> ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers)))
 14704  					(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 144 /* &.up_post_function */))))(tls, S+132 /* &.sUpPost */, out, bp+960 /* &out_buf[0] */, ((nSamplesOut) >> ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers)))
 14705  				} else {
 14706  					(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 96 /* &.resampler_function */))))(tls, S, out, bp /* &in_buf[0] */, ((nSamplesIn) >> ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers)))
 14707  				}
 14708  			} else {
 14709  				(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 96 /* &.resampler_function */))))(tls, S, bp+960 /* &out_buf[0] */, in, ((nSamplesIn) >> ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPreDownsamplers)))
 14710  				(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 144 /* &.up_post_function */))))(tls, S+132 /* &.sUpPost */, out, bp+960 /* &out_buf[0] */, ((nSamplesOut) >> ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FnPostUpsamplers)))
 14711  			}
 14712  
 14713  			in += 2 * uintptr(nSamplesIn)
 14714  			out += 2 * uintptr(nSamplesOut)
 14715  			inLen = inLen - (nSamplesIn)
 14716  		}
 14717  	} else {
 14718  		/* Input and output sampling rates are at most 48000 Hz */
 14719  		(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 96 /* &.resampler_function */))))(tls, S, out, in, inLen)
 14720  	}
 14721  
 14722  	return 0
 14723  }
 14724  
 14725  /* Downsample by a factor 2, mediocre quality */
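/* This appears to be the standard two-all-pass (polyphase half-band) decimator: even      */
/* input samples pass through a first-order all-pass section with coefficient              */
/* SKP_Silk_resampler_down2_1, odd samples through one with SKP_Silk_resampler_down2_0,    */
/* and the two branch outputs are summed before rounding back to int16.                    */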
 14726  func SKP_Silk_resampler_down2(tls *libc.TLS, S uintptr, out uintptr, in uintptr, inLen int32) { /* SKP_Silk_resampler_down2.c:40:6: */
 14727  	var k int32
 14728  	var len2 int32 = ((inLen) >> (1))
 14729  	var in32 int32
 14730  	var out32 int32
 14731  	var Y int32
 14732  	var X int32
 14733  
 14734  	/* Internal variables and state are in Q10 format */
 14735  	for k = 0; k < len2; k++ {
 14736  		/* Convert to Q10 */
 14737  		in32 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr((2*k))*2)))) << (10))
 14738  
 14739  		/* All-pass section for even input sample */
 14740  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S))))
 14741  		X = ((Y) + ((((Y) >> 16) * (int32(SKP_Silk_resampler_down2_1))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_down2_1))) >> 16)))
 14742  		out32 = ((*(*int32)(unsafe.Pointer(S))) + (X))
 14743  		*(*int32)(unsafe.Pointer(S)) = ((in32) + (X))
 14744  
 14745  		/* Convert to Q10 */
 14746  		in32 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr(((2*k)+1))*2)))) << (10))
 14747  
 14748  		/* All-pass section for odd input sample, and add to output of previous section */
 14749  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S + 1*4))))
 14750  		X = ((((Y) >> 16) * (int32(SKP_Silk_resampler_down2_0))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_down2_0))) >> 16))
 14751  		out32 = ((out32) + (*(*int32)(unsafe.Pointer(S + 1*4))))
 14752  		out32 = ((out32) + (X))
 14753  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((in32) + (X))
 14754  
 14755  		/* Add, convert back to int16 and store to output */
 14756  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
 14757  			if (func() int32 {
 14758  				if (11) == 1 {
 14759  					return (((out32) >> 1) + ((out32) & 1))
 14760  				}
 14761  				return ((((out32) >> ((11) - 1)) + 1) >> 1)
 14762  			}()) > 0x7FFF {
 14763  				return int16(0x7FFF)
 14764  			}
 14765  			return func() int16 {
 14766  				if (func() int32 {
 14767  					if (11) == 1 {
 14768  						return (((out32) >> 1) + ((out32) & 1))
 14769  					}
 14770  					return ((((out32) >> ((11) - 1)) + 1) >> 1)
 14771  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 14772  					return libc.Int16FromInt32(0x8000)
 14773  				}
 14774  				return func() int16 {
 14775  					if (11) == 1 {
 14776  						return (int16(((out32) >> 1) + ((out32) & 1)))
 14777  					}
 14778  					return (int16((((out32) >> ((11) - 1)) + 1) >> 1))
 14779  				}()
 14780  			}()
 14781  		}()
 14782  	}
 14783  }
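/* The nested closures above appear to be the expansion of SKP_SAT16(SKP_RSHIFT_ROUND(x, 11)): */
/* shift x right by 11 with rounding to nearest, then clamp the result to the int16 range      */
/* [-32768, 32767] before it is stored.                                                        */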
 14784  
 14785  /* Downsample by a factor 2/3, low quality */
 14786  func SKP_Silk_resampler_down2_3(tls *libc.TLS, S uintptr, out uintptr, in uintptr, inLen int32) { /* SKP_Silk_resampler_down2_3.c:42:6: */
 14787  	bp := tls.Alloc(1936)
 14788  	defer tls.Free(1936)
 14789  
 14790  	var nSamplesIn int32
 14791  	var counter int32
 14792  	var res_Q6 int32
 14793  	// var buf [484]int32 at bp, 1936
 14794  
 14795  	var buf_ptr uintptr
 14796  
 14797  	/* Copy buffered samples to start of buffer */
 14798  	libc.Xmemcpy(tls, bp /* &buf[0] */, S, (uint32(4) * uint32(unsafe.Sizeof(int32(0)))))
 14799  
 14800  	/* Iterate over blocks of at most 480 input samples */
 14801  	for 1 != 0 {
 14802  		nSamplesIn = func() int32 {
 14803  			if (inLen) < (480) {
 14804  				return inLen
 14805  			}
 14806  			return 480
 14807  		}()
 14808  
 14809  		/* Second-order AR filter (output in Q8) */
 14810  		SKP_Silk_resampler_private_AR2(tls, (S + 4*4), (bp /* &buf */ + 4*4), in,
 14811  			uintptr(unsafe.Pointer(&SKP_Silk_Resampler_2_3_COEFS_LQ)), nSamplesIn)
 14812  
 14813  		/* Interpolate filtered signal */
 14814  		buf_ptr = bp /* &buf[0] */
 14815  		counter = nSamplesIn
 14816  		for counter > 2 {
 14817  			/* Inner product */
 14818  			res_Q6 = ((((*(*int32)(unsafe.Pointer(buf_ptr))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[2]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[2]))) >> 16))
 14819  			res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[3]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[3]))) >> 16)))
 14820  			res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[5]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[5]))) >> 16)))
 14821  			res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[4]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[4]))) >> 16)))
 14822  
 14823  			/* Scale down, saturate and store in output array */
 14824  			*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&out, 2))) = func() int16 {
 14825  				if (func() int32 {
 14826  					if (6) == 1 {
 14827  						return (((res_Q6) >> 1) + ((res_Q6) & 1))
 14828  					}
 14829  					return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 14830  				}()) > 0x7FFF {
 14831  					return int16(0x7FFF)
 14832  				}
 14833  				return func() int16 {
 14834  					if (func() int32 {
 14835  						if (6) == 1 {
 14836  							return (((res_Q6) >> 1) + ((res_Q6) & 1))
 14837  						}
 14838  						return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 14839  					}()) < (int32(libc.Int16FromInt32(0x8000))) {
 14840  						return libc.Int16FromInt32(0x8000)
 14841  					}
 14842  					return func() int16 {
 14843  						if (6) == 1 {
 14844  							return (int16(((res_Q6) >> 1) + ((res_Q6) & 1)))
 14845  						}
 14846  						return (int16((((res_Q6) >> ((6) - 1)) + 1) >> 1))
 14847  					}()
 14848  				}()
 14849  			}()
 14850  
 14851  			res_Q6 = ((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[4]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[4]))) >> 16))
 14852  			res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[5]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[5]))) >> 16)))
 14853  			res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[3]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[3]))) >> 16)))
 14854  			res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 4*4))) >> 16) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[2]))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 4*4))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_2_3_COEFS_LQ[2]))) >> 16)))
 14855  
 14856  			/* Scale down, saturate and store in output array */
 14857  			*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&out, 2))) = func() int16 {
 14858  				if (func() int32 {
 14859  					if (6) == 1 {
 14860  						return (((res_Q6) >> 1) + ((res_Q6) & 1))
 14861  					}
 14862  					return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 14863  				}()) > 0x7FFF {
 14864  					return int16(0x7FFF)
 14865  				}
 14866  				return func() int16 {
 14867  					if (func() int32 {
 14868  						if (6) == 1 {
 14869  							return (((res_Q6) >> 1) + ((res_Q6) & 1))
 14870  						}
 14871  						return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 14872  					}()) < (int32(libc.Int16FromInt32(0x8000))) {
 14873  						return libc.Int16FromInt32(0x8000)
 14874  					}
 14875  					return func() int16 {
 14876  						if (6) == 1 {
 14877  							return (int16(((res_Q6) >> 1) + ((res_Q6) & 1)))
 14878  						}
 14879  						return (int16((((res_Q6) >> ((6) - 1)) + 1) >> 1))
 14880  					}()
 14881  				}()
 14882  			}()
 14883  
 14884  			buf_ptr += 4 * (uintptr(3))
 14885  			counter = counter - (3)
 14886  		}
 14887  
 14888  		in += 2 * (uintptr(nSamplesIn))
 14889  		inLen = inLen - (nSamplesIn)
 14890  
 14891  		if inLen > 0 {
 14892  			/* More iterations to do; copy last part of filtered signal to beginning of buffer */
 14893  			libc.Xmemcpy(tls, bp /* &buf[0] */, (bp /* &buf */ + uintptr(nSamplesIn)*4), (uint32(4) * uint32(unsafe.Sizeof(int32(0)))))
 14894  		} else {
 14895  			break
 14896  		}
 14897  	}
 14898  
 14899  	/* Copy last part of filtered signal to the state for the next call */
 14900  	libc.Xmemcpy(tls, S, (bp /* &buf */ + uintptr(nSamplesIn)*4), (uint32(4) * uint32(unsafe.Sizeof(int32(0)))))
 14901  }
 14902  
 14903  /* Downsample by a factor 3, low quality */
 14904  func SKP_Silk_resampler_down3(tls *libc.TLS, S uintptr, out uintptr, in uintptr, inLen int32) { /* SKP_Silk_resampler_down3.c:42:6: */
 14905  	bp := tls.Alloc(1944)
 14906  	defer tls.Free(1944)
 14907  
 14908  	var nSamplesIn int32
 14909  	var counter int32
 14910  	var res_Q6 int32
 14911  	// var buf [486]int32 at bp, 1944
 14912  
 14913  	var buf_ptr uintptr
 14914  
 14915  	/* Copy buffered samples to start of buffer */
 14916  	libc.Xmemcpy(tls, bp /* &buf[0] */, S, (uint32(6) * uint32(unsafe.Sizeof(int32(0)))))
 14917  
 14918  	/* Iterate over blocks of at most 480 input samples */
 14919  	for 1 != 0 {
 14920  		nSamplesIn = func() int32 {
 14921  			if (inLen) < (480) {
 14922  				return inLen
 14923  			}
 14924  			return 480
 14925  		}()
 14926  
 14927  		/* Second-order AR filter (output in Q8) */
 14928  		SKP_Silk_resampler_private_AR2(tls, (S + 6*4), (bp /* &buf */ + 6*4), in,
 14929  			uintptr(unsafe.Pointer(&SKP_Silk_Resampler_1_3_COEFS_LQ)), nSamplesIn)
 14930  
 14931  		/* Interpolate filtered signal */
 14932  		buf_ptr = bp /* &buf[0] */
 14933  		counter = nSamplesIn
 14934  		for counter > 2 {
 14935  			/* Inner product */
 14936  			res_Q6 = (((((*(*int32)(unsafe.Pointer(buf_ptr))) + (*(*int32)(unsafe.Pointer(buf_ptr + 5*4)))) >> 16) * (int32(SKP_Silk_Resampler_1_3_COEFS_LQ[2]))) + (((((*(*int32)(unsafe.Pointer(buf_ptr))) + (*(*int32)(unsafe.Pointer(buf_ptr + 5*4)))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_1_3_COEFS_LQ[2]))) >> 16))
 14937  			res_Q6 = ((res_Q6) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 4*4)))) >> 16) * (int32(SKP_Silk_Resampler_1_3_COEFS_LQ[3]))) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 4*4)))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_1_3_COEFS_LQ[3]))) >> 16)))
 14938  			res_Q6 = ((res_Q6) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 3*4)))) >> 16) * (int32(SKP_Silk_Resampler_1_3_COEFS_LQ[4]))) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 3*4)))) & 0x0000FFFF) * (int32(SKP_Silk_Resampler_1_3_COEFS_LQ[4]))) >> 16)))
 14939  
 14940  			/* Scale down, saturate and store in output array */
 14941  			*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&out, 2))) = func() int16 {
 14942  				if (func() int32 {
 14943  					if (6) == 1 {
 14944  						return (((res_Q6) >> 1) + ((res_Q6) & 1))
 14945  					}
 14946  					return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 14947  				}()) > 0x7FFF {
 14948  					return int16(0x7FFF)
 14949  				}
 14950  				return func() int16 {
 14951  					if (func() int32 {
 14952  						if (6) == 1 {
 14953  							return (((res_Q6) >> 1) + ((res_Q6) & 1))
 14954  						}
 14955  						return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 14956  					}()) < (int32(libc.Int16FromInt32(0x8000))) {
 14957  						return libc.Int16FromInt32(0x8000)
 14958  					}
 14959  					return func() int16 {
 14960  						if (6) == 1 {
 14961  							return (int16(((res_Q6) >> 1) + ((res_Q6) & 1)))
 14962  						}
 14963  						return (int16((((res_Q6) >> ((6) - 1)) + 1) >> 1))
 14964  					}()
 14965  				}()
 14966  			}()
 14967  
 14968  			buf_ptr += 4 * (uintptr(3))
 14969  			counter = counter - (3)
 14970  		}
 14971  
 14972  		in += 2 * (uintptr(nSamplesIn))
 14973  		inLen = inLen - (nSamplesIn)
 14974  
 14975  		if inLen > 0 {
 14976  			/* More iterations to do; copy last part of filtered signal to beginning of buffer */
 14977  			libc.Xmemcpy(tls, bp /* &buf[0] */, (bp /* &buf */ + uintptr(nSamplesIn)*4), (uint32(6) * uint32(unsafe.Sizeof(int32(0)))))
 14978  		} else {
 14979  			break
 14980  		}
 14981  	}
 14982  
 14983  	/* Copy last part of filtered signal to the state for the next call */
 14984  	libc.Xmemcpy(tls, S, (bp /* &buf */ + uintptr(nSamplesIn)*4), (uint32(6) * uint32(unsafe.Sizeof(int32(0)))))
 14985  }
 14986  
 14987  /* Second order AR filter with single delay elements */
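/* Each output sample satisfies                                                            */
/*   out_Q8[k] = (in[k] << 8) + (A_Q14[0] * out_Q8[k-1] + A_Q14[1] * out_Q8[k-2]) >> 14    */
/* i.e. a second-order all-pole recursion with coefficients in Q14; the out32 << 2 below   */
/* combined with the SMULWB's implicit >> 16 realizes the net >> 14.                       */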
 14988  func SKP_Silk_resampler_private_AR2(tls *libc.TLS, S uintptr, out_Q8 uintptr, in uintptr, A_Q14 uintptr, len int32) { /* SKP_Silk_resampler_private_AR2.c:40:6: */
 14989  	var k int32
 14990  	var out32 int32
 14991  
 14992  	for k = 0; k < len; k++ {
 14993  		out32 = ((*(*int32)(unsafe.Pointer(S))) + ((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (8)))
 14994  		*(*int32)(unsafe.Pointer(out_Q8 + uintptr(k)*4)) = out32
 14995  		out32 = ((out32) << (2))
 14996  		*(*int32)(unsafe.Pointer(S)) = ((*(*int32)(unsafe.Pointer(S + 1*4))) + ((((out32) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q14))))) + ((((out32) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q14))))) >> 16)))
 14997  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((((out32) >> 16) * (int32(*(*int16)(unsafe.Pointer(A_Q14 + 1*2))))) + ((((out32) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(A_Q14 + 1*2))))) >> 16))
 14998  	}
 14999  }
 15000  
 15001  /* Fourth order ARMA filter                                             */
 15002  /* Internally operates as two biquad filters in sequence.               */
 15003  
 15004  /* Coefficients are stored in a packed format:                                                      */
 15005  /*    { B1_Q14[1], B2_Q14[1], -A1_Q14[1], -A1_Q14[2], -A2_Q14[1], -A2_Q14[2], gain_Q16 }            */
 15006  /* where it is assumed that B*_Q14[0], B*_Q14[2], A*_Q14[0] are all 16384                           */
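/* Equivalently, each biquad computes                                                      */
/*   y[n] = x[n] + B_Q14[1]*x[n-1] + x[n-2] - A_Q14[1]*y[n-1] - A_Q14[2]*y[n-2]            */
/* with all coefficients in Q14 (16384 == 1.0); the cascaded output is then scaled by      */
/* gain_Q16 / 2^16 and saturated to int16.                                                 */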
 15007  func SKP_Silk_resampler_private_ARMA4(tls *libc.TLS, S uintptr, out uintptr, in uintptr, Coef uintptr, len int32) { /* SKP_Silk_resampler_private_ARMA4.c:45:6: */
 15008  	var k int32
 15009  	var in_Q8 int32
 15010  	var out1_Q8 int32
 15011  	var out2_Q8 int32
 15012  	var X int32
 15013  
 15014  	for k = 0; k < len; k++ {
 15015  		in_Q8 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (8))
 15016  
 15017  		/* Outputs of first and second biquad */
 15018  		out1_Q8 = ((in_Q8) + ((*(*int32)(unsafe.Pointer(S))) << (2)))
 15019  		out2_Q8 = ((out1_Q8) + ((*(*int32)(unsafe.Pointer(S + 2*4))) << (2)))
 15020  
 15021  		/* Update states, which are stored in Q6. Coefficients are in Q14 here */
 15022  		X = ((*(*int32)(unsafe.Pointer(S + 1*4))) + ((((in_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef))))) + ((((in_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef))))) >> 16)))
 15023  		*(*int32)(unsafe.Pointer(S)) = ((X) + ((((out1_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 2*2))))) + ((((out1_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 2*2))))) >> 16)))
 15024  
 15025  		X = ((*(*int32)(unsafe.Pointer(S + 3*4))) + ((((out1_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 1*2))))) + ((((out1_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 1*2))))) >> 16)))
 15026  		*(*int32)(unsafe.Pointer(S + 2*4)) = ((X) + ((((out2_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 4*2))))) + ((((out2_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 4*2))))) >> 16)))
 15027  
 15028  		*(*int32)(unsafe.Pointer(S + 1*4)) = (((in_Q8) >> (2)) + ((((out1_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 3*2))))) + ((((out1_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 3*2))))) >> 16)))
 15029  		*(*int32)(unsafe.Pointer(S + 3*4)) = (((out1_Q8) >> (2)) + ((((out2_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 5*2))))) + ((((out2_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 5*2))))) >> 16)))
 15030  
 15031  		/* Apply gain and store to output. The coefficient is in Q16 */
 15032  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
 15033  			if (((128) + ((((out2_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 6*2))))) + ((((out2_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 6*2))))) >> 16))) >> (8)) > 0x7FFF {
 15034  				return int16(0x7FFF)
 15035  			}
 15036  			return func() int16 {
 15037  				if (((128) + ((((out2_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 6*2))))) + ((((out2_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 6*2))))) >> 16))) >> (8)) < (int32(libc.Int16FromInt32(0x8000))) {
 15038  					return libc.Int16FromInt32(0x8000)
 15039  				}
 15040  				return (int16(((128) + ((((out2_Q8) >> 16) * (int32(*(*int16)(unsafe.Pointer(Coef + 6*2))))) + ((((out2_Q8) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(Coef + 6*2))))) >> 16))) >> (8)))
 15041  			}()
 15042  		}()
 15043  	}
 15044  }
 15045  
 15046  /* Copy */
 15047  func SKP_Silk_resampler_private_copy(tls *libc.TLS, SS uintptr, out uintptr, in uintptr, inLen int32) { /* SKP_Silk_resampler_private_copy.c:41:6: */
 15048  	libc.Xmemcpy(tls, out, in, (uint32(inLen) * uint32(unsafe.Sizeof(int16(0)))))
 15049  }
 15050  
 15051  /* Downsample by a factor 4. Note: very low quality, only use with input sampling rates above 96 kHz. */
 15052  func SKP_Silk_resampler_private_down4(tls *libc.TLS, S uintptr, out uintptr, in uintptr, inLen int32) { /* SKP_Silk_resampler_private_down4.c:40:6: */
 15053  	var k int32
 15054  	var len4 int32 = ((inLen) >> (2))
 15055  	var in32 int32
 15056  	var out32 int32
 15057  	var Y int32
 15058  	var X int32
 15059  
 15060  	/* Internal variables and state are in Q10 format */
 15061  	for k = 0; k < len4; k++ {
 15062  		/* Add two input samples and convert to Q10 */
 15063  		in32 = (((int32(*(*int16)(unsafe.Pointer(in + uintptr((4*k))*2)))) + (int32(*(*int16)(unsafe.Pointer(in + uintptr(((4*k)+1))*2))))) << (9))
 15064  
 15065  		/* All-pass section for even input sample */
 15066  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S))))
 15067  		X = ((Y) + ((((Y) >> 16) * (int32(SKP_Silk_resampler_down2_1))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_down2_1))) >> 16)))
 15068  		out32 = ((*(*int32)(unsafe.Pointer(S))) + (X))
 15069  		*(*int32)(unsafe.Pointer(S)) = ((in32) + (X))
 15070  
 15071  		/* Add two input samples and convert to Q10 */
 15072  		in32 = (((int32(*(*int16)(unsafe.Pointer(in + uintptr(((4*k)+2))*2)))) + (int32(*(*int16)(unsafe.Pointer(in + uintptr(((4*k)+3))*2))))) << (9))
 15073  
 15074  		/* All-pass section for odd input sample */
 15075  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S + 1*4))))
 15076  		X = ((((Y) >> 16) * (int32(SKP_Silk_resampler_down2_0))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_down2_0))) >> 16))
 15077  		out32 = ((out32) + (*(*int32)(unsafe.Pointer(S + 1*4))))
 15078  		out32 = ((out32) + (X))
 15079  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((in32) + (X))
 15080  
 15081  		/* Add, convert back to int16 and store to output */
 15082  		*(*int16)(unsafe.Pointer(out + uintptr(k)*2)) = func() int16 {
 15083  			if (func() int32 {
 15084  				if (11) == 1 {
 15085  					return (((out32) >> 1) + ((out32) & 1))
 15086  				}
 15087  				return ((((out32) >> ((11) - 1)) + 1) >> 1)
 15088  			}()) > 0x7FFF {
 15089  				return int16(0x7FFF)
 15090  			}
 15091  			return func() int16 {
 15092  				if (func() int32 {
 15093  					if (11) == 1 {
 15094  						return (((out32) >> 1) + ((out32) & 1))
 15095  					}
 15096  					return ((((out32) >> ((11) - 1)) + 1) >> 1)
 15097  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15098  					return libc.Int16FromInt32(0x8000)
 15099  				}
 15100  				return func() int16 {
 15101  					if (11) == 1 {
 15102  						return (int16(((out32) >> 1) + ((out32) & 1)))
 15103  					}
 15104  					return (int16((((out32) >> ((11) - 1)) + 1) >> 1))
 15105  				}()
 15106  			}()
 15107  		}()
 15108  	}
 15109  }
 15110  
 15111  func SKP_Silk_resampler_private_down_FIR_INTERPOL0(tls *libc.TLS, out uintptr, buf2 uintptr, FIR_Coefs uintptr, max_index_Q16 int32, index_increment_Q16 int32) uintptr { /* SKP_Silk_resampler_private_down_FIR.c:39:22: */
 15112  	var index_Q16 int32
 15113  	var res_Q6 int32
 15114  	var buf_ptr uintptr
 15115  	for index_Q16 = 0; index_Q16 < max_index_Q16; index_Q16 = index_Q16 + (index_increment_Q16) {
 15116  		/* Integer part gives pointer to buffered input */
 15117  		buf_ptr = (buf2 + uintptr(((index_Q16)>>(16)))*4)
 15118  
 15119  		/* Inner product */
 15120  		res_Q6 = (((((*(*int32)(unsafe.Pointer(buf_ptr))) + (*(*int32)(unsafe.Pointer(buf_ptr + 11*4)))) >> 16) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs))))) + (((((*(*int32)(unsafe.Pointer(buf_ptr))) + (*(*int32)(unsafe.Pointer(buf_ptr + 11*4)))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs))))) >> 16))
 15121  		res_Q6 = ((res_Q6) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 10*4)))) >> 16) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 1*2))))) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 10*4)))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 1*2))))) >> 16)))
 15122  		res_Q6 = ((res_Q6) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 9*4)))) >> 16) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 2*2))))) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 9*4)))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 2*2))))) >> 16)))
 15123  		res_Q6 = ((res_Q6) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 8*4)))) >> 16) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 3*2))))) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 8*4)))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 3*2))))) >> 16)))
 15124  		res_Q6 = ((res_Q6) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 4*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 7*4)))) >> 16) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 4*2))))) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 4*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 7*4)))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 4*2))))) >> 16)))
 15125  		res_Q6 = ((res_Q6) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 5*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 6*4)))) >> 16) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 5*2))))) + (((((*(*int32)(unsafe.Pointer(buf_ptr + 5*4))) + (*(*int32)(unsafe.Pointer(buf_ptr + 6*4)))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(FIR_Coefs + 5*2))))) >> 16)))
 15126  
 15127  		/* Scale down, saturate and store in output array */
 15128  		*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&out, 2))) = func() int16 {
 15129  			if (func() int32 {
 15130  				if (6) == 1 {
 15131  					return (((res_Q6) >> 1) + ((res_Q6) & 1))
 15132  				}
 15133  				return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 15134  			}()) > 0x7FFF {
 15135  				return int16(0x7FFF)
 15136  			}
 15137  			return func() int16 {
 15138  				if (func() int32 {
 15139  					if (6) == 1 {
 15140  						return (((res_Q6) >> 1) + ((res_Q6) & 1))
 15141  					}
 15142  					return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 15143  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15144  					return libc.Int16FromInt32(0x8000)
 15145  				}
 15146  				return func() int16 {
 15147  					if (6) == 1 {
 15148  						return (int16(((res_Q6) >> 1) + ((res_Q6) & 1)))
 15149  					}
 15150  					return (int16((((res_Q6) >> ((6) - 1)) + 1) >> 1))
 15151  				}()
 15152  			}()
 15153  		}()
 15154  	}
 15155  	return out
 15156  }
 15157  
 15158  func SKP_Silk_resampler_private_down_FIR_INTERPOL1(tls *libc.TLS, out uintptr, buf2 uintptr, FIR_Coefs uintptr, max_index_Q16 int32, index_increment_Q16 int32, FIR_Fracs int32) uintptr { /* SKP_Silk_resampler_private_down_FIR.c:62:22: */
 15159  	var index_Q16 int32
 15160  	var res_Q6 int32
 15161  	var buf_ptr uintptr
 15162  	var interpol_ind int32
 15163  	var interpol_ptr uintptr
 15164  	for index_Q16 = 0; index_Q16 < max_index_Q16; index_Q16 = index_Q16 + (index_increment_Q16) {
 15165  		/* Integer part gives pointer to buffered input */
 15166  		buf_ptr = (buf2 + uintptr(((index_Q16)>>(16)))*4)
 15167  
 15168  		/* Fractional part gives interpolation coefficients */
 15169  		interpol_ind = ((((index_Q16 & 0xFFFF) >> 16) * (int32(int16(FIR_Fracs)))) + ((((index_Q16 & 0xFFFF) & 0x0000FFFF) * (int32(int16(FIR_Fracs)))) >> 16))
 15170  
 15171  		/* Inner product */
 15172  		interpol_ptr = (FIR_Coefs + uintptr(((12/2)*interpol_ind))*2)
 15173  		res_Q6 = ((((*(*int32)(unsafe.Pointer(buf_ptr))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr))))) >> 16))
 15174  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 1*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 1*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 1*2))))) >> 16)))
 15175  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 2*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 2*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 2*2))))) >> 16)))
 15176  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 3*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 3*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 3*2))))) >> 16)))
 15177  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 4*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 4*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 4*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 4*2))))) >> 16)))
 15178  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 5*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 5*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 5*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 5*2))))) >> 16)))
 15179  		interpol_ptr = (FIR_Coefs + uintptr(((12/2)*((FIR_Fracs-1)-interpol_ind)))*2)
 15180  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 11*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 11*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr))))) >> 16)))
 15181  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 10*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 1*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 10*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 1*2))))) >> 16)))
 15182  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 9*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 2*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 9*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 2*2))))) >> 16)))
 15183  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 8*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 3*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 8*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 3*2))))) >> 16)))
 15184  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 7*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 4*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 7*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 4*2))))) >> 16)))
 15185  		res_Q6 = ((res_Q6) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 6*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 5*2))))) + ((((*(*int32)(unsafe.Pointer(buf_ptr + 6*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(interpol_ptr + 5*2))))) >> 16)))
 15186  
 15187  		/* Scale down, saturate and store in output array */
 15188  		*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&out, 2))) = func() int16 {
 15189  			if (func() int32 {
 15190  				if (6) == 1 {
 15191  					return (((res_Q6) >> 1) + ((res_Q6) & 1))
 15192  				}
 15193  				return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 15194  			}()) > 0x7FFF {
 15195  				return int16(0x7FFF)
 15196  			}
 15197  			return func() int16 {
 15198  				if (func() int32 {
 15199  					if (6) == 1 {
 15200  						return (((res_Q6) >> 1) + ((res_Q6) & 1))
 15201  					}
 15202  					return ((((res_Q6) >> ((6) - 1)) + 1) >> 1)
 15203  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15204  					return libc.Int16FromInt32(0x8000)
 15205  				}
 15206  				return func() int16 {
 15207  					if (6) == 1 {
 15208  						return (int16(((res_Q6) >> 1) + ((res_Q6) & 1)))
 15209  					}
 15210  					return (int16((((res_Q6) >> ((6) - 1)) + 1) >> 1))
 15211  				}()
 15212  			}()
 15213  		}()
 15214  	}
 15215  	return out
 15216  }
 15217  
 15218  /* Resample with a 2x downsampler (optional), a 2nd order AR filter followed by FIR interpolation */
 15219  func SKP_Silk_resampler_private_down_FIR(tls *libc.TLS, SS uintptr, out uintptr, in uintptr, inLen int32) { /* SKP_Silk_resampler_private_down_FIR.c:100:6: */
 15220  	bp := tls.Alloc(2448)
 15221  	defer tls.Free(2448)
 15222  
 15223  	var S uintptr = SS
 15224  	var nSamplesIn int32
 15225  	var max_index_Q16 int32
 15226  	var index_increment_Q16 int32
 15227  	// var buf1 [240]int16 at bp+1968, 480
 15228  
 15229  	// var buf2 [492]int32 at bp, 1968
 15230  
 15231  	var FIR_Coefs uintptr
 15232  
 15233  	/* Copy buffered samples to start of buffer */
 15234  	libc.Xmemcpy(tls, bp /* &buf2[0] */, S+24 /* &.sFIR */, (uint32(12) * uint32(unsafe.Sizeof(int32(0)))))
 15235  
 15236  	FIR_Coefs = ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs + 2*2)
 15237  
 15238  	/* Iterate over blocks of frameSizeIn input samples */
 15239  	index_increment_Q16 = (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FinvRatio_Q16
 15240  	for 1 != 0 {
 15241  		nSamplesIn = func() int32 {
 15242  			if (inLen) < ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize) {
 15243  				return inLen
 15244  			}
 15245  			return (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize
 15246  		}()
 15247  
 15248  		if (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x == 1 {
 15249  			/* Downsample 2x */
 15250  			SKP_Silk_resampler_down2(tls, S+88 /* &.sDown2 */, bp+1968 /* &buf1[0] */, in, nSamplesIn)
 15251  
 15252  			nSamplesIn = ((nSamplesIn) >> (1))
 15253  
 15254  			/* Second-order AR filter (output in Q8) */
 15255  			SKP_Silk_resampler_private_AR2(tls, S /* &.sIIR */, (bp /* &buf2 */ + 12*4), bp+1968 /* &buf1[0] */, (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs, nSamplesIn)
 15256  		} else {
 15257  			/* Second-order AR filter (output in Q8) */
 15258  			SKP_Silk_resampler_private_AR2(tls, S /* &.sIIR */, (bp /* &buf2 */ + 12*4), in, (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs, nSamplesIn)
 15259  		}
 15260  
 15261  		max_index_Q16 = ((nSamplesIn) << (16))
 15262  
 15263  		/* Interpolate filtered signal */
 15264  		if (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs == 1 {
 15265  			out = SKP_Silk_resampler_private_down_FIR_INTERPOL0(tls, out, bp /* &buf2[0] */, FIR_Coefs, max_index_Q16, index_increment_Q16)
 15266  		} else {
 15267  			out = SKP_Silk_resampler_private_down_FIR_INTERPOL1(tls, out, bp /* &buf2[0] */, FIR_Coefs, max_index_Q16, index_increment_Q16, (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FFIR_Fracs)
 15268  		}
 15269  
 15270  		in += 2 * uintptr((nSamplesIn << (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x))
 15271  		inLen = inLen - (nSamplesIn << (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x)
 15272  
 15273  		if inLen > (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x {
 15274  			/* More iterations to do; copy last part of filtered signal to beginning of buffer */
 15275  			libc.Xmemcpy(tls, bp /* &buf2[0] */, (bp /* &buf2 */ + uintptr(nSamplesIn)*4), (uint32(12) * uint32(unsafe.Sizeof(int32(0)))))
 15276  		} else {
 15277  			break
 15278  		}
 15279  	}
 15280  
 15281  	/* Copy last part of filtered signal to the state for the next call */
 15282  	libc.Xmemcpy(tls, S+24 /* &.sFIR */, (bp /* &buf2 */ + uintptr(nSamplesIn)*4), (uint32(12) * uint32(unsafe.Sizeof(int32(0)))))
 15283  }
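
// fracIndexSketch illustrates (editorial sketch, not part of the ccgo
// translation) how the INTERPOL loops above consume the Q16 position counter:
// index_Q16 advances by index_increment_Q16 (the state's invRatio_Q16) per
// output sample, its integer part addresses the Q8 work buffer buf2, and its
// low 16 bits select one of FIR_Fracs interpolation coefficient sets. Because
// the fractional phase is below 2^16, the SMULWB form used in INTERPOL1
// reduces to a plain (frac * FIR_Fracs) >> 16.
func fracIndexSketch(indexQ16, firFracs int32) (bufOffset, interpolInd int32) {
	bufOffset = indexQ16 >> 16
	frac := indexQ16 & 0xFFFF
	interpolInd = (frac * firFracs) >> 16
	return
}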
 15284  
 15285  func SKP_Silk_resampler_private_IIR_FIR_INTERPOL(tls *libc.TLS, out uintptr, buf uintptr, max_index_Q16 int32, index_increment_Q16 int32) uintptr { /* SKP_Silk_resampler_private_IIR_FIR.c:39:22: */
 15286  	var index_Q16 int32
 15287  	var res_Q15 int32
 15288  	var buf_ptr uintptr
 15289  	var table_index int32
 15290  	/* Interpolate upsampled signal and store in output array */
 15291  	for index_Q16 = 0; index_Q16 < max_index_Q16; index_Q16 = index_Q16 + (index_increment_Q16) {
 15292  		table_index = ((((index_Q16 & 0xFFFF) >> 16) * (int32(int16(144)))) + ((((index_Q16 & 0xFFFF) & 0x0000FFFF) * (int32(int16(144)))) >> 16))
 15293  		buf_ptr = (buf + uintptr((index_Q16>>16))*2)
 15294  
 15295  		res_Q15 = ((int32(*(*int16)(unsafe.Pointer(buf_ptr)))) * (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_resampler_frac_FIR_144)) + uintptr(table_index)*6))))))
 15296  		res_Q15 = ((res_Q15) + ((int32(*(*int16)(unsafe.Pointer(buf_ptr + 1*2)))) * (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_resampler_frac_FIR_144)) + uintptr(table_index)*6) + 1*2))))))
 15297  		res_Q15 = ((res_Q15) + ((int32(*(*int16)(unsafe.Pointer(buf_ptr + 2*2)))) * (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_resampler_frac_FIR_144)) + uintptr(table_index)*6) + 2*2))))))
 15298  		res_Q15 = ((res_Q15) + ((int32(*(*int16)(unsafe.Pointer(buf_ptr + 3*2)))) * (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_resampler_frac_FIR_144)) + uintptr((143-table_index))*6) + 2*2))))))
 15299  		res_Q15 = ((res_Q15) + ((int32(*(*int16)(unsafe.Pointer(buf_ptr + 4*2)))) * (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_resampler_frac_FIR_144)) + uintptr((143-table_index))*6) + 1*2))))))
 15300  		res_Q15 = ((res_Q15) + ((int32(*(*int16)(unsafe.Pointer(buf_ptr + 5*2)))) * (int32(*(*int16)(unsafe.Pointer((uintptr(unsafe.Pointer(&SKP_Silk_resampler_frac_FIR_144)) + uintptr((143-table_index))*6)))))))
 15301  		*(*int16)(unsafe.Pointer(libc.PostIncUintptr(&out, 2))) = func() int16 {
 15302  			if (func() int32 {
 15303  				if (15) == 1 {
 15304  					return (((res_Q15) >> 1) + ((res_Q15) & 1))
 15305  				}
 15306  				return ((((res_Q15) >> ((15) - 1)) + 1) >> 1)
 15307  			}()) > 0x7FFF {
 15308  				return int16(0x7FFF)
 15309  			}
 15310  			return func() int16 {
 15311  				if (func() int32 {
 15312  					if (15) == 1 {
 15313  						return (((res_Q15) >> 1) + ((res_Q15) & 1))
 15314  					}
 15315  					return ((((res_Q15) >> ((15) - 1)) + 1) >> 1)
 15316  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15317  					return libc.Int16FromInt32(0x8000)
 15318  				}
 15319  				return func() int16 {
 15320  					if (15) == 1 {
 15321  						return (int16(((res_Q15) >> 1) + ((res_Q15) & 1)))
 15322  					}
 15323  					return (int16((((res_Q15) >> ((15) - 1)) + 1) >> 1))
 15324  				}()
 15325  			}()
 15326  		}()
 15327  	}
 15328  	return out
 15329  }
 15330  
 15331  /* Upsample using a combination of allpass-based 2x upsampling and FIR interpolation */
 15332  func SKP_Silk_resampler_private_IIR_FIR(tls *libc.TLS, SS uintptr, out uintptr, in uintptr, inLen int32) { /* SKP_Silk_resampler_private_IIR_FIR.c:60:6: */
 15333  	bp := tls.Alloc(1932)
 15334  	defer tls.Free(1932)
 15335  
 15336  	var S uintptr = SS
 15337  	var nSamplesIn int32
 15338  	var max_index_Q16 int32
 15339  	var index_increment_Q16 int32
 15340  	// var buf [966]int16 at bp, 1932
 15341  
 15342  	/* Copy buffered samples to start of buffer */
 15343  	libc.Xmemcpy(tls, bp /* &buf[0] */, S+24 /* &.sFIR */, (uint32(6) * uint32(unsafe.Sizeof(int32(0)))))
 15344  
 15345  	/* Iterate over blocks of frameSizeIn input samples */
 15346  	index_increment_Q16 = (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FinvRatio_Q16
 15347  	for 1 != 0 {
 15348  		nSamplesIn = func() int32 {
 15349  			if (inLen) < ((*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize) {
 15350  				return inLen
 15351  			}
 15352  			return (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FbatchSize
 15353  		}()
 15354  
 15355  		if (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x == 1 {
 15356  			/* Upsample 2x */
 15357  			(*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32))(unsafe.Pointer((S + 100 /* &.up2_function */))))(tls, S /* &.sIIR */, (bp /* &buf */ + 6*2), in, nSamplesIn)
 15358  		} else {
 15359  			/* Fourth-order ARMA filter */
 15360  			SKP_Silk_resampler_private_ARMA4(tls, S /* &.sIIR */, (bp /* &buf */ + 6*2), in, (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).FCoefs, nSamplesIn)
 15361  		}
 15362  
 15363  		max_index_Q16 = ((nSamplesIn) << (16 + (*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x)) /* +1 if 2x upsampling */
 15364  		out = SKP_Silk_resampler_private_IIR_FIR_INTERPOL(tls, out, bp /* &buf[0] */, max_index_Q16, index_increment_Q16)
 15365  		in += 2 * uintptr(nSamplesIn)
 15366  		inLen = inLen - (nSamplesIn)
 15367  
 15368  		if inLen > 0 {
 15369  			/* More iterations to do; copy last part of filtered signal to beginning of buffer */
 15370  			libc.Xmemcpy(tls, bp /* &buf[0] */, (bp /* &buf */ + uintptr((nSamplesIn<<(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x))*2), (uint32(6) * uint32(unsafe.Sizeof(int32(0)))))
 15371  		} else {
 15372  			break
 15373  		}
 15374  	}
 15375  
 15376  	/* Copy last part of filtered signal to the state for the next call */
 15377  	libc.Xmemcpy(tls, S+24 /* &.sFIR */, (bp /* &buf */ + uintptr((nSamplesIn<<(*SKP_Silk_resampler_state_struct)(unsafe.Pointer(S)).Finput2x))*2), (uint32(6) * uint32(unsafe.Sizeof(int32(0)))))
 15378  }
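
// Editorial note (not part of the ccgo translation): in the INTERPOL helper
// above, each output sample is a 6-tap FIR over the work buffer (2x-upsampled
// when input2x == 1). The fractional phase picks row table_index of the
// 144 x 3 table SKP_Silk_resampler_frac_FIR_144 for taps 0..2 and row
// 143 - table_index, read in reverse, for taps 3..5, so a single half-size
// table serves both halves of the symmetric interpolation filter. With 2x
// upsampling enabled, max_index_Q16 = nSamplesIn << (16 + 1), reflecting that
// the buffer holds twice as many samples as were read from the input.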
 15379  
 15380  /* Upsample by a factor 2, high quality */
 15381  /* Uses 2nd order allpass filters for the 2x upsampling, followed by a      */
 15382  /* notch filter just above Nyquist.                                         */
 15383  func SKP_Silk_resampler_private_up2_HQ(tls *libc.TLS, S uintptr, out uintptr, in uintptr, len int32) { /* SKP_Silk_resampler_private_up2_HQ.c:42:6: */
 15384  	var k int32
 15385  	var in32 int32
 15386  	var out32_1 int32
 15387  	var out32_2 int32
 15388  	var Y int32
 15389  	var X int32
 15390  
 15391  	/* Internal variables and state are in Q10 format */
 15392  	for k = 0; k < len; k++ {
 15393  		/* Convert to Q10 */
 15394  		in32 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (10))
 15395  
 15396  		/* First all-pass section for even output sample */
 15397  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S))))
 15398  		X = ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_hq_0[0]))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_0[0]))) >> 16))
 15399  		out32_1 = ((*(*int32)(unsafe.Pointer(S))) + (X))
 15400  		*(*int32)(unsafe.Pointer(S)) = ((in32) + (X))
 15401  
 15402  		/* Second all-pass section for even output sample */
 15403  		Y = ((out32_1) - (*(*int32)(unsafe.Pointer(S + 1*4))))
 15404  		X = ((Y) + ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_hq_0[1]))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_0[1]))) >> 16)))
 15405  		out32_2 = ((*(*int32)(unsafe.Pointer(S + 1*4))) + (X))
 15406  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((out32_1) + (X))
 15407  
 15408  		/* Biquad notch filter */
 15409  		out32_2 = ((out32_2) + ((((*(*int32)(unsafe.Pointer(S + 5*4))) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[2]))) + ((((*(*int32)(unsafe.Pointer(S + 5*4))) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[2]))) >> 16)))
 15410  		out32_2 = ((out32_2) + ((((*(*int32)(unsafe.Pointer(S + 4*4))) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[1]))) + ((((*(*int32)(unsafe.Pointer(S + 4*4))) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[1]))) >> 16)))
 15411  		out32_1 = ((out32_2) + ((((*(*int32)(unsafe.Pointer(S + 4*4))) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[0]))) + ((((*(*int32)(unsafe.Pointer(S + 4*4))) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[0]))) >> 16)))
 15412  		*(*int32)(unsafe.Pointer(S + 5*4)) = ((out32_2) - (*(*int32)(unsafe.Pointer(S + 5*4))))
 15413  
 15414  		/* Apply gain in Q15, convert back to int16 and store to output */
 15415  		*(*int16)(unsafe.Pointer(out + uintptr((2*k))*2)) = func() int16 {
 15416  			if (((256) + ((((out32_1) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) + ((((out32_1) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) >> 16))) >> (9)) > 0x7FFF {
 15417  				return int16(0x7FFF)
 15418  			}
 15419  			return func() int16 {
 15420  				if (((256) + ((((out32_1) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) + ((((out32_1) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) >> 16))) >> (9)) < (int32(libc.Int16FromInt32(0x8000))) {
 15421  					return libc.Int16FromInt32(0x8000)
 15422  				}
 15423  				return (int16(((256) + ((((out32_1) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) + ((((out32_1) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) >> 16))) >> (9)))
 15424  			}()
 15425  		}()
 15426  
 15427  		/* First all-pass section for odd output sample */
 15428  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S + 2*4))))
 15429  		X = ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_hq_1[0]))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_1[0]))) >> 16))
 15430  		out32_1 = ((*(*int32)(unsafe.Pointer(S + 2*4))) + (X))
 15431  		*(*int32)(unsafe.Pointer(S + 2*4)) = ((in32) + (X))
 15432  
 15433  		/* Second all-pass section for odd output sample */
 15434  		Y = ((out32_1) - (*(*int32)(unsafe.Pointer(S + 3*4))))
 15435  		X = ((Y) + ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_hq_1[1]))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_1[1]))) >> 16)))
 15436  		out32_2 = ((*(*int32)(unsafe.Pointer(S + 3*4))) + (X))
 15437  		*(*int32)(unsafe.Pointer(S + 3*4)) = ((out32_1) + (X))
 15438  
 15439  		/* Biquad notch filter */
 15440  		out32_2 = ((out32_2) + ((((*(*int32)(unsafe.Pointer(S + 4*4))) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[2]))) + ((((*(*int32)(unsafe.Pointer(S + 4*4))) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[2]))) >> 16)))
 15441  		out32_2 = ((out32_2) + ((((*(*int32)(unsafe.Pointer(S + 5*4))) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[1]))) + ((((*(*int32)(unsafe.Pointer(S + 5*4))) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[1]))) >> 16)))
 15442  		out32_1 = ((out32_2) + ((((*(*int32)(unsafe.Pointer(S + 5*4))) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[0]))) + ((((*(*int32)(unsafe.Pointer(S + 5*4))) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[0]))) >> 16)))
 15443  		*(*int32)(unsafe.Pointer(S + 4*4)) = ((out32_2) - (*(*int32)(unsafe.Pointer(S + 4*4))))
 15444  
 15445  		/* Apply gain in Q15, convert back to int16 and store to output */
 15446  		*(*int16)(unsafe.Pointer(out + uintptr(((2*k)+1))*2)) = func() int16 {
 15447  			if (((256) + ((((out32_1) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) + ((((out32_1) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) >> 16))) >> (9)) > 0x7FFF {
 15448  				return int16(0x7FFF)
 15449  			}
 15450  			return func() int16 {
 15451  				if (((256) + ((((out32_1) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) + ((((out32_1) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) >> 16))) >> (9)) < (int32(libc.Int16FromInt32(0x8000))) {
 15452  					return libc.Int16FromInt32(0x8000)
 15453  				}
 15454  				return (int16(((256) + ((((out32_1) >> 16) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) + ((((out32_1) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_hq_notch[3]))) >> 16))) >> (9)))
 15455  			}()
 15456  		}()
 15457  	}
 15458  }
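
// allpassStepSketch shows (editorial sketch, not part of the ccgo translation)
// the one-multiply all-pass section update shared by the 2x up- and
// downsamplers in this file: each section keeps a single Q10 state word. For
// coefficients stored as value - 65536 (the second entry of each table, e.g.
// int16(33727 - 65536)) the routines add Y once more, i.e.
// X = Y + SMULWB(Y, coef), realizing a Q16 coefficient above 32767 that would
// not fit directly in an int16.
func allpassStepSketch(s *int32, in32 int32, coefQ16 int16) (out32 int32) {
	y := in32 - *s
	x := (y>>16)*int32(coefQ16) + ((y&0x0000FFFF)*int32(coefQ16))>>16
	out32 = *s + x
	*s = in32 + x
	return
}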
 15459  
 15460  func SKP_Silk_resampler_private_up2_HQ_wrapper(tls *libc.TLS, SS uintptr, out uintptr, in uintptr, len int32) { /* SKP_Silk_resampler_private_up2_HQ.c:109:6: */
 15461  	var S uintptr = SS
 15462  	SKP_Silk_resampler_private_up2_HQ(tls, S /* &.sIIR */, out, in, len)
 15463  }
 15464  
 15465  /* Upsample by a factor 4. Note: very low quality, only use with output sampling rates above 96 kHz. */
 15466  func SKP_Silk_resampler_private_up4(tls *libc.TLS, S uintptr, out uintptr, in uintptr, len int32) { /* SKP_Silk_resampler_private_up4.c:40:6: */
 15467  	var k int32
 15468  	var in32 int32
 15469  	var out32 int32
 15470  	var Y int32
 15471  	var X int32
 15472  	var out16 int16
 15473  
 15474  	/* Internal variables and state are in Q10 format */
 15475  	for k = 0; k < len; k++ {
 15476  		/* Convert to Q10 */
 15477  		in32 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (10))
 15478  
 15479  		/* All-pass section for even output sample */
 15480  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S))))
 15481  		X = ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_lq_0))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_lq_0))) >> 16))
 15482  		out32 = ((*(*int32)(unsafe.Pointer(S))) + (X))
 15483  		*(*int32)(unsafe.Pointer(S)) = ((in32) + (X))
 15484  
 15485  		/* Convert back to int16 and store to output */
 15486  		out16 = func() int16 {
 15487  			if (func() int32 {
 15488  				if (10) == 1 {
 15489  					return (((out32) >> 1) + ((out32) & 1))
 15490  				}
 15491  				return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15492  			}()) > 0x7FFF {
 15493  				return int16(0x7FFF)
 15494  			}
 15495  			return func() int16 {
 15496  				if (func() int32 {
 15497  					if (10) == 1 {
 15498  						return (((out32) >> 1) + ((out32) & 1))
 15499  					}
 15500  					return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15501  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15502  					return libc.Int16FromInt32(0x8000)
 15503  				}
 15504  				return func() int16 {
 15505  					if (10) == 1 {
 15506  						return (int16(((out32) >> 1) + ((out32) & 1)))
 15507  					}
 15508  					return (int16((((out32) >> ((10) - 1)) + 1) >> 1))
 15509  				}()
 15510  			}()
 15511  		}()
 15512  		*(*int16)(unsafe.Pointer(out + uintptr((4*k))*2)) = out16
 15513  		*(*int16)(unsafe.Pointer(out + uintptr(((4*k)+1))*2)) = out16
 15514  
 15515  		/* All-pass section for odd output sample */
 15516  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S + 1*4))))
 15517  		X = ((Y) + ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_lq_1))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_lq_1))) >> 16)))
 15518  		out32 = ((*(*int32)(unsafe.Pointer(S + 1*4))) + (X))
 15519  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((in32) + (X))
 15520  
 15521  		/* Convert back to int16 and store to output */
 15522  		out16 = func() int16 {
 15523  			if (func() int32 {
 15524  				if (10) == 1 {
 15525  					return (((out32) >> 1) + ((out32) & 1))
 15526  				}
 15527  				return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15528  			}()) > 0x7FFF {
 15529  				return int16(0x7FFF)
 15530  			}
 15531  			return func() int16 {
 15532  				if (func() int32 {
 15533  					if (10) == 1 {
 15534  						return (((out32) >> 1) + ((out32) & 1))
 15535  					}
 15536  					return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15537  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15538  					return libc.Int16FromInt32(0x8000)
 15539  				}
 15540  				return func() int16 {
 15541  					if (10) == 1 {
 15542  						return (int16(((out32) >> 1) + ((out32) & 1)))
 15543  					}
 15544  					return (int16((((out32) >> ((10) - 1)) + 1) >> 1))
 15545  				}()
 15546  			}()
 15547  		}()
 15548  		*(*int16)(unsafe.Pointer(out + uintptr(((4*k)+2))*2)) = out16
 15549  		*(*int16)(unsafe.Pointer(out + uintptr(((4*k)+3))*2)) = out16
 15550  	}
 15551  }
 15552  
 15553  /* Tables for 2x downsampler */
 15554  var SKP_Silk_resampler_down2_0 int16 = int16(9872)            /* SKP_Silk_resampler_rom.c:41:17 */
 15555  var SKP_Silk_resampler_down2_1 int16 = (int16(39809 - 65536)) /* SKP_Silk_resampler_rom.c:42:17 */
 15556  
 15557  /* Tables for 2x upsampler, low quality */
 15558  var SKP_Silk_resampler_up2_lq_0 int16 = int16(8102)            /* SKP_Silk_resampler_rom.c:45:17 */
 15559  var SKP_Silk_resampler_up2_lq_1 int16 = (int16(36783 - 65536)) /* SKP_Silk_resampler_rom.c:46:17 */
 15560  
 15561  /* Tables for 2x upsampler, high quality */
 15562  var SKP_Silk_resampler_up2_hq_0 = [2]int16{int16(4280), (int16(33727 - 65536))}  /* SKP_Silk_resampler_rom.c:49:17 */
 15563  var SKP_Silk_resampler_up2_hq_1 = [2]int16{int16(16295), (int16(54015 - 65536))} /* SKP_Silk_resampler_rom.c:50:17 */
 15564  /* Matlab code for the notch filter coefficients: */
 15565  /* B = [1, 0.12, 1];  A = [1, 0.055, 0.8]; G = 0.87; freqz(G * B, A, 2^14, 16e3); axis([0, 8000, -10, 1]);  */
 15566  /* fprintf('\t%6d, %6d, %6d, %6d\n', round(B(2)*2^16), round(-A(2)*2^16), round((1-A(3))*2^16), round(G*2^15)) */
 15567  var SKP_Silk_resampler_up2_hq_notch = [4]int16{int16(7864), int16(-3604), int16(13107), int16(28508)} /* SKP_Silk_resampler_rom.c:54:17 */
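
// Editorial check of the Matlab recipe above (not part of the ccgo
// translation): with B = [1, 0.12, 1], A = [1, 0.055, 0.8] and G = 0.87,
// round(0.12*2^16) = 7864, round(-0.055*2^16) = -3604,
// round((1-0.8)*2^16) = 13107 and round(0.87*2^15) = 28508, matching the four
// table entries.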
 15568  
 15569  /* Tables with IIR and FIR coefficients for fractional downsamplers (70 Words) */
 15570  var SKP_Silk_Resampler_3_4_COEFS = [20]int16{
 15571  	int16(-18249), int16(-12532),
 15572  	int16(-97), int16(284), int16(-495), int16(309), int16(10268), int16(20317),
 15573  	int16(-94), int16(156), int16(-48), int16(-720), int16(5984), int16(18278),
 15574  	int16(-45), int16(-4), int16(237), int16(-847), int16(2540), int16(14662),
 15575  } /* SKP_Silk_resampler_rom.c:58:33 */
 15576  
 15577  var SKP_Silk_Resampler_2_3_COEFS = [14]int16{
 15578  	int16(-11891), int16(-12486),
 15579  	int16(20), int16(211), int16(-657), int16(688), int16(8423), int16(15911),
 15580  	int16(-44), int16(197), int16(-152), int16(-653), int16(3855), int16(13015),
 15581  } /* SKP_Silk_resampler_rom.c:65:33 */
 15582  
 15583  var SKP_Silk_Resampler_1_2_COEFS = [8]int16{
 15584  	int16(2415), int16(-13101),
 15585  	int16(158), int16(-295), int16(-400), int16(1265), int16(4832), int16(7968),
 15586  } /* SKP_Silk_resampler_rom.c:71:33 */
 15587  
 15588  var SKP_Silk_Resampler_3_8_COEFS = [20]int16{
 15589  	int16(13270), int16(-13738),
 15590  	int16(-294), int16(-123), int16(747), int16(2043), int16(3339), int16(3995),
 15591  	int16(-151), int16(-311), int16(414), int16(1583), int16(2947), int16(3877),
 15592  	int16(-33), int16(-389), int16(143), int16(1141), int16(2503), int16(3653),
 15593  } /* SKP_Silk_resampler_rom.c:76:33 */
 15594  
 15595  var SKP_Silk_Resampler_1_3_COEFS = [8]int16{
 15596  	int16(16643), int16(-14000),
 15597  	int16(-331), int16(19), int16(581), int16(1421), int16(2290), int16(2845),
 15598  } /* SKP_Silk_resampler_rom.c:83:33 */
 15599  
 15600  var SKP_Silk_Resampler_2_3_COEFS_LQ = [6]int16{
 15601  	int16(-2797), int16(-6507),
 15602  	int16(4697), int16(10739),
 15603  	int16(1567), int16(8276),
 15604  } /* SKP_Silk_resampler_rom.c:88:33 */
 15605  
 15606  var SKP_Silk_Resampler_1_3_COEFS_LQ = [5]int16{
 15607  	int16(16777), int16(-9792),
 15608  	int16(890), int16(1614), int16(2148),
 15609  } /* SKP_Silk_resampler_rom.c:94:33 */
 15610  
 15611  /* Tables with coefficients for 4th order ARMA filter (35 Words), in a packed format:       */
 15612  /*    { B1_Q14[1], B2_Q14[1], -A1_Q14[1], -A1_Q14[2], -A2_Q14[1], -A2_Q14[2], gain_Q16 }    */
 15613  /* where it is assumed that B*_Q14[0], B*_Q14[2], A*_Q14[0] are all 16384                   */
 15614  var SKP_Silk_Resampler_320_441_ARMA4_COEFS = [7]int16{
 15615  	int16(31454), int16(24746), int16(-9706), int16(-3386), int16(-17911), int16(-13243), int16(24797),
 15616  } /* SKP_Silk_resampler_rom.c:103:33 */
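
// Unpacking the first table above according to the packed layout documented
// before these tables (editorial note, not part of the ccgo translation), for
// SKP_Silk_Resampler_320_441_ARMA4_COEFS:
//
//	B1_Q14[1]  =  31454,  B2_Q14[1]  =  24746,
//	-A1_Q14[1] =  -9706,  -A1_Q14[2] =  -3386,
//	-A2_Q14[1] = -17911,  -A2_Q14[2] = -13243,
//	gain_Q16   =  24797 (about 0.38),
//
// with B*_Q14[0], B*_Q14[2] and A*_Q14[0] fixed at 16384 (1.0 in Q14).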
 15617  
 15618  var SKP_Silk_Resampler_240_441_ARMA4_COEFS = [7]int16{
 15619  	int16(28721), int16(11254), int16(3189), int16(-2546), int16(-1495), int16(-12618), int16(11562),
 15620  } /* SKP_Silk_resampler_rom.c:107:33 */
 15621  
 15622  var SKP_Silk_Resampler_160_441_ARMA4_COEFS = [7]int16{
 15623  	int16(23492), int16(-6457), int16(14358), int16(-4856), int16(14654), int16(-13008), int16(4456),
 15624  } /* SKP_Silk_resampler_rom.c:111:33 */
 15625  
 15626  var SKP_Silk_Resampler_120_441_ARMA4_COEFS = [7]int16{
 15627  	int16(19311), int16(-15569), int16(19489), int16(-6950), int16(21441), int16(-13559), int16(2370),
 15628  } /* SKP_Silk_resampler_rom.c:115:33 */
 15629  
 15630  var SKP_Silk_Resampler_80_441_ARMA4_COEFS = [7]int16{
 15631  	int16(13248), int16(-23849), int16(24126), int16(-9486), int16(26806), int16(-14286), int16(1065),
 15632  } /* SKP_Silk_resampler_rom.c:119:33 */
 15633  
 15634  /* Table with interpolation fractions of 1/288 : 2/288 : 287/288 (432 Words) */
 15635  var SKP_Silk_resampler_frac_FIR_144 = [144][3]int16{
 15636  	{int16(-647), int16(1884), int16(30078)},
 15637  	{int16(-625), int16(1736), int16(30044)},
 15638  	{int16(-603), int16(1591), int16(30005)},
 15639  	{int16(-581), int16(1448), int16(29963)},
 15640  	{int16(-559), int16(1308), int16(29917)},
 15641  	{int16(-537), int16(1169), int16(29867)},
 15642  	{int16(-515), int16(1032), int16(29813)},
 15643  	{int16(-494), int16(898), int16(29755)},
 15644  	{int16(-473), int16(766), int16(29693)},
 15645  	{int16(-452), int16(636), int16(29627)},
 15646  	{int16(-431), int16(508), int16(29558)},
 15647  	{int16(-410), int16(383), int16(29484)},
 15648  	{int16(-390), int16(260), int16(29407)},
 15649  	{int16(-369), int16(139), int16(29327)},
 15650  	{int16(-349), int16(20), int16(29242)},
 15651  	{int16(-330), int16(-97), int16(29154)},
 15652  	{int16(-310), int16(-211), int16(29062)},
 15653  	{int16(-291), int16(-324), int16(28967)},
 15654  	{int16(-271), int16(-434), int16(28868)},
 15655  	{int16(-253), int16(-542), int16(28765)},
 15656  	{int16(-234), int16(-647), int16(28659)},
 15657  	{int16(-215), int16(-751), int16(28550)},
 15658  	{int16(-197), int16(-852), int16(28436)},
 15659  	{int16(-179), int16(-951), int16(28320)},
 15660  	{int16(-162), int16(-1048), int16(28200)},
 15661  	{int16(-144), int16(-1143), int16(28077)},
 15662  	{int16(-127), int16(-1235), int16(27950)},
 15663  	{int16(-110), int16(-1326), int16(27820)},
 15664  	{int16(-94), int16(-1414), int16(27687)},
 15665  	{int16(-77), int16(-1500), int16(27550)},
 15666  	{int16(-61), int16(-1584), int16(27410)},
 15667  	{int16(-45), int16(-1665), int16(27268)},
 15668  	{int16(-30), int16(-1745), int16(27122)},
 15669  	{int16(-15), int16(-1822), int16(26972)},
 15670  	{int16(0), int16(-1897), int16(26820)},
 15671  	{int16(15), int16(-1970), int16(26665)},
 15672  	{int16(29), int16(-2041), int16(26507)},
 15673  	{int16(44), int16(-2110), int16(26346)},
 15674  	{int16(57), int16(-2177), int16(26182)},
 15675  	{int16(71), int16(-2242), int16(26015)},
 15676  	{int16(84), int16(-2305), int16(25845)},
 15677  	{int16(97), int16(-2365), int16(25673)},
 15678  	{int16(110), int16(-2424), int16(25498)},
 15679  	{int16(122), int16(-2480), int16(25320)},
 15680  	{int16(134), int16(-2534), int16(25140)},
 15681  	{int16(146), int16(-2587), int16(24956)},
 15682  	{int16(157), int16(-2637), int16(24771)},
 15683  	{int16(168), int16(-2685), int16(24583)},
 15684  	{int16(179), int16(-2732), int16(24392)},
 15685  	{int16(190), int16(-2776), int16(24199)},
 15686  	{int16(200), int16(-2819), int16(24003)},
 15687  	{int16(210), int16(-2859), int16(23805)},
 15688  	{int16(220), int16(-2898), int16(23605)},
 15689  	{int16(229), int16(-2934), int16(23403)},
 15690  	{int16(238), int16(-2969), int16(23198)},
 15691  	{int16(247), int16(-3002), int16(22992)},
 15692  	{int16(255), int16(-3033), int16(22783)},
 15693  	{int16(263), int16(-3062), int16(22572)},
 15694  	{int16(271), int16(-3089), int16(22359)},
 15695  	{int16(279), int16(-3114), int16(22144)},
 15696  	{int16(286), int16(-3138), int16(21927)},
 15697  	{int16(293), int16(-3160), int16(21709)},
 15698  	{int16(300), int16(-3180), int16(21488)},
 15699  	{int16(306), int16(-3198), int16(21266)},
 15700  	{int16(312), int16(-3215), int16(21042)},
 15701  	{int16(318), int16(-3229), int16(20816)},
 15702  	{int16(323), int16(-3242), int16(20589)},
 15703  	{int16(328), int16(-3254), int16(20360)},
 15704  	{int16(333), int16(-3263), int16(20130)},
 15705  	{int16(338), int16(-3272), int16(19898)},
 15706  	{int16(342), int16(-3278), int16(19665)},
 15707  	{int16(346), int16(-3283), int16(19430)},
 15708  	{int16(350), int16(-3286), int16(19194)},
 15709  	{int16(353), int16(-3288), int16(18957)},
 15710  	{int16(356), int16(-3288), int16(18718)},
 15711  	{int16(359), int16(-3286), int16(18478)},
 15712  	{int16(362), int16(-3283), int16(18238)},
 15713  	{int16(364), int16(-3279), int16(17996)},
 15714  	{int16(366), int16(-3273), int16(17753)},
 15715  	{int16(368), int16(-3266), int16(17509)},
 15716  	{int16(369), int16(-3257), int16(17264)},
 15717  	{int16(371), int16(-3247), int16(17018)},
 15718  	{int16(372), int16(-3235), int16(16772)},
 15719  	{int16(372), int16(-3222), int16(16525)},
 15720  	{int16(373), int16(-3208), int16(16277)},
 15721  	{int16(373), int16(-3192), int16(16028)},
 15722  	{int16(373), int16(-3175), int16(15779)},
 15723  	{int16(373), int16(-3157), int16(15529)},
 15724  	{int16(372), int16(-3138), int16(15279)},
 15725  	{int16(371), int16(-3117), int16(15028)},
 15726  	{int16(370), int16(-3095), int16(14777)},
 15727  	{int16(369), int16(-3072), int16(14526)},
 15728  	{int16(368), int16(-3048), int16(14274)},
 15729  	{int16(366), int16(-3022), int16(14022)},
 15730  	{int16(364), int16(-2996), int16(13770)},
 15731  	{int16(362), int16(-2968), int16(13517)},
 15732  	{int16(359), int16(-2940), int16(13265)},
 15733  	{int16(357), int16(-2910), int16(13012)},
 15734  	{int16(354), int16(-2880), int16(12760)},
 15735  	{int16(351), int16(-2848), int16(12508)},
 15736  	{int16(348), int16(-2815), int16(12255)},
 15737  	{int16(344), int16(-2782), int16(12003)},
 15738  	{int16(341), int16(-2747), int16(11751)},
 15739  	{int16(337), int16(-2712), int16(11500)},
 15740  	{int16(333), int16(-2676), int16(11248)},
 15741  	{int16(328), int16(-2639), int16(10997)},
 15742  	{int16(324), int16(-2601), int16(10747)},
 15743  	{int16(320), int16(-2562), int16(10497)},
 15744  	{int16(315), int16(-2523), int16(10247)},
 15745  	{int16(310), int16(-2482), int16(9998)},
 15746  	{int16(305), int16(-2442), int16(9750)},
 15747  	{int16(300), int16(-2400), int16(9502)},
 15748  	{int16(294), int16(-2358), int16(9255)},
 15749  	{int16(289), int16(-2315), int16(9009)},
 15750  	{int16(283), int16(-2271), int16(8763)},
 15751  	{int16(277), int16(-2227), int16(8519)},
 15752  	{int16(271), int16(-2182), int16(8275)},
 15753  	{int16(265), int16(-2137), int16(8032)},
 15754  	{int16(259), int16(-2091), int16(7791)},
 15755  	{int16(252), int16(-2045), int16(7550)},
 15756  	{int16(246), int16(-1998), int16(7311)},
 15757  	{int16(239), int16(-1951), int16(7072)},
 15758  	{int16(232), int16(-1904), int16(6835)},
 15759  	{int16(226), int16(-1856), int16(6599)},
 15760  	{int16(219), int16(-1807), int16(6364)},
 15761  	{int16(212), int16(-1758), int16(6131)},
 15762  	{int16(204), int16(-1709), int16(5899)},
 15763  	{int16(197), int16(-1660), int16(5668)},
 15764  	{int16(190), int16(-1611), int16(5439)},
 15765  	{int16(183), int16(-1561), int16(5212)},
 15766  	{int16(175), int16(-1511), int16(4986)},
 15767  	{int16(168), int16(-1460), int16(4761)},
 15768  	{int16(160), int16(-1410), int16(4538)},
 15769  	{int16(152), int16(-1359), int16(4317)},
 15770  	{int16(145), int16(-1309), int16(4098)},
 15771  	{int16(137), int16(-1258), int16(3880)},
 15772  	{int16(129), int16(-1207), int16(3664)},
 15773  	{int16(121), int16(-1156), int16(3450)},
 15774  	{int16(113), int16(-1105), int16(3238)},
 15775  	{int16(105), int16(-1054), int16(3028)},
 15776  	{int16(97), int16(-1003), int16(2820)},
 15777  	{int16(89), int16(-952), int16(2614)},
 15778  	{int16(81), int16(-901), int16(2409)},
 15779  	{int16(73), int16(-851), int16(2207)},
 15780  } /* SKP_Silk_resampler_rom.c:124:33 */
 15781  
 15782  /* Upsample by a factor 2, low quality */
 15783  func SKP_Silk_resampler_up2(tls *libc.TLS, S uintptr, out uintptr, in uintptr, len int32) { /* SKP_Silk_resampler_up2.c:40:6: */
 15784  	var k int32
 15785  	var in32 int32
 15786  	var out32 int32
 15787  	var Y int32
 15788  	var X int32
 15789  
 15790  	/* Internal variables and state are in Q10 format */
 15791  	for k = 0; k < len; k++ {
 15792  		/* Convert to Q10 */
 15793  		in32 = ((int32(*(*int16)(unsafe.Pointer(in + uintptr(k)*2)))) << (10))
 15794  
 15795  		/* All-pass section for even output sample */
 15796  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S))))
 15797  		X = ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_lq_0))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_lq_0))) >> 16))
 15798  		out32 = ((*(*int32)(unsafe.Pointer(S))) + (X))
 15799  		*(*int32)(unsafe.Pointer(S)) = ((in32) + (X))
 15800  
 15801  		/* Convert back to int16 and store to output */
 15802  		*(*int16)(unsafe.Pointer(out + uintptr((2*k))*2)) = func() int16 {
 15803  			if (func() int32 {
 15804  				if (10) == 1 {
 15805  					return (((out32) >> 1) + ((out32) & 1))
 15806  				}
 15807  				return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15808  			}()) > 0x7FFF {
 15809  				return int16(0x7FFF)
 15810  			}
 15811  			return func() int16 {
 15812  				if (func() int32 {
 15813  					if (10) == 1 {
 15814  						return (((out32) >> 1) + ((out32) & 1))
 15815  					}
 15816  					return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15817  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15818  					return libc.Int16FromInt32(0x8000)
 15819  				}
 15820  				return func() int16 {
 15821  					if (10) == 1 {
 15822  						return (int16(((out32) >> 1) + ((out32) & 1)))
 15823  					}
 15824  					return (int16((((out32) >> ((10) - 1)) + 1) >> 1))
 15825  				}()
 15826  			}()
 15827  		}()
 15828  
 15829  		/* All-pass section for odd output sample */
 15830  		Y = ((in32) - (*(*int32)(unsafe.Pointer(S + 1*4))))
 15831  		X = ((Y) + ((((Y) >> 16) * (int32(SKP_Silk_resampler_up2_lq_1))) + ((((Y) & 0x0000FFFF) * (int32(SKP_Silk_resampler_up2_lq_1))) >> 16)))
 15832  		out32 = ((*(*int32)(unsafe.Pointer(S + 1*4))) + (X))
 15833  		*(*int32)(unsafe.Pointer(S + 1*4)) = ((in32) + (X))
 15834  
 15835  		/* Convert back to int16 and store to output */
 15836  		*(*int16)(unsafe.Pointer(out + uintptr(((2*k)+1))*2)) = func() int16 {
 15837  			if (func() int32 {
 15838  				if (10) == 1 {
 15839  					return (((out32) >> 1) + ((out32) & 1))
 15840  				}
 15841  				return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15842  			}()) > 0x7FFF {
 15843  				return int16(0x7FFF)
 15844  			}
 15845  			return func() int16 {
 15846  				if (func() int32 {
 15847  					if (10) == 1 {
 15848  						return (((out32) >> 1) + ((out32) & 1))
 15849  					}
 15850  					return ((((out32) >> ((10) - 1)) + 1) >> 1)
 15851  				}()) < (int32(libc.Int16FromInt32(0x8000))) {
 15852  					return libc.Int16FromInt32(0x8000)
 15853  				}
 15854  				return func() int16 {
 15855  					if (10) == 1 {
 15856  						return (int16(((out32) >> 1) + ((out32) & 1)))
 15857  					}
 15858  					return (int16((((out32) >> ((10) - 1)) + 1) >> 1))
 15859  				}()
 15860  			}()
 15861  		}()
 15862  	}
 15863  }
 15864  
 15865  /* Residual energy: nrg = wxx - 2 * wXx * c + c' * wXX * c */
 15866  func SKP_Silk_residual_energy16_covar_FIX(tls *libc.TLS, c uintptr, wXX uintptr, wXx uintptr, wxx int32, D int32, cQ int32) int32 { /* SKP_Silk_residual_energy16_FIX.c:31:11: */
 15867  	bp := tls.Alloc(64)
 15868  	defer tls.Free(64)
 15869  
 15870  	var i int32
 15871  	var j int32
 15872  	var lshifts int32
 15873  	var Qxtra int32
 15874  	var c_max int32
 15875  	var w_max int32
 15876  	var tmp int32
 15877  	var tmp2 int32
 15878  	var nrg int32
 15879  	// var cn [16]int32 at bp, 64
 15880  
 15881  	var pRow uintptr
 15882  
 15883  	/* Safety checks */
 15884  
 15885  	lshifts = (16 - cQ)
 15886  	Qxtra = lshifts
 15887  
 15888  	c_max = 0
 15889  	for i = 0; i < D; i++ {
 15890  		c_max = SKP_max_32(tls, c_max, func() int32 {
 15891  			if (int32(*(*int16)(unsafe.Pointer(c + uintptr(i)*2)))) > 0 {
 15892  				return int32(*(*int16)(unsafe.Pointer(c + uintptr(i)*2)))
 15893  			}
 15894  			return -int32(*(*int16)(unsafe.Pointer(c + uintptr(i)*2)))
 15895  		}())
 15896  	}
 15897  	Qxtra = SKP_min_int(tls, Qxtra, (SKP_Silk_CLZ32(tls, c_max) - 17))
 15898  
 15899  	w_max = SKP_max_32(tls, *(*int32)(unsafe.Pointer(wXX)), *(*int32)(unsafe.Pointer(wXX + uintptr(((D*D)-1))*4)))
 15900  	Qxtra = SKP_min_int(tls, Qxtra, (SKP_Silk_CLZ32(tls, ((D)*(((((w_max)>>16)*(int32(int16(c_max))))+((((w_max)&0x0000FFFF)*(int32(int16(c_max))))>>16))>>(4)))) - 5))
 15901  	Qxtra = SKP_max_int(tls, Qxtra, 0)
 15902  	for i = 0; i < D; i++ {
 15903  		*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(i)*4)) = ((int32(*(*int16)(unsafe.Pointer(c + uintptr(i)*2)))) << (Qxtra))
 15904  		/* Check that SKP_SMLAWB can be used */
 15905  	}
 15906  	lshifts = lshifts - (Qxtra)
 15907  
 15908  	/* Compute wxx - 2 * wXx * c */
 15909  	tmp = 0
 15910  	for i = 0; i < D; i++ {
 15911  		tmp = ((tmp) + ((((*(*int32)(unsafe.Pointer(wXx + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(i)*4)))))) + ((((*(*int32)(unsafe.Pointer(wXx + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(i)*4)))))) >> 16)))
 15912  	}
 15913  	nrg = (((wxx) >> (1 + lshifts)) - tmp) /* Q: -lshifts - 1 */
 15914  
 15915  	/* Add c' * wXX * c, assuming wXX is symmetric */
 15916  	tmp2 = 0
 15917  	for i = 0; i < D; i++ {
 15918  		tmp = 0
 15919  		pRow = (wXX + uintptr((i*D))*4)
 15920  		for j = (i + 1); j < D; j++ {
 15921  			tmp = ((tmp) + ((((*(*int32)(unsafe.Pointer(pRow + uintptr(j)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(j)*4)))))) + ((((*(*int32)(unsafe.Pointer(pRow + uintptr(j)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(j)*4)))))) >> 16)))
 15922  		}
 15923  		tmp = ((tmp) + (((((*(*int32)(unsafe.Pointer(pRow + uintptr(i)*4))) >> (1)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(i)*4)))))) + (((((*(*int32)(unsafe.Pointer(pRow + uintptr(i)*4))) >> (1)) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(i)*4)))))) >> 16)))
 15924  		tmp2 = ((tmp2) + ((((tmp) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(i)*4)))))) + ((((tmp) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(bp /* &cn[0] */ + uintptr(i)*4)))))) >> 16)))
 15925  	}
 15926  	nrg = ((nrg) + ((tmp2) << (lshifts))) /* Q: -lshifts - 1 */
 15927  
 15928  	/* Keep one bit free always, because we add them for LSF interpolation */
 15929  	if nrg < 1 {
 15930  		nrg = 1
 15931  	} else if nrg > (int32((0x7FFFFFFF)) >> (lshifts + 2)) {
 15932  		nrg = (int32(0x7FFFFFFF) >> 1)
 15933  	} else {
 15934  		nrg = ((nrg) << (lshifts + 1)) /* Q0 */
 15935  	}
 15936  	return nrg
 15937  
 15938  }
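
// residualEnergyFloatSketch is an editorial floating-point reference (not part
// of the ccgo translation) for the quadratic form that
// SKP_Silk_residual_energy16_covar_FIX above evaluates in fixed point:
//
//	nrg = wxx - 2 * wXx' * c + c' * wXX * c
//
// wXX is a D x D matrix stored row-major; the fixed-point routine exploits its
// symmetry and juggles Q-domains (cQ, lshifts), which this sketch ignores.
func residualEnergyFloatSketch(c, wXx, wXX []float64, wxx float64, D int) float64 {
	nrg := wxx
	for i := 0; i < D; i++ {
		nrg -= 2 * wXx[i] * c[i]
		for j := 0; j < D; j++ {
			nrg += c[i] * wXX[i*D+j] * c[j]
		}
	}
	return nrg
}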
 15939  
 15940  /* Calculates residual energies of input subframes where all subframes have LPC_order   */
 15941  /* of preceding samples                                                                 */
 15942  func SKP_Silk_residual_energy_FIX(tls *libc.TLS, nrgs uintptr, nrgsQ uintptr, x uintptr, a_Q12 uintptr, gains uintptr, subfr_length int32, LPC_order int32) { /* SKP_Silk_residual_energy_FIX.c:32:6: */
 15943  	bp := tls.Alloc(580)
 15944  	defer tls.Free(580)
 15945  
 15946  	var offset int32
 15947  	var i int32
 15948  	var j int32
 15949  	// var rshift int32 at bp+576, 4
 15950  
 15951  	var lz1 int32
 15952  	var lz2 int32
 15953  	var LPC_res_ptr uintptr
 15954  	// var LPC_res [272]int16 at bp+32, 544
 15955  
 15956  	var x_ptr uintptr
 15957  	// var S [16]int16 at bp, 32
 15958  
 15959  	var tmp32 int32
 15960  
 15961  	x_ptr = x
 15962  	offset = (LPC_order + subfr_length)
 15963  
 15964  	/* Filter input to create the LPC residual for each frame half, and measure subframe energies */
 15965  	for i = 0; i < 2; i++ {
 15966  		/* Calculate half frame LPC residual signal including preceding samples */
 15967  		libc.Xmemset(tls, bp /* &S[0] */, 0, (uint32(LPC_order) * uint32(unsafe.Sizeof(int16(0)))))
 15968  		SKP_Silk_LPC_analysis_filter(tls, x_ptr, (a_Q12 + uintptr(i)*32), bp /* &S[0] */, bp+32 /* &LPC_res[0] */, ((int32(4) >> 1) * offset), LPC_order)
 15969  
 15970  		/* Point to first subframe of the just calculated LPC residual signal */
 15971  		LPC_res_ptr = (bp + 32 /* &LPC_res[0] */ + uintptr(LPC_order)*2)
 15972  		for j = 0; j < (int32(4) >> 1); j++ {
 15973  			/* Measure subframe energy */
 15974  			SKP_Silk_sum_sqr_shift(tls, (nrgs + uintptr(((i*(int32(4)>>1))+j))*4), bp+576 /* &rshift */, LPC_res_ptr, subfr_length)
 15975  
 15976  			/* Set Q values for the measured energy */
 15977  			*(*int32)(unsafe.Pointer(nrgsQ + uintptr(((i*(int32(4)>>1))+j))*4)) = -*(*int32)(unsafe.Pointer(bp + 576 /* rshift */))
 15978  
 15979  			/* Move to next subframe */
 15980  			LPC_res_ptr += 2 * (uintptr(offset))
 15981  		}
 15982  		/* Move to next frame half */
 15983  		x_ptr += 2 * (uintptr((int32(4) >> 1) * offset))
 15984  	}
 15985  
 15986  	/* Apply the squared subframe gains */
 15987  	for i = 0; i < 4; i++ {
 15988  		/* Fully upscale gains and energies */
 15989  		lz1 = (SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(nrgs + uintptr(i)*4))) - 1)
 15990  		lz2 = (SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(gains + uintptr(i)*4))) - 1)
 15991  
 15992  		tmp32 = ((*(*int32)(unsafe.Pointer(gains + uintptr(i)*4))) << (lz2))
 15993  
 15994  		/* Find squared gains */
 15995  		tmp32 = (int32(((int64_t(tmp32)) * (int64_t(tmp32))) >> (32))) // Q( 2 * lz2 - 32 )
 15996  
 15997  		/* Scale energies */
 15998  		*(*int32)(unsafe.Pointer(nrgs + uintptr(i)*4)) = (int32(((int64_t(tmp32)) * (int64_t((*(*int32)(unsafe.Pointer(nrgs + uintptr(i)*4))) << (lz1)))) >> (32))) // Q( nrgsQ[ i ] + lz1 + 2 * lz2 - 32 - 32 )
 15999  		*(*int32)(unsafe.Pointer(nrgsQ + uintptr(i)*4)) += (((lz1 + (2 * lz2)) - 32) - 32)
 16000  	}
 16001  }
 16002  
 16003  /* Copy and multiply a vector by a constant */
 16004  func SKP_Silk_scale_copy_vector16(tls *libc.TLS, data_out uintptr, data_in uintptr, gain_Q16 int32, dataSize int32) { /* SKP_Silk_scale_copy_vector16.c:31:6: */
 16005  	var i int32
 16006  	var tmp32 int32
 16007  
 16008  	for i = 0; i < dataSize; i++ {
 16009  		tmp32 = ((((gain_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(data_in + uintptr(i)*2))))) + ((((gain_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(data_in + uintptr(i)*2))))) >> 16))
 16010  		*(*int16)(unsafe.Pointer(data_out + uintptr(i)*2)) = int16(tmp32)
 16011  	}
 16012  }
 16013  
 16014  /* Multiply a vector by a constant */
 16015  func SKP_Silk_scale_vector32_Q26_lshift_18(tls *libc.TLS, data1 uintptr, gain_Q26 int32, dataSize int32) { /* SKP_Silk_scale_vector.c:31:6: */
 16016  	var i int32
 16017  
 16018  	for i = 0; i < dataSize; i++ {
 16019  		*(*int32)(unsafe.Pointer(data1 + uintptr(i)*4)) = (int32(((int64_t(*(*int32)(unsafe.Pointer(data1 + uintptr(i)*4)))) * (int64_t(gain_Q26))) >> (8))) // OUTPUT: Q18
 16020  	}
 16021  }
 16022  
 16023  /* Faster than schur64(), but much less accurate.                       */
 16024  /* Uses SMLAWB(), requiring armv5E and higher.                          */
 16025  func SKP_Silk_schur(tls *libc.TLS, rc_Q15 uintptr, c uintptr, order int32) int32 { /* SKP_Silk_schur.c:40:11: */
 16026  	bp := tls.Alloc(136)
 16027  	defer tls.Free(136)
 16028  
 16029  	var k int32
 16030  	var n int32
 16031  	var lz int32
 16032  	// var C [17][2]int32 at bp, 136
 16033  
 16034  	var Ctmp1 int32
 16035  	var Ctmp2 int32
 16036  	var rc_tmp_Q15 int32
 16037  
 16038  	/* Get number of leading zeros */
 16039  	lz = SKP_Silk_CLZ32(tls, *(*int32)(unsafe.Pointer(c)))
 16040  
 16041  	/* Copy correlations and adjust level to Q30 */
 16042  	if lz < 2 {
 16043  		/* lz must be 1, so shift one to the right */
 16044  		for k = 0; k < (order + 1); k++ {
 16045  			*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*8))) = libc.AssignPtrInt32((bp /* &C */ +uintptr(k)*8)+1*4, ((*(*int32)(unsafe.Pointer(c + uintptr(k)*4))) >> (1)))
 16046  		}
 16047  	} else if lz > 2 {
 16048  		/* Shift to the left */
 16049  		lz = lz - (2)
 16050  		for k = 0; k < (order + 1); k++ {
 16051  			*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*8))) = libc.AssignPtrInt32((bp /* &C */ +uintptr(k)*8)+1*4, ((*(*int32)(unsafe.Pointer(c + uintptr(k)*4))) << (lz)))
 16052  		}
 16053  	} else {
 16054  		/* No need to shift */
 16055  		for k = 0; k < (order + 1); k++ {
 16056  			*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*8))) = libc.AssignPtrInt32((bp /* &C */ +uintptr(k)*8)+1*4, *(*int32)(unsafe.Pointer(c + uintptr(k)*4)))
 16057  		}
 16058  	}
 16059  
 16060  	for k = 0; k < order; k++ {
 16061  
 16062  		/* Get reflection coefficient */
 16063  		rc_tmp_Q15 = -((*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr((k+1))*8)))) / (SKP_max_32(tls, ((*(*int32)(unsafe.Pointer((bp /* &C[0] */) + 1*4))) >> (15)), 1)))
 16064  
 16065  		/* Clip (shouldn't happen for properly conditioned inputs) */
 16066  		rc_tmp_Q15 = func() int32 {
 16067  			if (rc_tmp_Q15) > 0x7FFF {
 16068  				return 0x7FFF
 16069  			}
 16070  			return func() int32 {
 16071  				if (rc_tmp_Q15) < (int32(libc.Int16FromInt32(0x8000))) {
 16072  					return int32(libc.Int16FromInt32(0x8000))
 16073  				}
 16074  				return rc_tmp_Q15
 16075  			}()
 16076  		}()
 16077  
 16078  		/* Store */
 16079  		*(*int16)(unsafe.Pointer(rc_Q15 + uintptr(k)*2)) = int16(rc_tmp_Q15)
 16080  
 16081  		/* Update correlations */
 16082  		for n = 0; n < (order - k); n++ {
 16083  			Ctmp1 = *(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(((n+k)+1))*8)))
 16084  			Ctmp2 = *(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(n)*8) + 1*4))
 16085  			*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(((n+k)+1))*8))) = ((Ctmp1) + (((((Ctmp2) << (1)) >> 16) * (int32(int16(rc_tmp_Q15)))) + (((((Ctmp2) << (1)) & 0x0000FFFF) * (int32(int16(rc_tmp_Q15)))) >> 16)))
 16086  			*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(n)*8) + 1*4)) = ((Ctmp2) + (((((Ctmp1) << (1)) >> 16) * (int32(int16(rc_tmp_Q15)))) + (((((Ctmp1) << (1)) & 0x0000FFFF) * (int32(int16(rc_tmp_Q15)))) >> 16)))
 16087  		}
 16088  	}
 16089  
 16090  	/* return residual energy */
 16091  	return *(*int32)(unsafe.Pointer((bp /* &C[0] */) + 1*4))
 16092  }
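// The fixed-point loop above is the Schur recursion: it turns the (shifted)
// autocorrelation sequence into reflection coefficients and returns the remaining
// residual energy. A floating-point sketch with the same structure may be easier to
// follow (hypothetical helper; C[k][0] and C[k][1] above play the roles of g1 and g2):

func schurRef(c []float64, order int) (rc []float64, resNrg float64) {
	g1 := append([]float64(nil), c[:order+1]...)
	g2 := append([]float64(nil), c[:order+1]...)
	rc = make([]float64, order)
	for k := 0; k < order; k++ {
		rc[k] = -g1[k+1] / g2[0] // reflection coefficient (assumes g2[0] > 0)
		for n := 0; n < order-k; n++ {
			t1, t2 := g1[n+k+1], g2[n]
			g1[n+k+1] = t1 + rc[k]*t2
			g2[n] = t2 + rc[k]*t1
		}
	}
	return rc, g2[0] // residual prediction error energy
}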
 16093  
 16094  /* Slower than schur(), but more accurate.                              */
 16095  /* Uses SMULL(), available on armv4                                     */
 16096  func SKP_Silk_schur64(tls *libc.TLS, rc_Q16 uintptr, c uintptr, order int32) int32 { /* SKP_Silk_schur64.c:41:11: */
 16097  	bp := tls.Alloc(136)
 16098  	defer tls.Free(136)
 16099  
 16100  	var k int32
 16101  	var n int32
 16102  	// var C [17][2]int32 at bp, 136
 16103  
 16104  	var Ctmp1_Q30 int32
 16105  	var Ctmp2_Q30 int32
 16106  	var rc_tmp_Q31 int32
 16107  
 16108  	/* Check for invalid input */
 16109  	if *(*int32)(unsafe.Pointer(c)) <= 0 {
 16110  		libc.Xmemset(tls, rc_Q16, 0, (uint32(order) * uint32(unsafe.Sizeof(int32(0)))))
 16111  		return 0
 16112  	}
 16113  
 16114  	for k = 0; k < (order + 1); k++ {
 16115  		*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(k)*8))) = libc.AssignPtrInt32((bp /* &C */ +uintptr(k)*8)+1*4, *(*int32)(unsafe.Pointer(c + uintptr(k)*4)))
 16116  	}
 16117  
 16118  	for k = 0; k < order; k++ {
 16119  		/* Get reflection coefficient: divide two Q30 values and get result in Q31 */
 16120  		rc_tmp_Q31 = SKP_DIV32_varQ(tls, -*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr((k+1))*8))), *(*int32)(unsafe.Pointer((bp /* &C[0] */) + 1*4)), 31)
 16121  
 16122  		/* Save the output */
 16123  		*(*int32)(unsafe.Pointer(rc_Q16 + uintptr(k)*4)) = func() int32 {
 16124  			if (15) == 1 {
 16125  				return (((rc_tmp_Q31) >> 1) + ((rc_tmp_Q31) & 1))
 16126  			}
 16127  			return ((((rc_tmp_Q31) >> ((15) - 1)) + 1) >> 1)
 16128  		}()
 16129  
 16130  		/* Update correlations */
 16131  		for n = 0; n < (order - k); n++ {
 16132  			Ctmp1_Q30 = *(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(((n+k)+1))*8)))
 16133  			Ctmp2_Q30 = *(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(n)*8) + 1*4))
 16134  
 16135  			/* Multiply and add the highest int32 */
 16136  			*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(((n+k)+1))*8))) = (Ctmp1_Q30 + (int32(((int64_t((Ctmp2_Q30) << (1))) * (int64_t(rc_tmp_Q31))) >> (32))))
 16137  			*(*int32)(unsafe.Pointer((bp /* &C[0] */ + uintptr(n)*8) + 1*4)) = (Ctmp2_Q30 + (int32(((int64_t((Ctmp1_Q30) << (1))) * (int64_t(rc_tmp_Q31))) >> (32))))
 16138  		}
 16139  	}
 16140  
 16141  	return *(*int32)(unsafe.Pointer((bp /* &C[0] */) + 1*4))
 16142  }
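// The repeated "if (15) == 1 { ... }" and "if (16) == 1 { ... }" expressions in this
// file are the expansion of a rounding right-shift macro. A hypothetical stand-alone
// version is shown here because the Q31 -> Q16 conversion of rc_tmp_Q31 above is
// exactly such a shift by 15 (shift must be >= 1):

func rshiftRound(x int32, shift uint) int32 {
	if shift == 1 {
		return (x >> 1) + (x & 1)
	}
	return ((x >> (shift - 1)) + 1) >> 1
}

// e.g. the rc_Q16[k] store above is equivalent to rshiftRound(rc_tmp_Q31, 15).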
 16143  
 16144  /* shell coder; pulse-subframe length is hardcoded */
 16145  
 16146  func combine_pulses(tls *libc.TLS, out uintptr, in uintptr, len int32) { /* SKP_Silk_shell_coder.c:32:17: */
 16147  	var k int32
 16148  	for k = 0; k < len; k++ {
 16149  		*(*int32)(unsafe.Pointer(out + uintptr(k)*4)) = (*(*int32)(unsafe.Pointer(in + uintptr((2*k))*4)) + *(*int32)(unsafe.Pointer(in + uintptr(((2*k)+1))*4)))
 16150  	}
 16151  }
 16152  
 16153  func encode_split(tls *libc.TLS, sRC uintptr, p_child1 int32, p int32, shell_table uintptr) { /* SKP_Silk_shell_coder.c:44:17: */
 16154  	var cdf uintptr
 16155  
 16156  	if p > 0 {
 16157  		cdf = (shell_table + uintptr(SKP_Silk_shell_code_table_offsets[p])*2)
 16158  		SKP_Silk_range_encoder(tls, sRC, p_child1, cdf)
 16159  	}
 16160  }
 16161  
 16162  func decode_split(tls *libc.TLS, p_child1 uintptr, p_child2 uintptr, sRC uintptr, p int32, shell_table uintptr) { /* SKP_Silk_shell_coder.c:59:17: */
 16163  	var cdf_middle int32
 16164  	var cdf uintptr
 16165  
 16166  	if p > 0 {
 16167  		cdf_middle = ((p) >> (1))
 16168  		cdf = (shell_table + uintptr(SKP_Silk_shell_code_table_offsets[p])*2)
 16169  		SKP_Silk_range_decoder(tls, p_child1, sRC, cdf, cdf_middle)
 16170  		*(*int32)(unsafe.Pointer(p_child2)) = (p - *(*int32)(unsafe.Pointer(p_child1)))
 16171  	} else {
 16172  		*(*int32)(unsafe.Pointer(p_child1)) = 0
 16173  		*(*int32)(unsafe.Pointer(p_child2)) = 0
 16174  	}
 16175  }
 16176  
 16177  /* Shell encoder, operates on one shell code frame of 16 pulses */
 16178  func SKP_Silk_shell_encoder(tls *libc.TLS, sRC uintptr, pulses0 uintptr) { /* SKP_Silk_shell_coder.c:82:6: */
 16179  	bp := tls.Alloc(60)
 16180  	defer tls.Free(60)
 16181  
 16182  	// var pulses1 [8]int32 at bp, 32
 16183  
 16184  	// var pulses2 [4]int32 at bp+32, 16
 16185  
 16186  	// var pulses3 [2]int32 at bp+48, 8
 16187  
 16188  	// var pulses4 [1]int32 at bp+56, 4
 16189  
 16190  	/* this function operates on one shell code frame of 16 pulses */
 16191  
 16192  	/* tree representation per pulse-subframe */
 16193  	combine_pulses(tls, bp /* &pulses1[0] */, pulses0, 8)
 16194  	combine_pulses(tls, bp+32 /* &pulses2[0] */, bp /* &pulses1[0] */, 4)
 16195  	combine_pulses(tls, bp+48 /* &pulses3[0] */, bp+32 /* &pulses2[0] */, 2)
 16196  	combine_pulses(tls, bp+56 /* &pulses4[0] */, bp+48 /* &pulses3[0] */, 1)
 16197  
 16198  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(bp + 48 /* &pulses3[0] */)), *(*int32)(unsafe.Pointer(bp + 56 /* &pulses4[0] */)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table3)))
 16199  
 16200  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(bp + 32 /* &pulses2[0] */)), *(*int32)(unsafe.Pointer(bp + 48 /* &pulses3[0] */)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table2)))
 16201  
 16202  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */)), *(*int32)(unsafe.Pointer(bp + 32 /* &pulses2[0] */)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16203  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16204  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0 + 2*4)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 1*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16205  
 16206  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 2*4)), *(*int32)(unsafe.Pointer(bp + 32 /* &pulses2[0] */ + 1*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16207  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0 + 4*4)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 2*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16208  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0 + 6*4)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 3*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16209  
 16210  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(bp + 32 /* &pulses2[0] */ + 2*4)), *(*int32)(unsafe.Pointer(bp + 48 /* &pulses3[0] */ + 1*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table2)))
 16211  
 16212  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 4*4)), *(*int32)(unsafe.Pointer(bp + 32 /* &pulses2[0] */ + 2*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16213  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0 + 8*4)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 4*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16214  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0 + 10*4)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 5*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16215  
 16216  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 6*4)), *(*int32)(unsafe.Pointer(bp + 32 /* &pulses2[0] */ + 3*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16217  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0 + 12*4)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 6*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16218  	encode_split(tls, sRC, *(*int32)(unsafe.Pointer(pulses0 + 14*4)), *(*int32)(unsafe.Pointer(bp /* &pulses1[0] */ + 7*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16219  }
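// The shell encoder codes the 16 pulse magnitudes as a binary tree: combine_pulses sums
// adjacent pairs (16 -> 8 -> 4 -> 2 -> 1), and encode_split then codes, at every node
// from the root down, how the parent total splits between its two children. A
// stand-alone sketch of the combine step (hypothetical name):

func combinePairs(in []int32) []int32 {
	out := make([]int32, len(in)/2)
	for k := range out {
		out[k] = in[2*k] + in[2*k+1] // same operation as combine_pulses above
	}
	return out
}

// Applied repeatedly to the 16 leaf magnitudes, this yields the per-node totals
// (pulses1..pulses4) that the encode_split calls above walk back down.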
 16220  
 16221  /* Shell decoder, operates on one shell code frame of 16 pulses */
 16222  func SKP_Silk_shell_decoder(tls *libc.TLS, pulses0 uintptr, sRC uintptr, pulses4 int32) { /* SKP_Silk_shell_coder.c:123:6: */
 16223  	bp := tls.Alloc(56)
 16224  	defer tls.Free(56)
 16225  
 16226  	// var pulses3 [2]int32 at bp, 8
 16227  
 16228  	// var pulses2 [4]int32 at bp+8, 16
 16229  
 16230  	// var pulses1 [8]int32 at bp+24, 32
 16231  
 16232  	/* this function operates on one shell code frame of 16 pulses */
 16233  
 16234  	decode_split(tls, (bp /* &pulses3 */), (bp /* &pulses3 */ + 1*4), sRC, pulses4, uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table3)))
 16235  
 16236  	decode_split(tls, (bp + 8 /* &pulses2 */), (bp + 8 /* &pulses2 */ + 1*4), sRC, *(*int32)(unsafe.Pointer(bp /* &pulses3[0] */)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table2)))
 16237  
 16238  	decode_split(tls, (bp + 24 /* &pulses1 */), (bp + 24 /* &pulses1 */ + 1*4), sRC, *(*int32)(unsafe.Pointer(bp + 8 /* &pulses2[0] */)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16239  	decode_split(tls, (pulses0), (pulses0 + 1*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16240  	decode_split(tls, (pulses0 + 2*4), (pulses0 + 3*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */ + 1*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16241  
 16242  	decode_split(tls, (bp + 24 /* &pulses1 */ + 2*4), (bp + 24 /* &pulses1 */ + 3*4), sRC, *(*int32)(unsafe.Pointer(bp + 8 /* &pulses2[0] */ + 1*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16243  	decode_split(tls, (pulses0 + 4*4), (pulses0 + 5*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */ + 2*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16244  	decode_split(tls, (pulses0 + 6*4), (pulses0 + 7*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */ + 3*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16245  
 16246  	decode_split(tls, (bp + 8 /* &pulses2 */ + 2*4), (bp + 8 /* &pulses2 */ + 3*4), sRC, *(*int32)(unsafe.Pointer(bp /* &pulses3[0] */ + 1*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table2)))
 16247  
 16248  	decode_split(tls, (bp + 24 /* &pulses1 */ + 4*4), (bp + 24 /* &pulses1 */ + 5*4), sRC, *(*int32)(unsafe.Pointer(bp + 8 /* &pulses2[0] */ + 2*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16249  	decode_split(tls, (pulses0 + 8*4), (pulses0 + 9*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */ + 4*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16250  	decode_split(tls, (pulses0 + 10*4), (pulses0 + 11*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */ + 5*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16251  
 16252  	decode_split(tls, (bp + 24 /* &pulses1 */ + 6*4), (bp + 24 /* &pulses1 */ + 7*4), sRC, *(*int32)(unsafe.Pointer(bp + 8 /* &pulses2[0] */ + 3*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table1)))
 16253  	decode_split(tls, (pulses0 + 12*4), (pulses0 + 13*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */ + 6*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16254  	decode_split(tls, (pulses0 + 14*4), (pulses0 + 15*4), sRC, *(*int32)(unsafe.Pointer(bp + 24 /* &pulses1[0] */ + 7*4)), uintptr(unsafe.Pointer(&SKP_Silk_shell_code_table0)))
 16255  }
 16256  
 16257  /********************************/
 16258  /* approximate sigmoid function */
 16259  /********************************/
 16260  /* fprintf(1, '%d, ', round(1024 * ([1 ./ (1 + exp(-(1:5))), 1] - 1 ./ (1 + exp(-(0:5)))))); */
 16261  var sigm_LUT_slope_Q10 = [6]int32{
 16262  	237, 153, 73, 30, 12, 7,
 16263  } /* SKP_Silk_sigm_Q15.c:41:24 */
 16264  /* fprintf(1, '%d, ', round(32767 * 1 ./ (1 + exp(-(0:5))))); */
 16265  var sigm_LUT_pos_Q15 = [6]int32{
 16266  	16384, 23955, 28861, 31213, 32178, 32548,
 16267  } /* SKP_Silk_sigm_Q15.c:45:24 */
 16268  /* fprintf(1, '%d, ', round(32767 * 1 ./ (1 + exp((0:5))))); */
 16269  var sigm_LUT_neg_Q15 = [6]int32{
 16270  	16384, 8812, 3906, 1554, 589, 219,
 16271  } /* SKP_Silk_sigm_Q15.c:49:24 */
 16272  
 16273  func SKP_Silk_sigm_Q15(tls *libc.TLS, in_Q5 int32) int32 { /* SKP_Silk_sigm_Q15.c:53:9: */
 16274  	var ind int32
 16275  
 16276  	if in_Q5 < 0 {
 16277  		/* Negative input */
 16278  		in_Q5 = -in_Q5
 16279  		if in_Q5 >= (6 * 32) {
 16280  			return 0 /* Clip */
 16281  		} else {
 16282  			/* Linear interpolation of look up table */
 16283  			ind = ((in_Q5) >> (5))
 16284  			return (sigm_LUT_neg_Q15[ind] - ((int32(int16(sigm_LUT_slope_Q10[ind]))) * (int32((int16(in_Q5 & 0x1F))))))
 16285  		}
 16286  	} else {
 16287  		/* Positive input */
 16288  		if in_Q5 >= (6 * 32) {
 16289  			return 32767 /* clip */
 16290  		} else {
 16291  			/* Linear interpolation of look up table */
 16292  			ind = ((in_Q5) >> (5))
 16293  			return (sigm_LUT_pos_Q15[ind] + ((int32(int16(sigm_LUT_slope_Q10[ind]))) * (int32((int16(in_Q5 & 0x1F))))))
 16294  		}
 16295  	}
 16296  	return int32(0)
 16297  }
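// A worked example of the interpolation above, for in_Q5 = 40 (x = 40/32 = 1.25):
//   ind  = 40 >> 5   = 1
//   frac = 40 & 0x1F = 8
//   out  = sigm_LUT_pos_Q15[1] + sigm_LUT_slope_Q10[1]*frac
//        = 23955 + 153*8 = 25179        (~0.768 in Q15)
// The exact value 1/(1 + exp(-1.25)) is ~0.777, so the table lookup plus linear
// interpolation lands within about 0.01 of the true sigmoid here.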
 16298  
 16299  /***********************************************************************
 16300  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
 16301  Redistribution and use in source and binary forms, with or without
 16302  modification, (subject to the limitations in the disclaimer below)
 16303  are permitted provided that the following conditions are met:
 16304  - Redistributions of source code must retain the above copyright notice,
 16305  this list of conditions and the following disclaimer.
 16306  - Redistributions in binary form must reproduce the above copyright
 16307  notice, this list of conditions and the following disclaimer in the
 16308  documentation and/or other materials provided with the distribution.
 16309  - Neither the name of Skype Limited, nor the names of specific
 16310  contributors, may be used to endorse or promote products derived from
 16311  this software without specific prior written permission.
 16312  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
 16313  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 16314  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
 16315  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 16316  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 16317  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 16318  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 16319  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 16320  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 16321  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 16322  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 16323  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 16324  ***********************************************************************/
 16325  
 16326  /*******************/
 16327  /* Pitch estimator */
 16328  /*******************/
 16329  
 16330  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
 16331  
 16332  /* Bandwidth expansion for whitening filter in pitch analysis */
 16333  
 16334  /* Threshold used by pitch estimator for early escape */
 16335  
 16336  /*********************/
 16337  /* Linear prediction */
 16338  /*********************/
 16339  
 16340  /* LPC analysis defines: regularization and bandwidth expansion */
 16341  
 16342  /* LTP analysis defines */
 16343  
 16344  /* LTP quantization settings */
 16345  
 16346  /***********************/
 16347  /* High pass filtering */
 16348  /***********************/
 16349  
 16350  /* Smoothing parameters for low end of pitch frequency range estimation */
 16351  
 16352  /* Min and max values for low end of pitch frequency range estimation */
 16353  
 16354  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
 16355  
 16356  /***********/
 16357  /* Various */
 16358  /***********/
 16359  
 16360  /* Required speech activity for counting frame as active */
 16361  
 16362  /* Speech Activity LBRR enable threshold (needs tuning) */
 16363  
 16364  /*************************/
 16365  /* Perceptual parameters */
 16366  /*************************/
 16367  
 16368  /* reduction in coding SNR during low speech activity */
 16369  
 16370  /* factor for reducing quantization noise during voiced speech */
 16371  
 16372  /* factor for reducing quantization noise for unvoiced sparse signals */
 16373  
 16374  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
 16375  
 16376  /* warping control */
 16377  
 16378  /* fraction added to first autocorrelation value */
 16379  
 16380  /* noise shaping filter chirp factor */
 16381  
 16382  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
 16383  
 16384  /* gain reduction for fricatives */
 16385  
 16386  /* extra harmonic boosting (signal shaping) at low bitrates */
 16387  
 16388  /* extra harmonic boosting (signal shaping) for noisy input signals */
 16389  
 16390  /* harmonic noise shaping */
 16391  
 16392  /* extra harmonic noise shaping for high bitrates or noisy input */
 16393  
 16394  /* parameter for shaping noise towards higher frequencies */
 16395  
 16396  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
 16397  
 16398  /* parameter for applying a high-pass tilt to the input signal */
 16399  
 16400  /* parameter for extra high-pass tilt to the input signal at high rates */
 16401  
 16402  /* parameter for reducing noise at the very low frequencies */
 16403  
 16404  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
 16405  
 16406  /* noise floor to put a lower limit on the quantization step size */
 16407  
 16408  /* noise floor relative to active speech gain level */
 16409  
 16410  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
 16411  
 16412  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
 16413  
 16414  /* parameters defining the R/D tradeoff in the residual quantizer */
 16415  
 16416  /*****************************/
 16417  /* Internal function headers */
 16418  /*****************************/
 16419  
 16420  type inv_D_t = struct {
 16421  	FQ36_part int32
 16422  	FQ48_part int32
 16423  } /* SKP_Silk_solve_LS_FIX.c:38:3 */
 16424  
 16425  /* Solves Ax = b, assuming A is symmetric */
 16426  func SKP_Silk_solve_LDL_FIX(tls *libc.TLS, A uintptr, M int32, b uintptr, x_Q16 uintptr) { /* SKP_Silk_solve_LS_FIX.c:71:6: */
 16427  	bp := tls.Alloc(1216)
 16428  	defer tls.Free(1216)
 16429  
 16430  	// var L_Q16 [256]int32 at bp, 1024
 16431  
 16432  	// var Y [16]int32 at bp+1152, 64
 16433  
 16434  	// var inv_D [16]inv_D_t at bp+1024, 128
 16435  
 16436  	/***************************************************
 16437  	  Factorize A by LDL such that A = L*D*L',
 16438  	  where L is lower triangular with ones on diagonal
 16439  	  ****************************************************/
 16440  	SKP_Silk_LDL_factorize_FIX(tls, A, M, bp /* &L_Q16[0] */, bp+1024 /* &inv_D[0] */)
 16441  
 16442  	/****************************************************
 16443  	  * Substitute D*L'*x = Y, i.e.:
 16444  	  L*D*L'*x = b => L*Y = b <=> Y = inv(L)*b
 16445  	  ******************************************************/
 16446  	SKP_Silk_LS_SolveFirst_FIX(tls, bp /* &L_Q16[0] */, M, b, bp+1152 /* &Y[0] */)
 16447  
 16448  	/****************************************************
 16449  	  D*L'*x = Y <=> L'*x = inv(D)*Y; because D is
 16450  	  diagonal, just multiply by 1/d_i
 16451  	  ****************************************************/
 16452  	SKP_Silk_LS_divide_Q16_FIX(tls, bp+1152 /* &Y[0] */, bp+1024 /* &inv_D[0] */, M)
 16453  
 16454  	/****************************************************
 16455  	  x = inv(L') * inv(D) * Y
 16456  	  *****************************************************/
 16457  	SKP_Silk_LS_SolveLast_FIX(tls, bp /* &L_Q16[0] */, M, bp+1152 /* &Y[0] */, x_Q16)
 16458  }
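// The four calls above are the standard LDL^T solve pipeline: factorize A = L*D*L^T,
// forward-substitute L*Y = b, scale by the inverse diagonal, then back-substitute
// L^T*x = Y. A compact float64 sketch of the same sequence (hypothetical reference
// helper; no pivoting, A assumed symmetric and positive definite, stored row-major):

func solveLDLRef(A []float64, m int, b []float64) []float64 {
	L := make([]float64, m*m)
	d := make([]float64, m)
	// LDL^T factorization: L is unit lower triangular, d holds the diagonal of D.
	for j := 0; j < m; j++ {
		sum := A[j*m+j]
		for k := 0; k < j; k++ {
			sum -= L[j*m+k] * L[j*m+k] * d[k]
		}
		d[j] = sum
		L[j*m+j] = 1
		for i := j + 1; i < m; i++ {
			s := A[i*m+j]
			for k := 0; k < j; k++ {
				s -= L[i*m+k] * L[j*m+k] * d[k]
			}
			L[i*m+j] = s / d[j]
		}
	}
	x := make([]float64, m)
	// Forward substitution: solve L*y = b (y stored in x).
	for i := 0; i < m; i++ {
		s := b[i]
		for k := 0; k < i; k++ {
			s -= L[i*m+k] * x[k]
		}
		x[i] = s
	}
	// Scale by the inverse diagonal: y = inv(D)*y.
	for i := 0; i < m; i++ {
		x[i] /= d[i]
	}
	// Back substitution: solve L^T*x = y.
	for i := m - 1; i >= 0; i-- {
		s := x[i]
		for k := i + 1; k < m; k++ {
			s -= L[k*m+i] * x[k]
		}
		x[i] = s
	}
	return x
}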
 16459  
 16460  func SKP_Silk_LDL_factorize_FIX(tls *libc.TLS, A uintptr, M int32, L_Q16 uintptr, inv_D uintptr) { /* SKP_Silk_solve_LS_FIX.c:108:17: */
 16461  	bp := tls.Alloc(128)
 16462  	defer tls.Free(128)
 16463  
 16464  	var i int32
 16465  	var j int32
 16466  	var k int32
 16467  	var status int32
 16468  	var loop_count int32
 16469  	var ptr1 uintptr
 16470  	var ptr2 uintptr
 16471  	var diag_min_value int32
 16472  	var tmp_32 int32
 16473  	var err int32
 16474  	// var v_Q0 [16]int32 at bp, 64
 16475  
 16476  	// var D_Q0 [16]int32 at bp+64, 64
 16477  
 16478  	var one_div_diag_Q36 int32
 16479  	var one_div_diag_Q40 int32
 16480  	var one_div_diag_Q48 int32
 16481  
 16482  	status = 1
 16483  	diag_min_value = SKP_max_32(tls, (int32(((func() int64 {
 16484  		if ((uint32((*(*int32)(unsafe.Pointer(A))) + (*(*int32)(unsafe.Pointer(A + uintptr((((int32(int16(M)))*(int32(int16(M))))-1))*4))))) & 0x80000000) == uint32(0) {
 16485  			return func() int64 {
 16486  				if ((uint32((*(*int32)(unsafe.Pointer(A))) & (*(*int32)(unsafe.Pointer(A + uintptr((((int32(int16(M)))*(int32(int16(M))))-1))*4))))) & 0x80000000) != uint32(0) {
 16487  					return int64(libc.Int32FromUint32(0x80000000))
 16488  				}
 16489  				return (int64((*(*int32)(unsafe.Pointer(A))) + (*(*int32)(unsafe.Pointer(A + uintptr((((int32(int16(M)))*(int32(int16(M))))-1))*4)))))
 16490  			}()
 16491  		}
 16492  		return func() int64 {
 16493  			if ((uint32((*(*int32)(unsafe.Pointer(A))) | (*(*int32)(unsafe.Pointer(A + uintptr((((int32(int16(M)))*(int32(int16(M))))-1))*4))))) & 0x80000000) == uint32(0) {
 16494  				return int64(0x7FFFFFFF)
 16495  			}
 16496  			return (int64((*(*int32)(unsafe.Pointer(A))) + (*(*int32)(unsafe.Pointer(A + uintptr((((int32(int16(M)))*(int32(int16(M))))-1))*4)))))
 16497  		}()
 16498  	}()) * (int64_t(SKP_FIX_CONST(tls, 1e-5, 31)))) >> (32))), (int32(1) << 9))
 16499  	for loop_count = 0; (loop_count < M) && (status == 1); loop_count++ {
 16500  		status = 0
 16501  		for j = 0; j < M; j++ {
 16502  			ptr1 = (L_Q16 + uintptr((((j)*(M))+(0)))*4)
 16503  			tmp_32 = 0
 16504  			for i = 0; i < j; i++ {
 16505  				*(*int32)(unsafe.Pointer(bp /* &v_Q0[0] */ + uintptr(i)*4)) = (((((*(*int32)(unsafe.Pointer(bp + 64 /* &D_Q0[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp + 64 /* &D_Q0[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4)))))) >> 16)) + ((*(*int32)(unsafe.Pointer(bp + 64 /* &D_Q0[0] */ + uintptr(i)*4))) * (func() int32 {
 16506  					if (16) == 1 {
 16507  						return (((*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4))) & 1))
 16508  					}
 16509  					return ((((*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 16510  				}()))) /* Q0 */
 16511  				tmp_32 = (((tmp_32) + ((((*(*int32)(unsafe.Pointer(bp /* &v_Q0[0] */ + uintptr(i)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp /* &v_Q0[0] */ + uintptr(i)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4)))))) >> 16))) + ((*(*int32)(unsafe.Pointer(bp /* &v_Q0[0] */ + uintptr(i)*4))) * (func() int32 {
 16512  					if (16) == 1 {
 16513  						return (((*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4))) & 1))
 16514  					}
 16515  					return ((((*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4))) >> ((16) - 1)) + 1) >> 1)
 16516  				}()))) /* Q0 */
 16517  			}
 16518  			tmp_32 = ((*(*int32)(unsafe.Pointer((A + uintptr((((j)*(M))+(j)))*4)))) - (tmp_32))
 16519  
 16520  			if tmp_32 < diag_min_value {
 16521  				tmp_32 = (((int32((int16(loop_count + 1)))) * (int32(int16(diag_min_value)))) - (tmp_32))
 16522  				/* Matrix not positive semi-definite, or ill conditioned */
 16523  				for i = 0; i < M; i++ {
 16524  					*(*int32)(unsafe.Pointer((A + uintptr((((i)*(M))+(i)))*4))) = ((*(*int32)(unsafe.Pointer((A + uintptr((((i)*(M))+(i)))*4)))) + (tmp_32))
 16525  				}
 16526  				status = 1
 16527  				break
 16528  			}
 16529  			*(*int32)(unsafe.Pointer(bp + 64 /* &D_Q0[0] */ + uintptr(j)*4)) = tmp_32 /* always < max(Correlation) */
 16530  
 16531  			/* two-step division */
 16532  			one_div_diag_Q36 = SKP_INVERSE32_varQ(tls, tmp_32, 36) /* Q36 */
 16533  			one_div_diag_Q40 = ((one_div_diag_Q36) << (4))         /* Q40 */
 16534  			err = ((int32(1) << 24) - (((((tmp_32) >> 16) * (int32(int16(one_div_diag_Q40)))) + ((((tmp_32) & 0x0000FFFF) * (int32(int16(one_div_diag_Q40)))) >> 16)) + ((tmp_32) * (func() int32 {
 16535  				if (16) == 1 {
 16536  					return (((one_div_diag_Q40) >> 1) + ((one_div_diag_Q40) & 1))
 16537  				}
 16538  				return ((((one_div_diag_Q40) >> ((16) - 1)) + 1) >> 1)
 16539  			}())))) /* Q24 */
 16540  			one_div_diag_Q48 = (((((err) >> 16) * (int32(int16(one_div_diag_Q40)))) + ((((err) & 0x0000FFFF) * (int32(int16(one_div_diag_Q40)))) >> 16)) + ((err) * (func() int32 {
 16541  				if (16) == 1 {
 16542  					return (((one_div_diag_Q40) >> 1) + ((one_div_diag_Q40) & 1))
 16543  				}
 16544  				return ((((one_div_diag_Q40) >> ((16) - 1)) + 1) >> 1)
 16545  			}()))) /* Q48 */
 16546  
 16547  			/* Save 1/Ds */
 16548  			(*inv_D_t)(unsafe.Pointer(inv_D + uintptr(j)*8)).FQ36_part = one_div_diag_Q36
 16549  			(*inv_D_t)(unsafe.Pointer(inv_D + uintptr(j)*8)).FQ48_part = one_div_diag_Q48
 16550  
 16551  			*(*int32)(unsafe.Pointer((L_Q16 + uintptr((((j)*(M))+(j)))*4))) = 65536 /* 1.0 in Q16 */
 16552  			ptr1 = (A + uintptr((((j)*(M))+(0)))*4)
 16553  			ptr2 = (L_Q16 + uintptr((((j+1)*(M))+(0)))*4)
 16554  			for i = (j + 1); i < M; i++ {
 16555  				tmp_32 = 0
 16556  				for k = 0; k < j; k++ {
 16557  					tmp_32 = (((tmp_32) + ((((*(*int32)(unsafe.Pointer(bp /* &v_Q0[0] */ + uintptr(k)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(ptr2 + uintptr(k)*4)))))) + ((((*(*int32)(unsafe.Pointer(bp /* &v_Q0[0] */ + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(ptr2 + uintptr(k)*4)))))) >> 16))) + ((*(*int32)(unsafe.Pointer(bp /* &v_Q0[0] */ + uintptr(k)*4))) * (func() int32 {
 16558  						if (16) == 1 {
 16559  							return (((*(*int32)(unsafe.Pointer(ptr2 + uintptr(k)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(ptr2 + uintptr(k)*4))) & 1))
 16560  						}
 16561  						return ((((*(*int32)(unsafe.Pointer(ptr2 + uintptr(k)*4))) >> ((16) - 1)) + 1) >> 1)
 16562  					}()))) /* Q0 */
 16563  				}
 16564  				tmp_32 = ((*(*int32)(unsafe.Pointer(ptr1 + uintptr(i)*4))) - (tmp_32)) /* always < max(Correlation) */
 16565  
 16566  				/* tmp_32 / D_Q0[j] : Divide to Q16 */
 16567  				*(*int32)(unsafe.Pointer((L_Q16 + uintptr((((i)*(M))+(j)))*4))) = ((int32(((int64_t(tmp_32)) * (int64_t(one_div_diag_Q48))) >> (32))) + ((((((tmp_32) >> 16) * (int32(int16(one_div_diag_Q36)))) + ((((tmp_32) & 0x0000FFFF) * (int32(int16(one_div_diag_Q36)))) >> 16)) + ((tmp_32) * (func() int32 {
 16568  					if (16) == 1 {
 16569  						return (((one_div_diag_Q36) >> 1) + ((one_div_diag_Q36) & 1))
 16570  					}
 16571  					return ((((one_div_diag_Q36) >> ((16) - 1)) + 1) >> 1)
 16572  				}()))) >> (4)))
 16573  
 16574  				/* go to next column */
 16575  				ptr2 += 4 * (uintptr(M))
 16576  			}
 16577  		}
 16578  	}
 16579  
 16580  }
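// The "two-step division" above refines a coarse reciprocal of the diagonal entry: the
// residual err = 1 - d*inv_d is formed in Q24 and multiplied back by the reciprocal to
// give a Q48 correction term, which is kept alongside the Q36 part and recombined in
// SKP_Silk_LS_divide_Q16_FIX and the L_Q16 update. In floating point this is one
// Newton-Raphson step for 1/d (hypothetical illustration):

func refineReciprocal(d, r0 float64) float64 {
	err := 1 - d*r0    // residual of the coarse reciprocal (the Q24 "err" above)
	return r0 + r0*err // one Newton step; roughly doubles the number of accurate bits
}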
 16581  
 16582  func SKP_Silk_LS_divide_Q16_FIX(tls *libc.TLS, T uintptr, inv_D uintptr, M int32) { /* SKP_Silk_solve_LS_FIX.c:180:17: */
 16583  	var i int32
 16584  	var tmp_32 int32
 16585  	var one_div_diag_Q36 int32
 16586  	var one_div_diag_Q48 int32
 16587  
 16588  	for i = 0; i < M; i++ {
 16589  		one_div_diag_Q36 = (*inv_D_t)(unsafe.Pointer(inv_D + uintptr(i)*8)).FQ36_part
 16590  		one_div_diag_Q48 = (*inv_D_t)(unsafe.Pointer(inv_D + uintptr(i)*8)).FQ48_part
 16591  
 16592  		tmp_32 = *(*int32)(unsafe.Pointer(T + uintptr(i)*4))
 16593  		*(*int32)(unsafe.Pointer(T + uintptr(i)*4)) = ((int32(((int64_t(tmp_32)) * (int64_t(one_div_diag_Q48))) >> (32))) + ((((((tmp_32) >> 16) * (int32(int16(one_div_diag_Q36)))) + ((((tmp_32) & 0x0000FFFF) * (int32(int16(one_div_diag_Q36)))) >> 16)) + ((tmp_32) * (func() int32 {
 16594  			if (16) == 1 {
 16595  				return (((one_div_diag_Q36) >> 1) + ((one_div_diag_Q36) & 1))
 16596  			}
 16597  			return ((((one_div_diag_Q36) >> ((16) - 1)) + 1) >> 1)
 16598  		}()))) >> (4)))
 16599  	}
 16600  }
 16601  
 16602  /* Solve Lx = b, where L is lower triangular and has ones on the diagonal */
 16603  func SKP_Silk_LS_SolveFirst_FIX(tls *libc.TLS, L_Q16 uintptr, M int32, b uintptr, x_Q16 uintptr) { /* SKP_Silk_solve_LS_FIX.c:200:17: */
 16604  	var i int32
 16605  	var j int32
 16606  	var ptr32 uintptr
 16607  	var tmp_32 int32
 16608  
 16609  	for i = 0; i < M; i++ {
 16610  		ptr32 = (L_Q16 + uintptr((((i)*(M))+(0)))*4)
 16611  		tmp_32 = 0
 16612  		for j = 0; j < i; j++ {
 16613  			tmp_32 = (((tmp_32) + ((((*(*int32)(unsafe.Pointer(ptr32 + uintptr(j)*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4)))))) + ((((*(*int32)(unsafe.Pointer(ptr32 + uintptr(j)*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4)))))) >> 16))) + ((*(*int32)(unsafe.Pointer(ptr32 + uintptr(j)*4))) * (func() int32 {
 16614  				if (16) == 1 {
 16615  					return (((*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4))) & 1))
 16616  				}
 16617  				return ((((*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4))) >> ((16) - 1)) + 1) >> 1)
 16618  			}())))
 16619  		}
 16620  		*(*int32)(unsafe.Pointer(x_Q16 + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer(b + uintptr(i)*4))) - (tmp_32))
 16621  	}
 16622  }
 16623  
 16624  /* Solve L^t*x = b, where L is lower triangular with ones on the diagonal */
 16625  func SKP_Silk_LS_SolveLast_FIX(tls *libc.TLS, L_Q16 uintptr, M int32, b uintptr, x_Q16 uintptr) { /* SKP_Silk_solve_LS_FIX.c:222:17: */
 16626  	var i int32
 16627  	var j int32
 16628  	var ptr32 uintptr
 16629  	var tmp_32 int32
 16630  
 16631  	for i = (M - 1); i >= 0; i-- {
 16632  		ptr32 = (L_Q16 + uintptr((((0)*(M))+(i)))*4)
 16633  		tmp_32 = 0
 16634  		for j = (M - 1); j > i; j-- {
 16635  			tmp_32 = (((tmp_32) + ((((*(*int32)(unsafe.Pointer(ptr32 + uintptr(((int32(int16(j)))*(int32(int16(M)))))*4))) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4)))))) + ((((*(*int32)(unsafe.Pointer(ptr32 + uintptr(((int32(int16(j)))*(int32(int16(M)))))*4))) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4)))))) >> 16))) + ((*(*int32)(unsafe.Pointer(ptr32 + uintptr(((int32(int16(j)))*(int32(int16(M)))))*4))) * (func() int32 {
 16636  				if (16) == 1 {
 16637  					return (((*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4))) >> 1) + ((*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4))) & 1))
 16638  				}
 16639  				return ((((*(*int32)(unsafe.Pointer(x_Q16 + uintptr(j)*4))) >> ((16) - 1)) + 1) >> 1)
 16640  			}())))
 16641  		}
 16642  		*(*int32)(unsafe.Pointer(x_Q16 + uintptr(i)*4)) = ((*(*int32)(unsafe.Pointer(b + uintptr(i)*4))) - (tmp_32))
 16643  	}
 16644  }
 16645  
 16646  func SKP_Silk_insertion_sort_increasing(tls *libc.TLS, a uintptr, index uintptr, L int32, K int32) { /* SKP_Silk_sort.c:34:6: */
 16647  	var value int32
 16648  	var i int32
 16649  	var j int32
 16650  
 16651  	/* Safety checks */
 16652  
 16653  	/* Write start indices in index vector */
 16654  	for i = 0; i < K; i++ {
 16655  		*(*int32)(unsafe.Pointer(index + uintptr(i)*4)) = i
 16656  	}
 16657  
 16658  	/* Sort vector elements by value, increasing order */
 16659  	for i = 1; i < K; i++ {
 16660  		value = *(*int32)(unsafe.Pointer(a + uintptr(i)*4))
 16661  		for j = (i - 1); (j >= 0) && (value < *(*int32)(unsafe.Pointer(a + uintptr(j)*4))); j-- {
 16662  			*(*int32)(unsafe.Pointer(a + uintptr((j+1))*4)) = *(*int32)(unsafe.Pointer(a + uintptr(j)*4))         /* Shift value */
 16663  			*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = *(*int32)(unsafe.Pointer(index + uintptr(j)*4)) /* Shift index */
 16664  		}
 16665  		*(*int32)(unsafe.Pointer(a + uintptr((j+1))*4)) = value /* Write value */
 16666  		*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = i /* Write index */
 16667  	}
 16668  
 16669  	/* If fewer than L values are asked for, check the remaining values, */
 16670  	/* but only spend CPU to ensure that the first K values are correct  */
 16671  	for i = K; i < L; i++ {
 16672  		value = *(*int32)(unsafe.Pointer(a + uintptr(i)*4))
 16673  		if value < *(*int32)(unsafe.Pointer(a + uintptr((K-1))*4)) {
 16674  			for j = (K - 2); (j >= 0) && (value < *(*int32)(unsafe.Pointer(a + uintptr(j)*4))); j-- {
 16675  				*(*int32)(unsafe.Pointer(a + uintptr((j+1))*4)) = *(*int32)(unsafe.Pointer(a + uintptr(j)*4))         /* Shift value */
 16676  				*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = *(*int32)(unsafe.Pointer(index + uintptr(j)*4)) /* Shift index */
 16677  			}
 16678  			*(*int32)(unsafe.Pointer(a + uintptr((j+1))*4)) = value /* Write value */
 16679  			*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = i /* Write index */
 16680  		}
 16681  	}
 16682  }
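// What the routine above does, restated as a plain-Go sketch (hypothetical helper):
// keep the K smallest of the L input values sorted at the front of a, together with
// their original positions in idx; values past index K are only inserted when they beat
// the current K-th smallest, so the tail of a is left unsorted. a and idx need length L.

func partialInsertionSort(a, idx []int32, L, K int) {
	for i := 0; i < K; i++ {
		idx[i] = int32(i)
	}
	for i := 1; i < K; i++ {
		v := a[i]
		j := i - 1
		for ; j >= 0 && v < a[j]; j-- {
			a[j+1], idx[j+1] = a[j], idx[j] // shift value and index up
		}
		a[j+1], idx[j+1] = v, int32(i)
	}
	for i := K; i < L; i++ {
		v := a[i]
		if v < a[K-1] { // only insert if it beats the current K-th smallest
			j := K - 2
			for ; j >= 0 && v < a[j]; j-- {
				a[j+1], idx[j+1] = a[j], idx[j]
			}
			a[j+1], idx[j+1] = v, int32(i)
		}
	}
}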
 16683  
 16684  func SKP_Silk_insertion_sort_decreasing_int16(tls *libc.TLS, a uintptr, index uintptr, L int32, K int32) { /* SKP_Silk_sort.c:80:6: */
 16685  	var i int32
 16686  	var j int32
 16687  	var value int32
 16688  
 16689  	/* Safety checks */
 16690  
 16691  	/* Write start indices in index vector */
 16692  	for i = 0; i < K; i++ {
 16693  		*(*int32)(unsafe.Pointer(index + uintptr(i)*4)) = i
 16694  	}
 16695  
 16696  	/* Sort vector elements by value, decreasing order */
 16697  	for i = 1; i < K; i++ {
 16698  		value = int32(*(*int16)(unsafe.Pointer(a + uintptr(i)*2)))
 16699  		for j = (i - 1); (j >= 0) && (value > int32(*(*int16)(unsafe.Pointer(a + uintptr(j)*2)))); j-- {
 16700  			*(*int16)(unsafe.Pointer(a + uintptr((j+1))*2)) = *(*int16)(unsafe.Pointer(a + uintptr(j)*2))         /* Shift value */
 16701  			*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = *(*int32)(unsafe.Pointer(index + uintptr(j)*4)) /* Shift index */
 16702  		}
 16703  		*(*int16)(unsafe.Pointer(a + uintptr((j+1))*2)) = int16(value) /* Write value */
 16704  		*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = i        /* Write index */
 16705  	}
 16706  
 16707  	/* If fewer than L values are asked for, check the remaining values, */
 16708  	/* but only spend CPU to ensure that the first K values are correct  */
 16709  	for i = K; i < L; i++ {
 16710  		value = int32(*(*int16)(unsafe.Pointer(a + uintptr(i)*2)))
 16711  		if value > int32(*(*int16)(unsafe.Pointer(a + uintptr((K-1))*2))) {
 16712  			for j = (K - 2); (j >= 0) && (value > int32(*(*int16)(unsafe.Pointer(a + uintptr(j)*2)))); j-- {
 16713  				*(*int16)(unsafe.Pointer(a + uintptr((j+1))*2)) = *(*int16)(unsafe.Pointer(a + uintptr(j)*2))         /* Shift value */
 16714  				*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = *(*int32)(unsafe.Pointer(index + uintptr(j)*4)) /* Shift index */
 16715  			}
 16716  			*(*int16)(unsafe.Pointer(a + uintptr((j+1))*2)) = int16(value) /* Write value */
 16717  			*(*int32)(unsafe.Pointer(index + uintptr((j+1))*4)) = i        /* Write index */
 16718  		}
 16719  	}
 16720  }
 16721  
 16722  func SKP_Silk_insertion_sort_increasing_all_values(tls *libc.TLS, a uintptr, L int32) { /* SKP_Silk_sort.c:126:6: */
 16723  	var value int32
 16724  	var i int32
 16725  	var j int32
 16726  
 16727  	/* Safety checks */
 16728  
 16729  	/* Sort vector elements by value, increasing order */
 16730  	for i = 1; i < L; i++ {
 16731  		value = *(*int32)(unsafe.Pointer(a + uintptr(i)*4))
 16732  		for j = (i - 1); (j >= 0) && (value < *(*int32)(unsafe.Pointer(a + uintptr(j)*4))); j-- {
 16733  			*(*int32)(unsafe.Pointer(a + uintptr((j+1))*4)) = *(*int32)(unsafe.Pointer(a + uintptr(j)*4)) /* Shift value */
 16734  		}
 16735  		*(*int32)(unsafe.Pointer(a + uintptr((j+1))*4)) = value /* Write value */
 16736  	}
 16737  }
 16738  
 16739  /* Compute number of bits to right shift the sum of squares of a vector */
 16740  /* of int16s to make it fit in an int32                                 */
 16741  func SKP_Silk_sum_sqr_shift(tls *libc.TLS, energy uintptr, shift uintptr, x uintptr, len int32) { /* SKP_Silk_sum_sqr_shift.c:39:6: */
 16742  	var i int32
 16743  	var shft int32
 16744  	var in32 int32
 16745  	var nrg_tmp int32
 16746  	var nrg int32
 16747  
 16748  	if (intptr_t(x) & 2) != 0 {
 16749  		/* Input is not 4-byte aligned */
 16750  		nrg = ((int32(*(*int16)(unsafe.Pointer(x)))) * (int32(*(*int16)(unsafe.Pointer(x)))))
 16751  		i = 1
 16752  	} else {
 16753  		nrg = 0
 16754  		i = 0
 16755  	}
 16756  	shft = 0
 16757  	len--
 16758  	for i < len {
 16759  		/* Load two values at once */
 16760  		in32 = *(*int32)(unsafe.Pointer((x + uintptr(i)*2)))
 16761  		nrg = (int32((uint32(nrg)) + (uint32((int32(int16(in32))) * (int32(int16(in32)))))))
 16762  		nrg = (int32((uint32(nrg)) + (uint32(((in32) >> 16) * ((in32) >> 16)))))
 16763  		i = i + (2)
 16764  		if nrg < 0 {
 16765  			/* Scale down */
 16766  			nrg = (int32((uint32(nrg)) >> (2)))
 16767  			shft = 2
 16768  			break
 16769  		}
 16770  	}
 16771  	for ; i < len; i = i + (2) {
 16772  		/* Load two values at once */
 16773  		in32 = *(*int32)(unsafe.Pointer((x + uintptr(i)*2)))
 16774  		nrg_tmp = ((int32(int16(in32))) * (int32(int16(in32))))
 16775  		nrg_tmp = (int32((uint32(nrg_tmp)) + (uint32(((in32) >> 16) * ((in32) >> 16)))))
 16776  		nrg = (int32((uint32(nrg)) + ((uint32(nrg_tmp)) >> (shft))))
 16777  		if nrg < 0 {
 16778  			/* Scale down */
 16779  			nrg = (int32((uint32(nrg)) >> (2)))
 16780  			shft = shft + (2)
 16781  		}
 16782  	}
 16783  	if i == len {
 16784  		/* One sample left to process */
 16785  		nrg_tmp = ((int32(*(*int16)(unsafe.Pointer(x + uintptr(i)*2)))) * (int32(*(*int16)(unsafe.Pointer(x + uintptr(i)*2)))))
 16786  		nrg = ((nrg) + ((nrg_tmp) >> (shft)))
 16787  	}
 16788  
 16789  	/* Make sure to have at least one extra leading zero (two leading zeros in total) */
 16790  	if (uint32(nrg) & 0xC0000000) != 0 {
 16791  		nrg = (int32((uint32(nrg)) >> (2)))
 16792  		shft = shft + (2)
 16793  	}
 16794  
 16795  	/* Output arguments */
 16796  	*(*int32)(unsafe.Pointer(shift)) = shft
 16797  	*(*int32)(unsafe.Pointer(energy)) = nrg
 16798  }
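// The routine above accumulates the energy of an int16 vector two samples per iteration
// (via aligned 32-bit loads) and, whenever the accumulator would spill into the top
// bits, shifts it and all later contributions right by 2, finally guaranteeing two
// leading zero bits. A simpler one-sample-per-step sketch of the same contract
// (hypothetical helper, no alignment tricks):

func sumSqrShiftRef(x []int16) (energy int32, shift int32) {
	for _, v := range x {
		sq := int32(v) * int32(v)      // <= 2^30, always non-negative
		energy += sq >> uint(shift)    // add at the current scaling
		for uint32(energy)&0xC0000000 != 0 {
			energy = int32(uint32(energy) >> 2) // keep two leading zero bits
			shift += 2
		}
	}
	return energy, shift // energy ~= sum(x[i]^2) >> shift
}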
 16799  
 16800  var SKP_Silk_gain_CDF = [2][65]uint16{
 16801  	{
 16802  		uint16(0), uint16(18), uint16(45), uint16(94), uint16(181), uint16(320), uint16(519), uint16(777),
 16803  		uint16(1093), uint16(1468), uint16(1909), uint16(2417), uint16(2997), uint16(3657), uint16(4404), uint16(5245),
 16804  		uint16(6185), uint16(7228), uint16(8384), uint16(9664), uint16(11069), uint16(12596), uint16(14244), uint16(16022),
 16805  		uint16(17937), uint16(19979), uint16(22121), uint16(24345), uint16(26646), uint16(29021), uint16(31454), uint16(33927),
 16806  		uint16(36438), uint16(38982), uint16(41538), uint16(44068), uint16(46532), uint16(48904), uint16(51160), uint16(53265),
 16807  		uint16(55184), uint16(56904), uint16(58422), uint16(59739), uint16(60858), uint16(61793), uint16(62568), uint16(63210),
 16808  		uint16(63738), uint16(64165), uint16(64504), uint16(64769), uint16(64976), uint16(65133), uint16(65249), uint16(65330),
 16809  		uint16(65386), uint16(65424), uint16(65451), uint16(65471), uint16(65487), uint16(65501), uint16(65513), uint16(65524),
 16810  		uint16(65535),
 16811  	},
 16812  	{
 16813  		uint16(0), uint16(214), uint16(581), uint16(1261), uint16(2376), uint16(3920), uint16(5742), uint16(7632),
 16814  		uint16(9449), uint16(11157), uint16(12780), uint16(14352), uint16(15897), uint16(17427), uint16(18949), uint16(20462),
 16815  		uint16(21957), uint16(23430), uint16(24889), uint16(26342), uint16(27780), uint16(29191), uint16(30575), uint16(31952),
 16816  		uint16(33345), uint16(34763), uint16(36200), uint16(37642), uint16(39083), uint16(40519), uint16(41930), uint16(43291),
 16817  		uint16(44602), uint16(45885), uint16(47154), uint16(48402), uint16(49619), uint16(50805), uint16(51959), uint16(53069),
 16818  		uint16(54127), uint16(55140), uint16(56128), uint16(57101), uint16(58056), uint16(58979), uint16(59859), uint16(60692),
 16819  		uint16(61468), uint16(62177), uint16(62812), uint16(63368), uint16(63845), uint16(64242), uint16(64563), uint16(64818),
 16820  		uint16(65023), uint16(65184), uint16(65306), uint16(65391), uint16(65447), uint16(65482), uint16(65505), uint16(65521),
 16821  		uint16(65535),
 16822  	},
 16823  } /* SKP_Silk_tables_gain.c:35:18 */
 16824  
 16825  var SKP_Silk_gain_CDF_offset int32 = 32 /* SKP_Silk_tables_gain.c:61:15 */
 16826  
 16827  var SKP_Silk_delta_gain_CDF = [46]uint16{
 16828  	uint16(0), uint16(2358), uint16(3856), uint16(7023), uint16(15376), uint16(53058), uint16(59135), uint16(61555),
 16829  	uint16(62784), uint16(63498), uint16(63949), uint16(64265), uint16(64478), uint16(64647), uint16(64783), uint16(64894),
 16830  	uint16(64986), uint16(65052), uint16(65113), uint16(65169), uint16(65213), uint16(65252), uint16(65284), uint16(65314),
 16831  	uint16(65338), uint16(65359), uint16(65377), uint16(65392), uint16(65403), uint16(65415), uint16(65424), uint16(65432),
 16832  	uint16(65440), uint16(65448), uint16(65455), uint16(65462), uint16(65470), uint16(65477), uint16(65484), uint16(65491),
 16833  	uint16(65499), uint16(65506), uint16(65513), uint16(65521), uint16(65528), uint16(65535),
 16834  } /* SKP_Silk_tables_gain.c:64:18 */
 16835  
 16836  var SKP_Silk_delta_gain_CDF_offset int32 = 5 /* SKP_Silk_tables_gain.c:73:15 */
 16837  
 16838  var SKP_Silk_LTP_per_index_CDF = [4]uint16{
 16839  	uint16(0), uint16(20992), uint16(40788), uint16(65535),
 16840  } /* SKP_Silk_tables_LTP.c:30:18 */
 16841  
 16842  var SKP_Silk_LTP_per_index_CDF_offset int32 = 1 /* SKP_Silk_tables_LTP.c:34:15 */
 16843  
 16844  var SKP_Silk_LTP_gain_CDF_0 = [11]uint16{
 16845  	uint16(0), uint16(49380), uint16(54463), uint16(56494), uint16(58437), uint16(60101), uint16(61683), uint16(62985),
 16846  	uint16(64066), uint16(64823), uint16(65535),
 16847  } /* SKP_Silk_tables_LTP.c:37:18 */
 16848  
 16849  var SKP_Silk_LTP_gain_CDF_1 = [21]uint16{
 16850  	uint16(0), uint16(25290), uint16(30654), uint16(35710), uint16(40386), uint16(42937), uint16(45250), uint16(47459),
 16851  	uint16(49411), uint16(51348), uint16(52974), uint16(54517), uint16(55976), uint16(57423), uint16(58865), uint16(60285),
 16852  	uint16(61667), uint16(62895), uint16(63827), uint16(64724), uint16(65535),
 16853  } /* SKP_Silk_tables_LTP.c:42:18 */
 16854  
 16855  var SKP_Silk_LTP_gain_CDF_2 = [41]uint16{
 16856  	uint16(0), uint16(4958), uint16(9439), uint16(13581), uint16(17638), uint16(21651), uint16(25015), uint16(28025),
 16857  	uint16(30287), uint16(32406), uint16(34330), uint16(36240), uint16(38130), uint16(39790), uint16(41281), uint16(42764),
 16858  	uint16(44229), uint16(45676), uint16(47081), uint16(48431), uint16(49675), uint16(50849), uint16(51932), uint16(52966),
 16859  	uint16(53957), uint16(54936), uint16(55869), uint16(56789), uint16(57708), uint16(58504), uint16(59285), uint16(60043),
 16860  	uint16(60796), uint16(61542), uint16(62218), uint16(62871), uint16(63483), uint16(64076), uint16(64583), uint16(65062),
 16861  	uint16(65535),
 16862  } /* SKP_Silk_tables_LTP.c:48:18 */
 16863  
 16864  var SKP_Silk_LTP_gain_CDF_offsets = [3]int32{
 16865  	1, 3, 10,
 16866  } /* SKP_Silk_tables_LTP.c:57:15 */
 16867  
 16868  var SKP_Silk_LTP_gain_middle_avg_RD_Q14 int32 = 11010 /* SKP_Silk_tables_LTP.c:61:17 */
 16869  
 16870  var SKP_Silk_LTP_gain_BITS_Q6_0 = [10]int16{
 16871  	int16(26), int16(236), int16(321), int16(325), int16(339), int16(344), int16(362), int16(379),
 16872  	int16(412), int16(418),
 16873  } /* SKP_Silk_tables_LTP.c:63:17 */
 16874  
 16875  var SKP_Silk_LTP_gain_BITS_Q6_1 = [20]int16{
 16876  	int16(88), int16(231), int16(237), int16(244), int16(300), int16(309), int16(313), int16(324),
 16877  	int16(325), int16(341), int16(346), int16(351), int16(352), int16(352), int16(354), int16(356),
 16878  	int16(367), int16(393), int16(396), int16(406),
 16879  } /* SKP_Silk_tables_LTP.c:68:17 */
 16880  
 16881  var SKP_Silk_LTP_gain_BITS_Q6_2 = [40]int16{
 16882  	int16(238), int16(248), int16(255), int16(257), int16(258), int16(274), int16(284), int16(311),
 16883  	int16(317), int16(326), int16(326), int16(327), int16(339), int16(349), int16(350), int16(351),
 16884  	int16(352), int16(355), int16(358), int16(366), int16(371), int16(379), int16(383), int16(387),
 16885  	int16(388), int16(393), int16(394), int16(394), int16(407), int16(409), int16(412), int16(412),
 16886  	int16(413), int16(422), int16(426), int16(432), int16(434), int16(449), int16(454), int16(455),
 16887  } /* SKP_Silk_tables_LTP.c:74:17 */
 16888  
 16889  var SKP_Silk_LTP_gain_CDF_ptrs = [3]uintptr{
 16890  	0,
 16891  	0,
 16892  	0,
 16893  } /* SKP_Silk_tables_LTP.c:82:18 */
 16894  
 16895  var SKP_Silk_LTP_gain_BITS_Q6_ptrs = [3]uintptr{
 16896  	0,
 16897  	0,
 16898  	0,
 16899  } /* SKP_Silk_tables_LTP.c:88:17 */
 16900  
 16901  var SKP_Silk_LTP_gain_vq_0_Q14 = [10][5]int16{
 16902  	{
 16903  		int16(594), int16(984), int16(2840), int16(1021), int16(669),
 16904  	},
 16905  	{
 16906  		int16(10), int16(35), int16(304), int16(-1), int16(23),
 16907  	},
 16908  	{
 16909  		int16(-694), int16(1923), int16(4603), int16(2975), int16(2335),
 16910  	},
 16911  	{
 16912  		int16(2437), int16(3176), int16(3778), int16(1940), int16(481),
 16913  	},
 16914  	{
 16915  		int16(214), int16(-46), int16(7870), int16(4406), int16(-521),
 16916  	},
 16917  	{
 16918  		int16(-896), int16(4818), int16(8501), int16(1623), int16(-887),
 16919  	},
 16920  	{
 16921  		int16(-696), int16(3178), int16(6480), int16(-302), int16(1081),
 16922  	},
 16923  	{
 16924  		int16(517), int16(599), int16(1002), int16(567), int16(560),
 16925  	},
 16926  	{
 16927  		int16(-2075), int16(-834), int16(4712), int16(-340), int16(896),
 16928  	},
 16929  	{
 16930  		int16(1435), int16(-644), int16(3993), int16(-612), int16(-2063),
 16931  	},
 16932  } /* SKP_Silk_tables_LTP.c:94:17 */
 16933  
 16934  var SKP_Silk_LTP_gain_vq_1_Q14 = [20][5]int16{
 16935  	{
 16936  		int16(1655), int16(2918), int16(5001), int16(3010), int16(1775),
 16937  	},
 16938  	{
 16939  		int16(113), int16(198), int16(856), int16(176), int16(178),
 16940  	},
 16941  	{
 16942  		int16(-843), int16(2479), int16(7858), int16(5371), int16(574),
 16943  	},
 16944  	{
 16945  		int16(59), int16(5356), int16(7648), int16(2850), int16(-315),
 16946  	},
 16947  	{
 16948  		int16(3840), int16(4851), int16(6527), int16(1583), int16(-1233),
 16949  	},
 16950  	{
 16951  		int16(1620), int16(1760), int16(2330), int16(1876), int16(2045),
 16952  	},
 16953  	{
 16954  		int16(-545), int16(1854), int16(11792), int16(1547), int16(-307),
 16955  	},
 16956  	{
 16957  		int16(-604), int16(689), int16(5369), int16(5074), int16(4265),
 16958  	},
 16959  	{
 16960  		int16(521), int16(-1331), int16(9829), int16(6209), int16(-1211),
 16961  	},
 16962  	{
 16963  		int16(-1315), int16(6747), int16(9929), int16(-1410), int16(546),
 16964  	},
 16965  	{
 16966  		int16(117), int16(-144), int16(2810), int16(1649), int16(5240),
 16967  	},
 16968  	{
 16969  		int16(5392), int16(3476), int16(2425), int16(-38), int16(633),
 16970  	},
 16971  	{
 16972  		int16(14), int16(-449), int16(5274), int16(3547), int16(-171),
 16973  	},
 16974  	{
 16975  		int16(-98), int16(395), int16(9114), int16(1676), int16(844),
 16976  	},
 16977  	{
 16978  		int16(-908), int16(3843), int16(8861), int16(-957), int16(1474),
 16979  	},
 16980  	{
 16981  		int16(396), int16(6747), int16(5379), int16(-329), int16(1269),
 16982  	},
 16983  	{
 16984  		int16(-335), int16(2830), int16(4281), int16(270), int16(-54),
 16985  	},
 16986  	{
 16987  		int16(1502), int16(5609), int16(8958), int16(6045), int16(2059),
 16988  	},
 16989  	{
 16990  		int16(-370), int16(479), int16(5267), int16(5726), int16(1174),
 16991  	},
 16992  	{
 16993  		int16(5237), int16(-1144), int16(6510), int16(455), int16(512),
 16994  	},
 16995  } /* SKP_Silk_tables_LTP.c:128:17 */
 16996  
 16997  var SKP_Silk_LTP_gain_vq_2_Q14 = [40][5]int16{
 16998  	{
 16999  		int16(-278), int16(415), int16(9345), int16(7106), int16(-431),
 17000  	},
 17001  	{
 17002  		int16(-1006), int16(3863), int16(9524), int16(4724), int16(-871),
 17003  	},
 17004  	{
 17005  		int16(-954), int16(4624), int16(11722), int16(973), int16(-300),
 17006  	},
 17007  	{
 17008  		int16(-117), int16(7066), int16(8331), int16(1959), int16(-901),
 17009  	},
 17010  	{
 17011  		int16(593), int16(3412), int16(6070), int16(4914), int16(1567),
 17012  	},
 17013  	{
 17014  		int16(54), int16(-51), int16(12618), int16(4228), int16(-844),
 17015  	},
 17016  	{
 17017  		int16(3157), int16(4822), int16(5229), int16(2313), int16(717),
 17018  	},
 17019  	{
 17020  		int16(-244), int16(1161), int16(14198), int16(779), int16(69),
 17021  	},
 17022  	{
 17023  		int16(-1218), int16(5603), int16(12894), int16(-2301), int16(1001),
 17024  	},
 17025  	{
 17026  		int16(-132), int16(3960), int16(9526), int16(577), int16(1806),
 17027  	},
 17028  	{
 17029  		int16(-1633), int16(8815), int16(10484), int16(-2452), int16(895),
 17030  	},
 17031  	{
 17032  		int16(235), int16(450), int16(1243), int16(667), int16(437),
 17033  	},
 17034  	{
 17035  		int16(959), int16(-2630), int16(10897), int16(8772), int16(-1852),
 17036  	},
 17037  	{
 17038  		int16(2420), int16(2046), int16(8893), int16(4427), int16(-1569),
 17039  	},
 17040  	{
 17041  		int16(23), int16(7091), int16(8356), int16(-1285), int16(1508),
 17042  	},
 17043  	{
 17044  		int16(-1133), int16(835), int16(7662), int16(6043), int16(2800),
 17045  	},
 17046  	{
 17047  		int16(439), int16(391), int16(11016), int16(2253), int16(1362),
 17048  	},
 17049  	{
 17050  		int16(-1020), int16(2876), int16(13436), int16(4015), int16(-3020),
 17051  	},
 17052  	{
 17053  		int16(1060), int16(-2690), int16(13512), int16(5565), int16(-1394),
 17054  	},
 17055  	{
 17056  		int16(-1420), int16(8007), int16(11421), int16(-152), int16(-1672),
 17057  	},
 17058  	{
 17059  		int16(-893), int16(2895), int16(15434), int16(-1490), int16(159),
 17060  	},
 17061  	{
 17062  		int16(-1054), int16(428), int16(12208), int16(8538), int16(-3344),
 17063  	},
 17064  	{
 17065  		int16(1772), int16(-1304), int16(7593), int16(6185), int16(561),
 17066  	},
 17067  	{
 17068  		int16(525), int16(-1207), int16(6659), int16(11151), int16(-1170),
 17069  	},
 17070  	{
 17071  		int16(439), int16(2667), int16(4743), int16(2359), int16(5515),
 17072  	},
 17073  	{
 17074  		int16(2951), int16(7432), int16(7909), int16(-230), int16(-1564),
 17075  	},
 17076  	{
 17077  		int16(-72), int16(2140), int16(5477), int16(1391), int16(1580),
 17078  	},
 17079  	{
 17080  		int16(476), int16(-1312), int16(15912), int16(2174), int16(-1027),
 17081  	},
 17082  	{
 17083  		int16(5737), int16(441), int16(2493), int16(2043), int16(2757),
 17084  	},
 17085  	{
 17086  		int16(228), int16(-43), int16(1803), int16(6663), int16(7064),
 17087  	},
 17088  	{
 17089  		int16(4596), int16(9182), int16(1917), int16(-200), int16(203),
 17090  	},
 17091  	{
 17092  		int16(-704), int16(12039), int16(5451), int16(-1188), int16(542),
 17093  	},
 17094  	{
 17095  		int16(1782), int16(-1040), int16(10078), int16(7513), int16(-2767),
 17096  	},
 17097  	{
 17098  		int16(-2626), int16(7747), int16(9019), int16(62), int16(1710),
 17099  	},
 17100  	{
 17101  		int16(235), int16(-233), int16(2954), int16(10921), int16(1947),
 17102  	},
 17103  	{
 17104  		int16(10854), int16(2814), int16(1232), int16(-111), int16(222),
 17105  	},
 17106  	{
 17107  		int16(2267), int16(2778), int16(12325), int16(156), int16(-1658),
 17108  	},
 17109  	{
 17110  		int16(-2950), int16(8095), int16(16330), int16(268), int16(-3626),
 17111  	},
 17112  	{
 17113  		int16(67), int16(2083), int16(7950), int16(-80), int16(-2432),
 17114  	},
 17115  	{
 17116  		int16(518), int16(-66), int16(1718), int16(415), int16(11435),
 17117  	},
 17118  } /* SKP_Silk_tables_LTP.c:192:17 */
 17119  
 17120  var SKP_Silk_LTP_vq_ptrs_Q14 = [3]uintptr{
 17121  	0,
 17122  	0,
 17123  	0,
 17124  } /* SKP_Silk_tables_LTP.c:316:17 */
 17125  
 17126  var SKP_Silk_LTP_vq_sizes = [3]int32{
 17127  	10, 20, 40,
 17128  } /* SKP_Silk_tables_LTP.c:322:15 */
 17129  
 17130  var SKP_Silk_NLSF_MSVQ_CB0_10_CDF = [126]uint16{
 17131  	uint16(0),
 17132  	uint16(2658),
 17133  	uint16(4420),
 17134  	uint16(6107),
 17135  	uint16(7757),
 17136  	uint16(9408),
 17137  	uint16(10955),
 17138  	uint16(12502),
 17139  	uint16(13983),
 17140  	uint16(15432),
 17141  	uint16(16882),
 17142  	uint16(18331),
 17143  	uint16(19750),
 17144  	uint16(21108),
 17145  	uint16(22409),
 17146  	uint16(23709),
 17147  	uint16(25010),
 17148  	uint16(26256),
 17149  	uint16(27501),
 17150  	uint16(28747),
 17151  	uint16(29965),
 17152  	uint16(31158),
 17153  	uint16(32351),
 17154  	uint16(33544),
 17155  	uint16(34736),
 17156  	uint16(35904),
 17157  	uint16(36997),
 17158  	uint16(38091),
 17159  	uint16(39185),
 17160  	uint16(40232),
 17161  	uint16(41280),
 17162  	uint16(42327),
 17163  	uint16(43308),
 17164  	uint16(44290),
 17165  	uint16(45271),
 17166  	uint16(46232),
 17167  	uint16(47192),
 17168  	uint16(48132),
 17169  	uint16(49032),
 17170  	uint16(49913),
 17171  	uint16(50775),
 17172  	uint16(51618),
 17173  	uint16(52462),
 17174  	uint16(53287),
 17175  	uint16(54095),
 17176  	uint16(54885),
 17177  	uint16(55675),
 17178  	uint16(56449),
 17179  	uint16(57222),
 17180  	uint16(57979),
 17181  	uint16(58688),
 17182  	uint16(59382),
 17183  	uint16(60076),
 17184  	uint16(60726),
 17185  	uint16(61363),
 17186  	uint16(61946),
 17187  	uint16(62505),
 17188  	uint16(63052),
 17189  	uint16(63543),
 17190  	uint16(63983),
 17191  	uint16(64396),
 17192  	uint16(64766),
 17193  	uint16(65023),
 17194  	uint16(65279),
 17195  	uint16(65535),
 17196  	uint16(0),
 17197  	uint16(4977),
 17198  	uint16(9542),
 17199  	uint16(14106),
 17200  	uint16(18671),
 17201  	uint16(23041),
 17202  	uint16(27319),
 17203  	uint16(31596),
 17204  	uint16(35873),
 17205  	uint16(39969),
 17206  	uint16(43891),
 17207  	uint16(47813),
 17208  	uint16(51652),
 17209  	uint16(55490),
 17210  	uint16(59009),
 17211  	uint16(62307),
 17212  	uint16(65535),
 17213  	uint16(0),
 17214  	uint16(8571),
 17215  	uint16(17142),
 17216  	uint16(25529),
 17217  	uint16(33917),
 17218  	uint16(42124),
 17219  	uint16(49984),
 17220  	uint16(57844),
 17221  	uint16(65535),
 17222  	uint16(0),
 17223  	uint16(8732),
 17224  	uint16(17463),
 17225  	uint16(25825),
 17226  	uint16(34007),
 17227  	uint16(42189),
 17228  	uint16(50196),
 17229  	uint16(58032),
 17230  	uint16(65535),
 17231  	uint16(0),
 17232  	uint16(8948),
 17233  	uint16(17704),
 17234  	uint16(25733),
 17235  	uint16(33762),
 17236  	uint16(41791),
 17237  	uint16(49821),
 17238  	uint16(57678),
 17239  	uint16(65535),
 17240  	uint16(0),
 17241  	uint16(4374),
 17242  	uint16(8655),
 17243  	uint16(12936),
 17244  	uint16(17125),
 17245  	uint16(21313),
 17246  	uint16(25413),
 17247  	uint16(29512),
 17248  	uint16(33611),
 17249  	uint16(37710),
 17250  	uint16(41809),
 17251  	uint16(45820),
 17252  	uint16(49832),
 17253  	uint16(53843),
 17254  	uint16(57768),
 17255  	uint16(61694),
 17256  	uint16(65535),
 17257  } /* SKP_Silk_tables_NLSF_CB0_10.c:38:18 */
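// Illustrative sketch (not part of the generated SILK sources): the table above
// concatenates one monotone CDF per codebook stage, each segment running from 0
// to 65535, with the *_CDF_start_ptr table below presumably pointing at each
// stage's start. A range decoder recovers a codebook index by finding the CDF
// interval that contains a cumulative frequency; a hypothetical helper doing
// that lookup linearly (names and signature are assumptions, not SDK API):
func silkCDFIndexSketch(cdf []uint16, freq uint16) int {
	// symbol i owns the interval [cdf[i], cdf[i+1]); advance while the next
	// boundary is still at or below freq, stopping at the last symbol
	i := 0
	for i+2 < len(cdf) && cdf[i+1] <= freq {
		i++
	}
	return i
}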
 17258  
 17259  var SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr = [6]uintptr{
 17260  	0,
 17261  	0,
 17262  	0,
 17263  	0,
 17264  	0,
 17265  	0,
 17266  } /* SKP_Silk_tables_NLSF_CB0_10.c:168:18 */
 17267  
 17268  var SKP_Silk_NLSF_MSVQ_CB0_10_CDF_middle_idx = [6]int32{
 17269  	23,
 17270  	8,
 17271  	5,
 17272  	5,
 17273  	5,
 17274  	9,
 17275  } /* SKP_Silk_tables_NLSF_CB0_10.c:178:15 */
 17276  
 17277  var SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5 = [120]int16{
 17278  	int16(148), int16(167),
 17279  	int16(169), int16(170),
 17280  	int16(170), int16(173),
 17281  	int16(173), int16(175),
 17282  	int16(176), int16(176),
 17283  	int16(176), int16(177),
 17284  	int16(179), int16(181),
 17285  	int16(181), int16(181),
 17286  	int16(183), int16(183),
 17287  	int16(183), int16(184),
 17288  	int16(185), int16(185),
 17289  	int16(185), int16(185),
 17290  	int16(186), int16(189),
 17291  	int16(189), int16(189),
 17292  	int16(191), int16(191),
 17293  	int16(191), int16(194),
 17294  	int16(194), int16(194),
 17295  	int16(195), int16(195),
 17296  	int16(196), int16(198),
 17297  	int16(199), int16(200),
 17298  	int16(201), int16(201),
 17299  	int16(202), int16(203),
 17300  	int16(204), int16(204),
 17301  	int16(205), int16(205),
 17302  	int16(206), int16(209),
 17303  	int16(210), int16(210),
 17304  	int16(213), int16(214),
 17305  	int16(218), int16(220),
 17306  	int16(221), int16(226),
 17307  	int16(231), int16(234),
 17308  	int16(239), int16(256),
 17309  	int16(256), int16(256),
 17310  	int16(119), int16(123),
 17311  	int16(123), int16(123),
 17312  	int16(125), int16(126),
 17313  	int16(126), int16(126),
 17314  	int16(128), int16(130),
 17315  	int16(130), int16(131),
 17316  	int16(131), int16(135),
 17317  	int16(138), int16(139),
 17318  	int16(94), int16(94),
 17319  	int16(95), int16(95),
 17320  	int16(96), int16(98),
 17321  	int16(98), int16(99),
 17322  	int16(93), int16(93),
 17323  	int16(95), int16(96),
 17324  	int16(96), int16(97),
 17325  	int16(98), int16(100),
 17326  	int16(92), int16(93),
 17327  	int16(97), int16(97),
 17328  	int16(97), int16(97),
 17329  	int16(98), int16(98),
 17330  	int16(125), int16(126),
 17331  	int16(126), int16(127),
 17332  	int16(127), int16(128),
 17333  	int16(128), int16(128),
 17334  	int16(128), int16(128),
 17335  	int16(129), int16(129),
 17336  	int16(129), int16(130),
 17337  	int16(130), int16(131),
 17338  } /* SKP_Silk_tables_NLSF_CB0_10.c:188:17 */
 17339  
 17340  var SKP_Silk_NLSF_MSVQ_CB0_10_ndelta_min_Q15 = [11]int32{
 17341  	563,
 17342  	3,
 17343  	22,
 17344  	20,
 17345  	3,
 17346  	3,
 17347  	132,
 17348  	119,
 17349  	358,
 17350  	86,
 17351  	964,
 17352  } /* SKP_Silk_tables_NLSF_CB0_10.c:252:15 */
 17353  
 17354  var SKP_Silk_NLSF_MSVQ_CB0_10_Q15 = [1200]int16{
 17355  	int16(2210), int16(4023),
 17356  	int16(6981), int16(9260),
 17357  	int16(12573), int16(15687),
 17358  	int16(19207), int16(22383),
 17359  	int16(25981), int16(29142),
 17360  	int16(3285), int16(4172),
 17361  	int16(6116), int16(10856),
 17362  	int16(15289), int16(16826),
 17363  	int16(19701), int16(22010),
 17364  	int16(24721), int16(29313),
 17365  	int16(1554), int16(2511),
 17366  	int16(6577), int16(10337),
 17367  	int16(13837), int16(16511),
 17368  	int16(20086), int16(23214),
 17369  	int16(26480), int16(29464),
 17370  	int16(3062), int16(4017),
 17371  	int16(5771), int16(10037),
 17372  	int16(13365), int16(14952),
 17373  	int16(20140), int16(22891),
 17374  	int16(25229), int16(29603),
 17375  	int16(2085), int16(3457),
 17376  	int16(5934), int16(8718),
 17377  	int16(11501), int16(13670),
 17378  	int16(17997), int16(21817),
 17379  	int16(24935), int16(28745),
 17380  	int16(2776), int16(4093),
 17381  	int16(6421), int16(10413),
 17382  	int16(15111), int16(16806),
 17383  	int16(20825), int16(23826),
 17384  	int16(26308), int16(29411),
 17385  	int16(2717), int16(4034),
 17386  	int16(5697), int16(8463),
 17387  	int16(14301), int16(16354),
 17388  	int16(19007), int16(23413),
 17389  	int16(25812), int16(28506),
 17390  	int16(2872), int16(3702),
 17391  	int16(5881), int16(11034),
 17392  	int16(17141), int16(18879),
 17393  	int16(21146), int16(23451),
 17394  	int16(25817), int16(29600),
 17395  	int16(2999), int16(4015),
 17396  	int16(7357), int16(11219),
 17397  	int16(12866), int16(17307),
 17398  	int16(20081), int16(22644),
 17399  	int16(26774), int16(29107),
 17400  	int16(2942), int16(3866),
 17401  	int16(5918), int16(11915),
 17402  	int16(13909), int16(16072),
 17403  	int16(20453), int16(22279),
 17404  	int16(27310), int16(29826),
 17405  	int16(2271), int16(3527),
 17406  	int16(6606), int16(9729),
 17407  	int16(12943), int16(17382),
 17408  	int16(20224), int16(22345),
 17409  	int16(24602), int16(28290),
 17410  	int16(2207), int16(3310),
 17411  	int16(5844), int16(9339),
 17412  	int16(11141), int16(15651),
 17413  	int16(18576), int16(21177),
 17414  	int16(25551), int16(28228),
 17415  	int16(3963), int16(4975),
 17416  	int16(6901), int16(11588),
 17417  	int16(13466), int16(15577),
 17418  	int16(19231), int16(21368),
 17419  	int16(25510), int16(27759),
 17420  	int16(2749), int16(3549),
 17421  	int16(6966), int16(13808),
 17422  	int16(15653), int16(17645),
 17423  	int16(20090), int16(22599),
 17424  	int16(26467), int16(28537),
 17425  	int16(2126), int16(3504),
 17426  	int16(5109), int16(9954),
 17427  	int16(12550), int16(14620),
 17428  	int16(19703), int16(21687),
 17429  	int16(26457), int16(29106),
 17430  	int16(3966), int16(5745),
 17431  	int16(7442), int16(9757),
 17432  	int16(14468), int16(16404),
 17433  	int16(19135), int16(23048),
 17434  	int16(25375), int16(28391),
 17435  	int16(3197), int16(4751),
 17436  	int16(6451), int16(9298),
 17437  	int16(13038), int16(14874),
 17438  	int16(17962), int16(20627),
 17439  	int16(23835), int16(28464),
 17440  	int16(3195), int16(4081),
 17441  	int16(6499), int16(12252),
 17442  	int16(14289), int16(16040),
 17443  	int16(18357), int16(20730),
 17444  	int16(26980), int16(29309),
 17445  	int16(1533), int16(2471),
 17446  	int16(4486), int16(7796),
 17447  	int16(12332), int16(15758),
 17448  	int16(19567), int16(22298),
 17449  	int16(25673), int16(29051),
 17450  	int16(2002), int16(2971),
 17451  	int16(4985), int16(8083),
 17452  	int16(13181), int16(15435),
 17453  	int16(18237), int16(21517),
 17454  	int16(24595), int16(28351),
 17455  	int16(3808), int16(4925),
 17456  	int16(6710), int16(10201),
 17457  	int16(12011), int16(14300),
 17458  	int16(18457), int16(20391),
 17459  	int16(26525), int16(28956),
 17460  	int16(2281), int16(3418),
 17461  	int16(4979), int16(8726),
 17462  	int16(15964), int16(18104),
 17463  	int16(20250), int16(22771),
 17464  	int16(25286), int16(28954),
 17465  	int16(3051), int16(5479),
 17466  	int16(7290), int16(9848),
 17467  	int16(12744), int16(14503),
 17468  	int16(18665), int16(23684),
 17469  	int16(26065), int16(28947),
 17470  	int16(2364), int16(3565),
 17471  	int16(5502), int16(9621),
 17472  	int16(14922), int16(16621),
 17473  	int16(19005), int16(20996),
 17474  	int16(26310), int16(29302),
 17475  	int16(4093), int16(5212),
 17476  	int16(6833), int16(9880),
 17477  	int16(16303), int16(18286),
 17478  	int16(20571), int16(23614),
 17479  	int16(26067), int16(29128),
 17480  	int16(2941), int16(3996),
 17481  	int16(6038), int16(10638),
 17482  	int16(12668), int16(14451),
 17483  	int16(16798), int16(19392),
 17484  	int16(26051), int16(28517),
 17485  	int16(3863), int16(5212),
 17486  	int16(7019), int16(9468),
 17487  	int16(11039), int16(13214),
 17488  	int16(19942), int16(22344),
 17489  	int16(25126), int16(29539),
 17490  	int16(4615), int16(6172),
 17491  	int16(7853), int16(10252),
 17492  	int16(12611), int16(14445),
 17493  	int16(19719), int16(22441),
 17494  	int16(24922), int16(29341),
 17495  	int16(3566), int16(4512),
 17496  	int16(6985), int16(8684),
 17497  	int16(10544), int16(16097),
 17498  	int16(18058), int16(22475),
 17499  	int16(26066), int16(28167),
 17500  	int16(4481), int16(5489),
 17501  	int16(7432), int16(11414),
 17502  	int16(13191), int16(15225),
 17503  	int16(20161), int16(22258),
 17504  	int16(26484), int16(29716),
 17505  	int16(3320), int16(4320),
 17506  	int16(6621), int16(9867),
 17507  	int16(11581), int16(14034),
 17508  	int16(21168), int16(23210),
 17509  	int16(26588), int16(29903),
 17510  	int16(3794), int16(4689),
 17511  	int16(6916), int16(8655),
 17512  	int16(10143), int16(16144),
 17513  	int16(19568), int16(21588),
 17514  	int16(27557), int16(29593),
 17515  	int16(2446), int16(3276),
 17516  	int16(5918), int16(12643),
 17517  	int16(16601), int16(18013),
 17518  	int16(21126), int16(23175),
 17519  	int16(27300), int16(29634),
 17520  	int16(2450), int16(3522),
 17521  	int16(5437), int16(8560),
 17522  	int16(15285), int16(19911),
 17523  	int16(21826), int16(24097),
 17524  	int16(26567), int16(29078),
 17525  	int16(2580), int16(3796),
 17526  	int16(5580), int16(8338),
 17527  	int16(9969), int16(12675),
 17528  	int16(18907), int16(22753),
 17529  	int16(25450), int16(29292),
 17530  	int16(3325), int16(4312),
 17531  	int16(6241), int16(7709),
 17532  	int16(9164), int16(14452),
 17533  	int16(21665), int16(23797),
 17534  	int16(27096), int16(29857),
 17535  	int16(3338), int16(4163),
 17536  	int16(7738), int16(11114),
 17537  	int16(12668), int16(14753),
 17538  	int16(16931), int16(22736),
 17539  	int16(25671), int16(28093),
 17540  	int16(3840), int16(4755),
 17541  	int16(7755), int16(13471),
 17542  	int16(15338), int16(17180),
 17543  	int16(20077), int16(22353),
 17544  	int16(27181), int16(29743),
 17545  	int16(2504), int16(4079),
 17546  	int16(8351), int16(12118),
 17547  	int16(15046), int16(18595),
 17548  	int16(21684), int16(24704),
 17549  	int16(27519), int16(29937),
 17550  	int16(5234), int16(6342),
 17551  	int16(8267), int16(11821),
 17552  	int16(15155), int16(16760),
 17553  	int16(20667), int16(23488),
 17554  	int16(25949), int16(29307),
 17555  	int16(2681), int16(3562),
 17556  	int16(6028), int16(10827),
 17557  	int16(18458), int16(20458),
 17558  	int16(22303), int16(24701),
 17559  	int16(26912), int16(29956),
 17560  	int16(3374), int16(4528),
 17561  	int16(6230), int16(8256),
 17562  	int16(9513), int16(12730),
 17563  	int16(18666), int16(20720),
 17564  	int16(26007), int16(28425),
 17565  	int16(2731), int16(3629),
 17566  	int16(8320), int16(12450),
 17567  	int16(14112), int16(16431),
 17568  	int16(18548), int16(22098),
 17569  	int16(25329), int16(27718),
 17570  	int16(3481), int16(4401),
 17571  	int16(7321), int16(9319),
 17572  	int16(11062), int16(13093),
 17573  	int16(15121), int16(22315),
 17574  	int16(26331), int16(28740),
 17575  	int16(3577), int16(4945),
 17576  	int16(6669), int16(8792),
 17577  	int16(10299), int16(12645),
 17578  	int16(19505), int16(24766),
 17579  	int16(26996), int16(29634),
 17580  	int16(4058), int16(5060),
 17581  	int16(7288), int16(10190),
 17582  	int16(11724), int16(13936),
 17583  	int16(15849), int16(18539),
 17584  	int16(26701), int16(29845),
 17585  	int16(4262), int16(5390),
 17586  	int16(7057), int16(8982),
 17587  	int16(10187), int16(15264),
 17588  	int16(20480), int16(22340),
 17589  	int16(25958), int16(28072),
 17590  	int16(3404), int16(4329),
 17591  	int16(6629), int16(7946),
 17592  	int16(10121), int16(17165),
 17593  	int16(19640), int16(22244),
 17594  	int16(25062), int16(27472),
 17595  	int16(3157), int16(4168),
 17596  	int16(6195), int16(9319),
 17597  	int16(10771), int16(13325),
 17598  	int16(15416), int16(19816),
 17599  	int16(24672), int16(27634),
 17600  	int16(2503), int16(3473),
 17601  	int16(5130), int16(6767),
 17602  	int16(8571), int16(14902),
 17603  	int16(19033), int16(21926),
 17604  	int16(26065), int16(28728),
 17605  	int16(4133), int16(5102),
 17606  	int16(7553), int16(10054),
 17607  	int16(11757), int16(14924),
 17608  	int16(17435), int16(20186),
 17609  	int16(23987), int16(26272),
 17610  	int16(4972), int16(6139),
 17611  	int16(7894), int16(9633),
 17612  	int16(11320), int16(14295),
 17613  	int16(21737), int16(24306),
 17614  	int16(26919), int16(29907),
 17615  	int16(2958), int16(3816),
 17616  	int16(6851), int16(9204),
 17617  	int16(10895), int16(18052),
 17618  	int16(20791), int16(23338),
 17619  	int16(27556), int16(29609),
 17620  	int16(5234), int16(6028),
 17621  	int16(8034), int16(10154),
 17622  	int16(11242), int16(14789),
 17623  	int16(18948), int16(20966),
 17624  	int16(26585), int16(29127),
 17625  	int16(5241), int16(6838),
 17626  	int16(10526), int16(12819),
 17627  	int16(14681), int16(17328),
 17628  	int16(19928), int16(22336),
 17629  	int16(26193), int16(28697),
 17630  	int16(3412), int16(4251),
 17631  	int16(5988), int16(7094),
 17632  	int16(9907), int16(18243),
 17633  	int16(21669), int16(23777),
 17634  	int16(26969), int16(29087),
 17635  	int16(2470), int16(3217),
 17636  	int16(7797), int16(15296),
 17637  	int16(17365), int16(19135),
 17638  	int16(21979), int16(24256),
 17639  	int16(27322), int16(29442),
 17640  	int16(4939), int16(5804),
 17641  	int16(8145), int16(11809),
 17642  	int16(13873), int16(15598),
 17643  	int16(17234), int16(19423),
 17644  	int16(26476), int16(29645),
 17645  	int16(5051), int16(6167),
 17646  	int16(8223), int16(9655),
 17647  	int16(12159), int16(17995),
 17648  	int16(20464), int16(22832),
 17649  	int16(26616), int16(28462),
 17650  	int16(4987), int16(5907),
 17651  	int16(9319), int16(11245),
 17652  	int16(13132), int16(15024),
 17653  	int16(17485), int16(22687),
 17654  	int16(26011), int16(28273),
 17655  	int16(5137), int16(6884),
 17656  	int16(11025), int16(14950),
 17657  	int16(17191), int16(19425),
 17658  	int16(21807), int16(24393),
 17659  	int16(26938), int16(29288),
 17660  	int16(7057), int16(7884),
 17661  	int16(9528), int16(10483),
 17662  	int16(10960), int16(14811),
 17663  	int16(19070), int16(21675),
 17664  	int16(25645), int16(28019),
 17665  	int16(6759), int16(7160),
 17666  	int16(8546), int16(11779),
 17667  	int16(12295), int16(13023),
 17668  	int16(16627), int16(21099),
 17669  	int16(24697), int16(28287),
 17670  	int16(3863), int16(9762),
 17671  	int16(11068), int16(11445),
 17672  	int16(12049), int16(13960),
 17673  	int16(18085), int16(21507),
 17674  	int16(25224), int16(28997),
 17675  	int16(397), int16(335),
 17676  	int16(651), int16(1168),
 17677  	int16(640), int16(765),
 17678  	int16(465), int16(331),
 17679  	int16(214), int16(-194),
 17680  	int16(-578), int16(-647),
 17681  	int16(-657), int16(750),
 17682  	int16(564), int16(613),
 17683  	int16(549), int16(630),
 17684  	int16(304), int16(-52),
 17685  	int16(828), int16(922),
 17686  	int16(443), int16(111),
 17687  	int16(138), int16(124),
 17688  	int16(169), int16(14),
 17689  	int16(144), int16(83),
 17690  	int16(132), int16(58),
 17691  	int16(-413), int16(-752),
 17692  	int16(869), int16(336),
 17693  	int16(385), int16(69),
 17694  	int16(56), int16(830),
 17695  	int16(-227), int16(-266),
 17696  	int16(-368), int16(-440),
 17697  	int16(-1195), int16(163),
 17698  	int16(126), int16(-228),
 17699  	int16(802), int16(156),
 17700  	int16(188), int16(120),
 17701  	int16(376), int16(59),
 17702  	int16(-358), int16(-558),
 17703  	int16(-1326), int16(-254),
 17704  	int16(-202), int16(-789),
 17705  	int16(296), int16(92),
 17706  	int16(-70), int16(-129),
 17707  	int16(-718), int16(-1135),
 17708  	int16(292), int16(-29),
 17709  	int16(-631), int16(487),
 17710  	int16(-157), int16(-153),
 17711  	int16(-279), int16(2),
 17712  	int16(-419), int16(-342),
 17713  	int16(-34), int16(-514),
 17714  	int16(-799), int16(-1571),
 17715  	int16(-687), int16(-609),
 17716  	int16(-546), int16(-130),
 17717  	int16(-215), int16(-252),
 17718  	int16(-446), int16(-574),
 17719  	int16(-1337), int16(207),
 17720  	int16(-72), int16(32),
 17721  	int16(103), int16(-642),
 17722  	int16(942), int16(733),
 17723  	int16(187), int16(29),
 17724  	int16(-211), int16(-814),
 17725  	int16(143), int16(225),
 17726  	int16(20), int16(24),
 17727  	int16(-268), int16(-377),
 17728  	int16(1623), int16(1133),
 17729  	int16(667), int16(164),
 17730  	int16(307), int16(366),
 17731  	int16(187), int16(34),
 17732  	int16(62), int16(-313),
 17733  	int16(-832), int16(-1482),
 17734  	int16(-1181), int16(483),
 17735  	int16(-42), int16(-39),
 17736  	int16(-450), int16(-1406),
 17737  	int16(-587), int16(-52),
 17738  	int16(-760), int16(334),
 17739  	int16(98), int16(-60),
 17740  	int16(-500), int16(-488),
 17741  	int16(-1058), int16(299),
 17742  	int16(131), int16(-250),
 17743  	int16(-251), int16(-703),
 17744  	int16(1037), int16(568),
 17745  	int16(-413), int16(-265),
 17746  	int16(1687), int16(573),
 17747  	int16(345), int16(323),
 17748  	int16(98), int16(61),
 17749  	int16(-102), int16(31),
 17750  	int16(135), int16(149),
 17751  	int16(617), int16(365),
 17752  	int16(-39), int16(34),
 17753  	int16(-611), int16(1201),
 17754  	int16(1421), int16(736),
 17755  	int16(-414), int16(-393),
 17756  	int16(-492), int16(-343),
 17757  	int16(-316), int16(-532),
 17758  	int16(528), int16(172),
 17759  	int16(90), int16(322),
 17760  	int16(-294), int16(-319),
 17761  	int16(-541), int16(503),
 17762  	int16(639), int16(401),
 17763  	int16(1), int16(-149),
 17764  	int16(-73), int16(-167),
 17765  	int16(150), int16(118),
 17766  	int16(308), int16(218),
 17767  	int16(121), int16(195),
 17768  	int16(-143), int16(-261),
 17769  	int16(-1013), int16(-802),
 17770  	int16(387), int16(436),
 17771  	int16(130), int16(-427),
 17772  	int16(-448), int16(-681),
 17773  	int16(123), int16(-87),
 17774  	int16(-251), int16(-113),
 17775  	int16(274), int16(310),
 17776  	int16(445), int16(501),
 17777  	int16(354), int16(272),
 17778  	int16(141), int16(-285),
 17779  	int16(569), int16(656),
 17780  	int16(37), int16(-49),
 17781  	int16(251), int16(-386),
 17782  	int16(-263), int16(1122),
 17783  	int16(604), int16(606),
 17784  	int16(336), int16(95),
 17785  	int16(34), int16(0),
 17786  	int16(85), int16(180),
 17787  	int16(207), int16(-367),
 17788  	int16(-622), int16(1070),
 17789  	int16(-6), int16(-79),
 17790  	int16(-160), int16(-92),
 17791  	int16(-137), int16(-276),
 17792  	int16(-323), int16(-371),
 17793  	int16(-696), int16(-1036),
 17794  	int16(407), int16(102),
 17795  	int16(-86), int16(-214),
 17796  	int16(-482), int16(-647),
 17797  	int16(-28), int16(-291),
 17798  	int16(-97), int16(-180),
 17799  	int16(-250), int16(-435),
 17800  	int16(-18), int16(-76),
 17801  	int16(-332), int16(410),
 17802  	int16(407), int16(168),
 17803  	int16(539), int16(411),
 17804  	int16(254), int16(111),
 17805  	int16(58), int16(-145),
 17806  	int16(200), int16(30),
 17807  	int16(187), int16(116),
 17808  	int16(131), int16(-367),
 17809  	int16(-475), int16(781),
 17810  	int16(-559), int16(561),
 17811  	int16(195), int16(-115),
 17812  	int16(8), int16(-168),
 17813  	int16(30), int16(55),
 17814  	int16(-122), int16(131),
 17815  	int16(82), int16(-5),
 17816  	int16(-273), int16(-50),
 17817  	int16(-632), int16(668),
 17818  	int16(4), int16(32),
 17819  	int16(-26), int16(-279),
 17820  	int16(315), int16(165),
 17821  	int16(197), int16(377),
 17822  	int16(155), int16(-41),
 17823  	int16(-138), int16(-324),
 17824  	int16(-109), int16(-617),
 17825  	int16(360), int16(98),
 17826  	int16(-53), int16(-319),
 17827  	int16(-114), int16(-245),
 17828  	int16(-82), int16(507),
 17829  	int16(468), int16(263),
 17830  	int16(-137), int16(-389),
 17831  	int16(652), int16(354),
 17832  	int16(-18), int16(-227),
 17833  	int16(-462), int16(-135),
 17834  	int16(317), int16(53),
 17835  	int16(-16), int16(66),
 17836  	int16(-72), int16(-126),
 17837  	int16(-356), int16(-347),
 17838  	int16(-328), int16(-72),
 17839  	int16(-337), int16(324),
 17840  	int16(152), int16(349),
 17841  	int16(169), int16(-196),
 17842  	int16(179), int16(254),
 17843  	int16(260), int16(325),
 17844  	int16(-74), int16(-80),
 17845  	int16(75), int16(-31),
 17846  	int16(270), int16(275),
 17847  	int16(87), int16(278),
 17848  	int16(-446), int16(-301),
 17849  	int16(309), int16(71),
 17850  	int16(-25), int16(-242),
 17851  	int16(516), int16(161),
 17852  	int16(-162), int16(-83),
 17853  	int16(329), int16(230),
 17854  	int16(-311), int16(-259),
 17855  	int16(177), int16(-26),
 17856  	int16(-462), int16(89),
 17857  	int16(257), int16(6),
 17858  	int16(-130), int16(-93),
 17859  	int16(-456), int16(-317),
 17860  	int16(-221), int16(-206),
 17861  	int16(-417), int16(-182),
 17862  	int16(-74), int16(234),
 17863  	int16(48), int16(261),
 17864  	int16(359), int16(231),
 17865  	int16(258), int16(85),
 17866  	int16(-282), int16(252),
 17867  	int16(-147), int16(-222),
 17868  	int16(251), int16(-207),
 17869  	int16(443), int16(123),
 17870  	int16(-417), int16(-36),
 17871  	int16(273), int16(-241),
 17872  	int16(240), int16(-112),
 17873  	int16(44), int16(-167),
 17874  	int16(126), int16(-124),
 17875  	int16(-77), int16(58),
 17876  	int16(-401), int16(333),
 17877  	int16(-118), int16(82),
 17878  	int16(126), int16(151),
 17879  	int16(-433), int16(359),
 17880  	int16(-130), int16(-102),
 17881  	int16(131), int16(-244),
 17882  	int16(86), int16(85),
 17883  	int16(-462), int16(414),
 17884  	int16(-240), int16(16),
 17885  	int16(145), int16(28),
 17886  	int16(-205), int16(-481),
 17887  	int16(373), int16(293),
 17888  	int16(-72), int16(-174),
 17889  	int16(62), int16(259),
 17890  	int16(-8), int16(-18),
 17891  	int16(362), int16(233),
 17892  	int16(185), int16(43),
 17893  	int16(278), int16(27),
 17894  	int16(193), int16(570),
 17895  	int16(-248), int16(189),
 17896  	int16(92), int16(31),
 17897  	int16(-275), int16(-3),
 17898  	int16(243), int16(176),
 17899  	int16(438), int16(209),
 17900  	int16(206), int16(-51),
 17901  	int16(79), int16(109),
 17902  	int16(168), int16(-185),
 17903  	int16(-308), int16(-68),
 17904  	int16(-618), int16(385),
 17905  	int16(-310), int16(-108),
 17906  	int16(-164), int16(165),
 17907  	int16(61), int16(-152),
 17908  	int16(-101), int16(-412),
 17909  	int16(-268), int16(-257),
 17910  	int16(-40), int16(-20),
 17911  	int16(-28), int16(-158),
 17912  	int16(-301), int16(271),
 17913  	int16(380), int16(-338),
 17914  	int16(-367), int16(-132),
 17915  	int16(64), int16(114),
 17916  	int16(-131), int16(-225),
 17917  	int16(-156), int16(-260),
 17918  	int16(-63), int16(-116),
 17919  	int16(155), int16(-586),
 17920  	int16(-202), int16(254),
 17921  	int16(-287), int16(178),
 17922  	int16(227), int16(-106),
 17923  	int16(-294), int16(164),
 17924  	int16(298), int16(-100),
 17925  	int16(185), int16(317),
 17926  	int16(193), int16(-45),
 17927  	int16(28), int16(80),
 17928  	int16(-87), int16(-433),
 17929  	int16(22), int16(-48),
 17930  	int16(48), int16(-237),
 17931  	int16(-229), int16(-139),
 17932  	int16(120), int16(-364),
 17933  	int16(268), int16(-136),
 17934  	int16(396), int16(125),
 17935  	int16(130), int16(-89),
 17936  	int16(-272), int16(118),
 17937  	int16(-256), int16(-68),
 17938  	int16(-451), int16(488),
 17939  	int16(143), int16(-165),
 17940  	int16(-48), int16(-190),
 17941  	int16(106), int16(219),
 17942  	int16(47), int16(435),
 17943  	int16(245), int16(97),
 17944  	int16(75), int16(-418),
 17945  	int16(121), int16(-187),
 17946  	int16(570), int16(-200),
 17947  	int16(-351), int16(225),
 17948  	int16(-21), int16(-217),
 17949  	int16(234), int16(-111),
 17950  	int16(194), int16(14),
 17951  	int16(242), int16(118),
 17952  	int16(140), int16(-397),
 17953  	int16(355), int16(361),
 17954  	int16(-45), int16(-195),
 17955  } /* SKP_Silk_tables_NLSF_CB0_10.c:267:17 */
 17956  
 17957  var SKP_Silk_NLSF_CB0_10_Stage_info = [6]SKP_Silk_NLSF_CBS{
 17958  	{FnVectors: 64, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 17959  	{FnVectors: 16, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 17960  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 17961  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 17962  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 17963  	{FnVectors: 16, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 17964  } /* SKP_Silk_tables_NLSF_CB0_10.c:871:25 */
 17965  
 17966  var SKP_Silk_NLSF_CB0_10 = SKP_Silk_NLSF_CB_struct{
 17967  	FnStages:       6,
 17968  	FCBStages:      0,
 17969  	FNDeltaMin_Q15: 0,
 17970  	FCDF:           0,
 17971  	FStartPtr:      0,
 17972  	FMiddleIx:      0,
 17973  } /* SKP_Silk_tables_NLSF_CB0_10.c:881:31 */
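// Taken together, the CB0_10 tables above appear to describe a 6-stage MSVQ
// codebook for 10-dimensional NLSF vectors: the stage sizes (64, 16, 8, 8, 8, 16)
// sum to 120, matching the 120 entries of SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5
// (rates in Q5, i.e. 1/32-bit units) and the 1200 (= 120 x 10) Q15 codebook
// entries in SKP_Silk_NLSF_MSVQ_CB0_10_Q15, while the 11 values in
// SKP_Silk_NLSF_MSVQ_CB0_10_ndelta_min_Q15 are presumably the minimum spacings
// (10 coefficients plus the upper band edge) applied when stabilizing decoded
// NLSF vectors.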
 17974  
 17975  var SKP_Silk_NLSF_MSVQ_CB0_16_CDF = [226]uint16{
 17976  	uint16(0),
 17977  	uint16(1449),
 17978  	uint16(2749),
 17979  	uint16(4022),
 17980  	uint16(5267),
 17981  	uint16(6434),
 17982  	uint16(7600),
 17983  	uint16(8647),
 17984  	uint16(9695),
 17985  	uint16(10742),
 17986  	uint16(11681),
 17987  	uint16(12601),
 17988  	uint16(13444),
 17989  	uint16(14251),
 17990  	uint16(15008),
 17991  	uint16(15764),
 17992  	uint16(16521),
 17993  	uint16(17261),
 17994  	uint16(18002),
 17995  	uint16(18710),
 17996  	uint16(19419),
 17997  	uint16(20128),
 17998  	uint16(20837),
 17999  	uint16(21531),
 18000  	uint16(22225),
 18001  	uint16(22919),
 18002  	uint16(23598),
 18003  	uint16(24277),
 18004  	uint16(24956),
 18005  	uint16(25620),
 18006  	uint16(26256),
 18007  	uint16(26865),
 18008  	uint16(27475),
 18009  	uint16(28071),
 18010  	uint16(28667),
 18011  	uint16(29263),
 18012  	uint16(29859),
 18013  	uint16(30443),
 18014  	uint16(31026),
 18015  	uint16(31597),
 18016  	uint16(32168),
 18017  	uint16(32727),
 18018  	uint16(33273),
 18019  	uint16(33808),
 18020  	uint16(34332),
 18021  	uint16(34855),
 18022  	uint16(35379),
 18023  	uint16(35902),
 18024  	uint16(36415),
 18025  	uint16(36927),
 18026  	uint16(37439),
 18027  	uint16(37941),
 18028  	uint16(38442),
 18029  	uint16(38932),
 18030  	uint16(39423),
 18031  	uint16(39914),
 18032  	uint16(40404),
 18033  	uint16(40884),
 18034  	uint16(41364),
 18035  	uint16(41844),
 18036  	uint16(42324),
 18037  	uint16(42805),
 18038  	uint16(43285),
 18039  	uint16(43754),
 18040  	uint16(44224),
 18041  	uint16(44694),
 18042  	uint16(45164),
 18043  	uint16(45623),
 18044  	uint16(46083),
 18045  	uint16(46543),
 18046  	uint16(46993),
 18047  	uint16(47443),
 18048  	uint16(47892),
 18049  	uint16(48333),
 18050  	uint16(48773),
 18051  	uint16(49213),
 18052  	uint16(49653),
 18053  	uint16(50084),
 18054  	uint16(50515),
 18055  	uint16(50946),
 18056  	uint16(51377),
 18057  	uint16(51798),
 18058  	uint16(52211),
 18059  	uint16(52614),
 18060  	uint16(53018),
 18061  	uint16(53422),
 18062  	uint16(53817),
 18063  	uint16(54212),
 18064  	uint16(54607),
 18065  	uint16(55002),
 18066  	uint16(55388),
 18067  	uint16(55775),
 18068  	uint16(56162),
 18069  	uint16(56548),
 18070  	uint16(56910),
 18071  	uint16(57273),
 18072  	uint16(57635),
 18073  	uint16(57997),
 18074  	uint16(58352),
 18075  	uint16(58698),
 18076  	uint16(59038),
 18077  	uint16(59370),
 18078  	uint16(59702),
 18079  	uint16(60014),
 18080  	uint16(60325),
 18081  	uint16(60630),
 18082  	uint16(60934),
 18083  	uint16(61239),
 18084  	uint16(61537),
 18085  	uint16(61822),
 18086  	uint16(62084),
 18087  	uint16(62346),
 18088  	uint16(62602),
 18089  	uint16(62837),
 18090  	uint16(63072),
 18091  	uint16(63302),
 18092  	uint16(63517),
 18093  	uint16(63732),
 18094  	uint16(63939),
 18095  	uint16(64145),
 18096  	uint16(64342),
 18097  	uint16(64528),
 18098  	uint16(64701),
 18099  	uint16(64867),
 18100  	uint16(65023),
 18101  	uint16(65151),
 18102  	uint16(65279),
 18103  	uint16(65407),
 18104  	uint16(65535),
 18105  	uint16(0),
 18106  	uint16(5099),
 18107  	uint16(9982),
 18108  	uint16(14760),
 18109  	uint16(19538),
 18110  	uint16(24213),
 18111  	uint16(28595),
 18112  	uint16(32976),
 18113  	uint16(36994),
 18114  	uint16(41012),
 18115  	uint16(44944),
 18116  	uint16(48791),
 18117  	uint16(52557),
 18118  	uint16(56009),
 18119  	uint16(59388),
 18120  	uint16(62694),
 18121  	uint16(65535),
 18122  	uint16(0),
 18123  	uint16(9955),
 18124  	uint16(19697),
 18125  	uint16(28825),
 18126  	uint16(36842),
 18127  	uint16(44686),
 18128  	uint16(52198),
 18129  	uint16(58939),
 18130  	uint16(65535),
 18131  	uint16(0),
 18132  	uint16(8949),
 18133  	uint16(17335),
 18134  	uint16(25720),
 18135  	uint16(33926),
 18136  	uint16(41957),
 18137  	uint16(49987),
 18138  	uint16(57845),
 18139  	uint16(65535),
 18140  	uint16(0),
 18141  	uint16(9724),
 18142  	uint16(18642),
 18143  	uint16(26998),
 18144  	uint16(35355),
 18145  	uint16(43532),
 18146  	uint16(51534),
 18147  	uint16(59365),
 18148  	uint16(65535),
 18149  	uint16(0),
 18150  	uint16(8750),
 18151  	uint16(17499),
 18152  	uint16(26249),
 18153  	uint16(34448),
 18154  	uint16(42471),
 18155  	uint16(50494),
 18156  	uint16(58178),
 18157  	uint16(65535),
 18158  	uint16(0),
 18159  	uint16(8730),
 18160  	uint16(17273),
 18161  	uint16(25816),
 18162  	uint16(34176),
 18163  	uint16(42536),
 18164  	uint16(50203),
 18165  	uint16(57869),
 18166  	uint16(65535),
 18167  	uint16(0),
 18168  	uint16(8769),
 18169  	uint16(17538),
 18170  	uint16(26307),
 18171  	uint16(34525),
 18172  	uint16(42742),
 18173  	uint16(50784),
 18174  	uint16(58319),
 18175  	uint16(65535),
 18176  	uint16(0),
 18177  	uint16(8736),
 18178  	uint16(17101),
 18179  	uint16(25466),
 18180  	uint16(33653),
 18181  	uint16(41839),
 18182  	uint16(50025),
 18183  	uint16(57864),
 18184  	uint16(65535),
 18185  	uint16(0),
 18186  	uint16(4368),
 18187  	uint16(8735),
 18188  	uint16(12918),
 18189  	uint16(17100),
 18190  	uint16(21283),
 18191  	uint16(25465),
 18192  	uint16(29558),
 18193  	uint16(33651),
 18194  	uint16(37744),
 18195  	uint16(41836),
 18196  	uint16(45929),
 18197  	uint16(50022),
 18198  	uint16(54027),
 18199  	uint16(57947),
 18200  	uint16(61782),
 18201  	uint16(65535),
 18202  } /* SKP_Silk_tables_NLSF_CB0_16.c:38:18 */
 18203  
 18204  var SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr = [10]uintptr{
 18205  	0,
 18206  	0,
 18207  	0,
 18208  	0,
 18209  	0,
 18210  	0,
 18211  	0,
 18212  	0,
 18213  	0,
 18214  	0,
 18215  } /* SKP_Silk_tables_NLSF_CB0_16.c:268:18 */
 18216  
 18217  var SKP_Silk_NLSF_MSVQ_CB0_16_CDF_middle_idx = [10]int32{
 18218  	42,
 18219  	8,
 18220  	4,
 18221  	5,
 18222  	5,
 18223  	5,
 18224  	5,
 18225  	5,
 18226  	5,
 18227  	9,
 18228  } /* SKP_Silk_tables_NLSF_CB0_16.c:282:15 */
 18229  
 18230  var SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5 = [216]int16{
 18231  	int16(176), int16(181),
 18232  	int16(182), int16(183),
 18233  	int16(186), int16(186),
 18234  	int16(191), int16(191),
 18235  	int16(191), int16(196),
 18236  	int16(197), int16(201),
 18237  	int16(203), int16(206),
 18238  	int16(206), int16(206),
 18239  	int16(207), int16(207),
 18240  	int16(209), int16(209),
 18241  	int16(209), int16(209),
 18242  	int16(210), int16(210),
 18243  	int16(210), int16(211),
 18244  	int16(211), int16(211),
 18245  	int16(212), int16(214),
 18246  	int16(216), int16(216),
 18247  	int16(217), int16(217),
 18248  	int16(217), int16(217),
 18249  	int16(218), int16(218),
 18250  	int16(219), int16(219),
 18251  	int16(220), int16(221),
 18252  	int16(222), int16(223),
 18253  	int16(223), int16(223),
 18254  	int16(223), int16(224),
 18255  	int16(224), int16(224),
 18256  	int16(225), int16(225),
 18257  	int16(226), int16(226),
 18258  	int16(226), int16(226),
 18259  	int16(227), int16(227),
 18260  	int16(227), int16(227),
 18261  	int16(227), int16(227),
 18262  	int16(228), int16(228),
 18263  	int16(228), int16(228),
 18264  	int16(229), int16(229),
 18265  	int16(229), int16(230),
 18266  	int16(230), int16(230),
 18267  	int16(231), int16(231),
 18268  	int16(231), int16(231),
 18269  	int16(232), int16(232),
 18270  	int16(232), int16(232),
 18271  	int16(233), int16(234),
 18272  	int16(235), int16(235),
 18273  	int16(235), int16(236),
 18274  	int16(236), int16(236),
 18275  	int16(236), int16(237),
 18276  	int16(237), int16(237),
 18277  	int16(237), int16(240),
 18278  	int16(240), int16(240),
 18279  	int16(240), int16(241),
 18280  	int16(242), int16(243),
 18281  	int16(244), int16(244),
 18282  	int16(247), int16(247),
 18283  	int16(248), int16(248),
 18284  	int16(248), int16(249),
 18285  	int16(251), int16(255),
 18286  	int16(255), int16(256),
 18287  	int16(260), int16(260),
 18288  	int16(261), int16(264),
 18289  	int16(264), int16(266),
 18290  	int16(266), int16(268),
 18291  	int16(271), int16(274),
 18292  	int16(276), int16(279),
 18293  	int16(288), int16(288),
 18294  	int16(288), int16(288),
 18295  	int16(118), int16(120),
 18296  	int16(121), int16(121),
 18297  	int16(122), int16(125),
 18298  	int16(125), int16(129),
 18299  	int16(129), int16(130),
 18300  	int16(131), int16(132),
 18301  	int16(136), int16(137),
 18302  	int16(138), int16(145),
 18303  	int16(87), int16(88),
 18304  	int16(91), int16(97),
 18305  	int16(98), int16(100),
 18306  	int16(105), int16(106),
 18307  	int16(92), int16(95),
 18308  	int16(95), int16(96),
 18309  	int16(97), int16(97),
 18310  	int16(98), int16(99),
 18311  	int16(88), int16(92),
 18312  	int16(95), int16(95),
 18313  	int16(96), int16(97),
 18314  	int16(98), int16(109),
 18315  	int16(93), int16(93),
 18316  	int16(93), int16(96),
 18317  	int16(97), int16(97),
 18318  	int16(99), int16(101),
 18319  	int16(93), int16(94),
 18320  	int16(94), int16(95),
 18321  	int16(95), int16(99),
 18322  	int16(99), int16(99),
 18323  	int16(93), int16(93),
 18324  	int16(93), int16(96),
 18325  	int16(96), int16(97),
 18326  	int16(100), int16(102),
 18327  	int16(93), int16(95),
 18328  	int16(95), int16(96),
 18329  	int16(96), int16(96),
 18330  	int16(98), int16(99),
 18331  	int16(125), int16(125),
 18332  	int16(127), int16(127),
 18333  	int16(127), int16(127),
 18334  	int16(128), int16(128),
 18335  	int16(128), int16(128),
 18336  	int16(128), int16(128),
 18337  	int16(129), int16(130),
 18338  	int16(131), int16(132),
 18339  } /* SKP_Silk_tables_NLSF_CB0_16.c:296:17 */
 18340  
 18341  var SKP_Silk_NLSF_MSVQ_CB0_16_ndelta_min_Q15 = [17]int32{
 18342  	266,
 18343  	3,
 18344  	40,
 18345  	3,
 18346  	3,
 18347  	16,
 18348  	78,
 18349  	89,
 18350  	107,
 18351  	141,
 18352  	188,
 18353  	146,
 18354  	272,
 18355  	240,
 18356  	235,
 18357  	215,
 18358  	632,
 18359  } /* SKP_Silk_tables_NLSF_CB0_16.c:408:15 */
 18360  
 18361  var SKP_Silk_NLSF_MSVQ_CB0_16_Q15 = [3456]int16{
 18362  	int16(1170), int16(2278), int16(3658), int16(5374),
 18363  	int16(7666), int16(9113), int16(11298), int16(13304),
 18364  	int16(15371), int16(17549), int16(19587), int16(21487),
 18365  	int16(23798), int16(26038), int16(28318), int16(30201),
 18366  	int16(1628), int16(2334), int16(4115), int16(6036),
 18367  	int16(7818), int16(9544), int16(11777), int16(14021),
 18368  	int16(15787), int16(17408), int16(19466), int16(21261),
 18369  	int16(22886), int16(24565), int16(26714), int16(28059),
 18370  	int16(1724), int16(2670), int16(4056), int16(6532),
 18371  	int16(8357), int16(10119), int16(12093), int16(14061),
 18372  	int16(16491), int16(18795), int16(20417), int16(22402),
 18373  	int16(24251), int16(26224), int16(28410), int16(29956),
 18374  	int16(1493), int16(3427), int16(4789), int16(6399),
 18375  	int16(8435), int16(10168), int16(12000), int16(14066),
 18376  	int16(16229), int16(18210), int16(20040), int16(22098),
 18377  	int16(24153), int16(26095), int16(28183), int16(30121),
 18378  	int16(1119), int16(2089), int16(4295), int16(6245),
 18379  	int16(8691), int16(10741), int16(12688), int16(15057),
 18380  	int16(17028), int16(18792), int16(20717), int16(22514),
 18381  	int16(24497), int16(26548), int16(28619), int16(30630),
 18382  	int16(1363), int16(2417), int16(3927), int16(5556),
 18383  	int16(7422), int16(9315), int16(11879), int16(13767),
 18384  	int16(16143), int16(18520), int16(20458), int16(22578),
 18385  	int16(24539), int16(26436), int16(28318), int16(30318),
 18386  	int16(1122), int16(2503), int16(5216), int16(7148),
 18387  	int16(9310), int16(11078), int16(13175), int16(14800),
 18388  	int16(16864), int16(18700), int16(20436), int16(22488),
 18389  	int16(24572), int16(26602), int16(28555), int16(30426),
 18390  	int16(600), int16(1317), int16(2970), int16(5609),
 18391  	int16(7694), int16(9784), int16(12169), int16(14087),
 18392  	int16(16379), int16(18378), int16(20551), int16(22686),
 18393  	int16(24739), int16(26697), int16(28646), int16(30355),
 18394  	int16(941), int16(1882), int16(4274), int16(5540),
 18395  	int16(8482), int16(9858), int16(11940), int16(14287),
 18396  	int16(16091), int16(18501), int16(20326), int16(22612),
 18397  	int16(24711), int16(26638), int16(28814), int16(30430),
 18398  	int16(635), int16(1699), int16(4376), int16(5948),
 18399  	int16(8097), int16(10115), int16(12274), int16(14178),
 18400  	int16(16111), int16(17813), int16(19695), int16(21773),
 18401  	int16(23927), int16(25866), int16(28022), int16(30134),
 18402  	int16(1408), int16(2222), int16(3524), int16(5615),
 18403  	int16(7345), int16(8849), int16(10989), int16(12772),
 18404  	int16(15352), int16(17026), int16(18919), int16(21062),
 18405  	int16(23329), int16(25215), int16(27209), int16(29023),
 18406  	int16(701), int16(1307), int16(3548), int16(6301),
 18407  	int16(7744), int16(9574), int16(11227), int16(12978),
 18408  	int16(15170), int16(17565), int16(19775), int16(22097),
 18409  	int16(24230), int16(26335), int16(28377), int16(30231),
 18410  	int16(1752), int16(2364), int16(4879), int16(6569),
 18411  	int16(7813), int16(9796), int16(11199), int16(14290),
 18412  	int16(15795), int16(18000), int16(20396), int16(22417),
 18413  	int16(24308), int16(26124), int16(28360), int16(30633),
 18414  	int16(901), int16(1629), int16(3356), int16(4635),
 18415  	int16(7256), int16(8767), int16(9971), int16(11558),
 18416  	int16(15215), int16(17544), int16(19523), int16(21852),
 18417  	int16(23900), int16(25978), int16(28133), int16(30184),
 18418  	int16(981), int16(1669), int16(3323), int16(4693),
 18419  	int16(6213), int16(8692), int16(10614), int16(12956),
 18420  	int16(15211), int16(17711), int16(19856), int16(22122),
 18421  	int16(24344), int16(26592), int16(28723), int16(30481),
 18422  	int16(1607), int16(2577), int16(4220), int16(5512),
 18423  	int16(8532), int16(10388), int16(11627), int16(13671),
 18424  	int16(15752), int16(17199), int16(19840), int16(21859),
 18425  	int16(23494), int16(25786), int16(28091), int16(30131),
 18426  	int16(811), int16(1471), int16(3144), int16(5041),
 18427  	int16(7430), int16(9389), int16(11174), int16(13255),
 18428  	int16(15157), int16(16741), int16(19583), int16(22167),
 18429  	int16(24115), int16(26142), int16(28383), int16(30395),
 18430  	int16(1543), int16(2144), int16(3629), int16(6347),
 18431  	int16(7333), int16(9339), int16(10710), int16(13596),
 18432  	int16(15099), int16(17340), int16(20102), int16(21886),
 18433  	int16(23732), int16(25637), int16(27818), int16(29917),
 18434  	int16(492), int16(1185), int16(2940), int16(5488),
 18435  	int16(7095), int16(8751), int16(11596), int16(13579),
 18436  	int16(16045), int16(18015), int16(20178), int16(22127),
 18437  	int16(24265), int16(26406), int16(28484), int16(30357),
 18438  	int16(1547), int16(2282), int16(3693), int16(6341),
 18439  	int16(7758), int16(9607), int16(11848), int16(13236),
 18440  	int16(16564), int16(18069), int16(19759), int16(21404),
 18441  	int16(24110), int16(26606), int16(28786), int16(30655),
 18442  	int16(685), int16(1338), int16(3409), int16(5262),
 18443  	int16(6950), int16(9222), int16(11414), int16(14523),
 18444  	int16(16337), int16(17893), int16(19436), int16(21298),
 18445  	int16(23293), int16(25181), int16(27973), int16(30520),
 18446  	int16(887), int16(1581), int16(3057), int16(4318),
 18447  	int16(7192), int16(8617), int16(10047), int16(13106),
 18448  	int16(16265), int16(17893), int16(20233), int16(22350),
 18449  	int16(24379), int16(26384), int16(28314), int16(30189),
 18450  	int16(2285), int16(3745), int16(5662), int16(7576),
 18451  	int16(9323), int16(11320), int16(13239), int16(15191),
 18452  	int16(17175), int16(19225), int16(21108), int16(22972),
 18453  	int16(24821), int16(26655), int16(28561), int16(30460),
 18454  	int16(1496), int16(2108), int16(3448), int16(6898),
 18455  	int16(8328), int16(9656), int16(11252), int16(12823),
 18456  	int16(14979), int16(16482), int16(18180), int16(20085),
 18457  	int16(22962), int16(25160), int16(27705), int16(29629),
 18458  	int16(575), int16(1261), int16(3861), int16(6627),
 18459  	int16(8294), int16(10809), int16(12705), int16(14768),
 18460  	int16(17076), int16(19047), int16(20978), int16(23055),
 18461  	int16(24972), int16(26703), int16(28720), int16(30345),
 18462  	int16(1682), int16(2213), int16(3882), int16(6238),
 18463  	int16(7208), int16(9646), int16(10877), int16(13431),
 18464  	int16(14805), int16(16213), int16(17941), int16(20873),
 18465  	int16(23550), int16(25765), int16(27756), int16(29461),
 18466  	int16(888), int16(1616), int16(3924), int16(5195),
 18467  	int16(7206), int16(8647), int16(9842), int16(11473),
 18468  	int16(16067), int16(18221), int16(20343), int16(22774),
 18469  	int16(24503), int16(26412), int16(28054), int16(29731),
 18470  	int16(805), int16(1454), int16(2683), int16(4472),
 18471  	int16(7936), int16(9360), int16(11398), int16(14345),
 18472  	int16(16205), int16(17832), int16(19453), int16(21646),
 18473  	int16(23899), int16(25928), int16(28387), int16(30463),
 18474  	int16(1640), int16(2383), int16(3484), int16(5082),
 18475  	int16(6032), int16(8606), int16(11640), int16(12966),
 18476  	int16(15842), int16(17368), int16(19346), int16(21182),
 18477  	int16(23638), int16(25889), int16(28368), int16(30299),
 18478  	int16(1632), int16(2204), int16(4510), int16(7580),
 18479  	int16(8718), int16(10512), int16(11962), int16(14096),
 18480  	int16(15640), int16(17194), int16(19143), int16(22247),
 18481  	int16(24563), int16(26561), int16(28604), int16(30509),
 18482  	int16(2043), int16(2612), int16(3985), int16(6851),
 18483  	int16(8038), int16(9514), int16(10979), int16(12789),
 18484  	int16(15426), int16(16728), int16(18899), int16(20277),
 18485  	int16(22902), int16(26209), int16(28711), int16(30618),
 18486  	int16(2224), int16(2798), int16(4465), int16(5320),
 18487  	int16(7108), int16(9436), int16(10986), int16(13222),
 18488  	int16(14599), int16(18317), int16(20141), int16(21843),
 18489  	int16(23601), int16(25700), int16(28184), int16(30582),
 18490  	int16(835), int16(1541), int16(4083), int16(5769),
 18491  	int16(7386), int16(9399), int16(10971), int16(12456),
 18492  	int16(15021), int16(18642), int16(20843), int16(23100),
 18493  	int16(25292), int16(26966), int16(28952), int16(30422),
 18494  	int16(1795), int16(2343), int16(4809), int16(5896),
 18495  	int16(7178), int16(8545), int16(10223), int16(13370),
 18496  	int16(14606), int16(16469), int16(18273), int16(20736),
 18497  	int16(23645), int16(26257), int16(28224), int16(30390),
 18498  	int16(1734), int16(2254), int16(4031), int16(5188),
 18499  	int16(6506), int16(7872), int16(9651), int16(13025),
 18500  	int16(14419), int16(17305), int16(19495), int16(22190),
 18501  	int16(24403), int16(26302), int16(28195), int16(30177),
 18502  	int16(1841), int16(2349), int16(3968), int16(4764),
 18503  	int16(6376), int16(9825), int16(11048), int16(13345),
 18504  	int16(14682), int16(16252), int16(18183), int16(21363),
 18505  	int16(23918), int16(26156), int16(28031), int16(29935),
 18506  	int16(1432), int16(2047), int16(5631), int16(6927),
 18507  	int16(8198), int16(9675), int16(11358), int16(13506),
 18508  	int16(14802), int16(16419), int16(18339), int16(22019),
 18509  	int16(24124), int16(26177), int16(28130), int16(30586),
 18510  	int16(1730), int16(2320), int16(3744), int16(4808),
 18511  	int16(6007), int16(9666), int16(10997), int16(13622),
 18512  	int16(15234), int16(17495), int16(20088), int16(22002),
 18513  	int16(23603), int16(25400), int16(27379), int16(29254),
 18514  	int16(1267), int16(1915), int16(5483), int16(6812),
 18515  	int16(8229), int16(9919), int16(11589), int16(13337),
 18516  	int16(14747), int16(17965), int16(20552), int16(22167),
 18517  	int16(24519), int16(26819), int16(28883), int16(30642),
 18518  	int16(1526), int16(2229), int16(4240), int16(7388),
 18519  	int16(8953), int16(10450), int16(11899), int16(13718),
 18520  	int16(16861), int16(18323), int16(20379), int16(22672),
 18521  	int16(24797), int16(26906), int16(28906), int16(30622),
 18522  	int16(2175), int16(2791), int16(4104), int16(6875),
 18523  	int16(8612), int16(9798), int16(12152), int16(13536),
 18524  	int16(15623), int16(17682), int16(19213), int16(21060),
 18525  	int16(24382), int16(26760), int16(28633), int16(30248),
 18526  	int16(454), int16(1231), int16(4339), int16(5738),
 18527  	int16(7550), int16(9006), int16(10320), int16(13525),
 18528  	int16(16005), int16(17849), int16(20071), int16(21992),
 18529  	int16(23949), int16(26043), int16(28245), int16(30175),
 18530  	int16(2250), int16(2791), int16(4230), int16(5283),
 18531  	int16(6762), int16(10607), int16(11879), int16(13821),
 18532  	int16(15797), int16(17264), int16(20029), int16(22266),
 18533  	int16(24588), int16(26437), int16(28244), int16(30419),
 18534  	int16(1696), int16(2216), int16(4308), int16(8385),
 18535  	int16(9766), int16(11030), int16(12556), int16(14099),
 18536  	int16(16322), int16(17640), int16(19166), int16(20590),
 18537  	int16(23967), int16(26858), int16(28798), int16(30562),
 18538  	int16(2452), int16(3236), int16(4369), int16(6118),
 18539  	int16(7156), int16(9003), int16(11509), int16(12796),
 18540  	int16(15749), int16(17291), int16(19491), int16(22241),
 18541  	int16(24530), int16(26474), int16(28273), int16(30073),
 18542  	int16(1811), int16(2541), int16(3555), int16(5480),
 18543  	int16(9123), int16(10527), int16(11894), int16(13659),
 18544  	int16(15262), int16(16899), int16(19366), int16(21069),
 18545  	int16(22694), int16(24314), int16(27256), int16(29983),
 18546  	int16(1553), int16(2246), int16(4559), int16(5500),
 18547  	int16(6754), int16(7874), int16(11739), int16(13571),
 18548  	int16(15188), int16(17879), int16(20281), int16(22510),
 18549  	int16(24614), int16(26649), int16(28786), int16(30755),
 18550  	int16(1982), int16(2768), int16(3834), int16(5964),
 18551  	int16(8732), int16(9908), int16(11797), int16(14813),
 18552  	int16(16311), int16(17946), int16(21097), int16(22851),
 18553  	int16(24456), int16(26304), int16(28166), int16(29755),
 18554  	int16(1824), int16(2529), int16(3817), int16(5449),
 18555  	int16(6854), int16(8714), int16(10381), int16(12286),
 18556  	int16(14194), int16(15774), int16(19524), int16(21374),
 18557  	int16(23695), int16(26069), int16(28096), int16(30212),
 18558  	int16(2212), int16(2854), int16(3947), int16(5898),
 18559  	int16(9930), int16(11556), int16(12854), int16(14788),
 18560  	int16(16328), int16(17700), int16(20321), int16(22098),
 18561  	int16(23672), int16(25291), int16(26976), int16(28586),
 18562  	int16(2023), int16(2599), int16(4024), int16(4916),
 18563  	int16(6613), int16(11149), int16(12457), int16(14626),
 18564  	int16(16320), int16(17822), int16(19673), int16(21172),
 18565  	int16(23115), int16(26051), int16(28825), int16(30758),
 18566  	int16(1628), int16(2206), int16(3467), int16(4364),
 18567  	int16(8679), int16(10173), int16(11864), int16(13679),
 18568  	int16(14998), int16(16938), int16(19207), int16(21364),
 18569  	int16(23850), int16(26115), int16(28124), int16(30273),
 18570  	int16(2014), int16(2603), int16(4114), int16(7254),
 18571  	int16(8516), int16(10043), int16(11822), int16(13503),
 18572  	int16(16329), int16(17826), int16(19697), int16(21280),
 18573  	int16(23151), int16(24661), int16(26807), int16(30161),
 18574  	int16(2376), int16(2980), int16(4422), int16(5770),
 18575  	int16(7016), int16(9723), int16(11125), int16(13516),
 18576  	int16(15485), int16(16985), int16(19160), int16(20587),
 18577  	int16(24401), int16(27180), int16(29046), int16(30647),
 18578  	int16(2454), int16(3502), int16(4624), int16(6019),
 18579  	int16(7632), int16(8849), int16(10792), int16(13964),
 18580  	int16(15523), int16(17085), int16(19611), int16(21238),
 18581  	int16(22856), int16(25108), int16(28106), int16(29890),
 18582  	int16(1573), int16(2274), int16(3308), int16(5999),
 18583  	int16(8977), int16(10104), int16(12457), int16(14258),
 18584  	int16(15749), int16(18180), int16(19974), int16(21253),
 18585  	int16(23045), int16(25058), int16(27741), int16(30315),
 18586  	int16(1943), int16(2730), int16(4140), int16(6160),
 18587  	int16(7491), int16(8986), int16(11309), int16(12775),
 18588  	int16(14820), int16(16558), int16(17909), int16(19757),
 18589  	int16(21512), int16(23605), int16(27274), int16(29527),
 18590  	int16(2021), int16(2582), int16(4494), int16(5835),
 18591  	int16(6993), int16(8245), int16(9827), int16(14733),
 18592  	int16(16462), int16(17894), int16(19647), int16(21083),
 18593  	int16(23764), int16(26667), int16(29072), int16(30990),
 18594  	int16(1052), int16(1775), int16(3218), int16(4378),
 18595  	int16(7666), int16(9403), int16(11248), int16(13327),
 18596  	int16(14972), int16(17962), int16(20758), int16(22354),
 18597  	int16(25071), int16(27209), int16(29001), int16(30609),
 18598  	int16(2218), int16(2866), int16(4223), int16(5352),
 18599  	int16(6581), int16(9980), int16(11587), int16(13121),
 18600  	int16(15193), int16(16583), int16(18386), int16(20080),
 18601  	int16(22013), int16(25317), int16(28127), int16(29880),
 18602  	int16(2146), int16(2840), int16(4397), int16(5840),
 18603  	int16(7449), int16(8721), int16(10512), int16(11936),
 18604  	int16(13595), int16(17253), int16(19310), int16(20891),
 18605  	int16(23417), int16(25627), int16(27749), int16(30231),
 18606  	int16(1972), int16(2619), int16(3756), int16(6367),
 18607  	int16(7641), int16(8814), int16(12286), int16(13768),
 18608  	int16(15309), int16(18036), int16(19557), int16(20904),
 18609  	int16(22582), int16(24876), int16(27800), int16(30440),
 18610  	int16(2005), int16(2577), int16(4272), int16(7373),
 18611  	int16(8558), int16(10223), int16(11770), int16(13402),
 18612  	int16(16502), int16(18000), int16(19645), int16(21104),
 18613  	int16(22990), int16(26806), int16(29505), int16(30942),
 18614  	int16(1153), int16(1822), int16(3724), int16(5443),
 18615  	int16(6990), int16(8702), int16(10289), int16(11899),
 18616  	int16(13856), int16(15315), int16(17601), int16(21064),
 18617  	int16(23692), int16(26083), int16(28586), int16(30639),
 18618  	int16(1304), int16(1869), int16(3318), int16(7195),
 18619  	int16(9613), int16(10733), int16(12393), int16(13728),
 18620  	int16(15822), int16(17474), int16(18882), int16(20692),
 18621  	int16(23114), int16(25540), int16(27684), int16(29244),
 18622  	int16(2093), int16(2691), int16(4018), int16(6658),
 18623  	int16(7947), int16(9147), int16(10497), int16(11881),
 18624  	int16(15888), int16(17821), int16(19333), int16(21233),
 18625  	int16(23371), int16(25234), int16(27553), int16(29998),
 18626  	int16(575), int16(1331), int16(5304), int16(6910),
 18627  	int16(8425), int16(10086), int16(11577), int16(13498),
 18628  	int16(16444), int16(18527), int16(20565), int16(22847),
 18629  	int16(24914), int16(26692), int16(28759), int16(30157),
 18630  	int16(1435), int16(2024), int16(3283), int16(4156),
 18631  	int16(7611), int16(10592), int16(12049), int16(13927),
 18632  	int16(15459), int16(18413), int16(20495), int16(22270),
 18633  	int16(24222), int16(26093), int16(28065), int16(30099),
 18634  	int16(1632), int16(2168), int16(5540), int16(7478),
 18635  	int16(8630), int16(10391), int16(11644), int16(14321),
 18636  	int16(15741), int16(17357), int16(18756), int16(20434),
 18637  	int16(22799), int16(26060), int16(28542), int16(30696),
 18638  	int16(1407), int16(2245), int16(3405), int16(5639),
 18639  	int16(9419), int16(10685), int16(12104), int16(13495),
 18640  	int16(15535), int16(18357), int16(19996), int16(21689),
 18641  	int16(24351), int16(26550), int16(28853), int16(30564),
 18642  	int16(1675), int16(2226), int16(4005), int16(8223),
 18643  	int16(9975), int16(11155), int16(12822), int16(14316),
 18644  	int16(16504), int16(18137), int16(19574), int16(21050),
 18645  	int16(22759), int16(24912), int16(28296), int16(30634),
 18646  	int16(1080), int16(1614), int16(3622), int16(7565),
 18647  	int16(8748), int16(10303), int16(11713), int16(13848),
 18648  	int16(15633), int16(17434), int16(19761), int16(21825),
 18649  	int16(23571), int16(25393), int16(27406), int16(29063),
 18650  	int16(1693), int16(2229), int16(3456), int16(4354),
 18651  	int16(5670), int16(10890), int16(12563), int16(14167),
 18652  	int16(15879), int16(17377), int16(19817), int16(21971),
 18653  	int16(24094), int16(26131), int16(28298), int16(30099),
 18654  	int16(2042), int16(2959), int16(4195), int16(5740),
 18655  	int16(7106), int16(8267), int16(11126), int16(14973),
 18656  	int16(16914), int16(18295), int16(20532), int16(21982),
 18657  	int16(23711), int16(25769), int16(27609), int16(29351),
 18658  	int16(984), int16(1612), int16(3808), int16(5265),
 18659  	int16(6885), int16(8411), int16(9547), int16(10889),
 18660  	int16(12522), int16(16520), int16(19549), int16(21639),
 18661  	int16(23746), int16(26058), int16(28310), int16(30374),
 18662  	int16(2036), int16(2538), int16(4166), int16(7761),
 18663  	int16(9146), int16(10412), int16(12144), int16(13609),
 18664  	int16(15588), int16(17169), int16(18559), int16(20113),
 18665  	int16(21820), int16(24313), int16(28029), int16(30612),
 18666  	int16(1871), int16(2355), int16(4061), int16(5143),
 18667  	int16(7464), int16(10129), int16(11941), int16(15001),
 18668  	int16(16680), int16(18354), int16(19957), int16(22279),
 18669  	int16(24861), int16(26872), int16(28988), int16(30615),
 18670  	int16(2566), int16(3161), int16(4643), int16(6227),
 18671  	int16(7406), int16(9970), int16(11618), int16(13416),
 18672  	int16(15889), int16(17364), int16(19121), int16(20817),
 18673  	int16(22592), int16(24720), int16(28733), int16(31082),
 18674  	int16(1700), int16(2327), int16(4828), int16(5939),
 18675  	int16(7567), int16(9154), int16(11087), int16(12771),
 18676  	int16(14209), int16(16121), int16(20222), int16(22671),
 18677  	int16(24648), int16(26656), int16(28696), int16(30745),
 18678  	int16(3169), int16(3873), int16(5046), int16(6868),
 18679  	int16(8184), int16(9480), int16(12335), int16(14068),
 18680  	int16(15774), int16(17971), int16(20231), int16(21711),
 18681  	int16(23520), int16(25245), int16(27026), int16(28730),
 18682  	int16(1564), int16(2391), int16(4229), int16(6730),
 18683  	int16(8905), int16(10459), int16(13026), int16(15033),
 18684  	int16(17265), int16(19809), int16(21849), int16(23741),
 18685  	int16(25490), int16(27312), int16(29061), int16(30527),
 18686  	int16(2864), int16(3559), int16(4719), int16(6441),
 18687  	int16(9592), int16(11055), int16(12763), int16(14784),
 18688  	int16(16428), int16(18164), int16(20486), int16(22262),
 18689  	int16(24183), int16(26263), int16(28383), int16(30224),
 18690  	int16(2673), int16(3449), int16(4581), int16(5983),
 18691  	int16(6863), int16(8311), int16(12464), int16(13911),
 18692  	int16(15738), int16(17791), int16(19416), int16(21182),
 18693  	int16(24025), int16(26561), int16(28723), int16(30440),
 18694  	int16(2419), int16(3049), int16(4274), int16(6384),
 18695  	int16(8564), int16(9661), int16(11288), int16(12676),
 18696  	int16(14447), int16(17578), int16(19816), int16(21231),
 18697  	int16(23099), int16(25270), int16(26899), int16(28926),
 18698  	int16(1278), int16(2001), int16(3000), int16(5353),
 18699  	int16(9995), int16(11777), int16(13018), int16(14570),
 18700  	int16(16050), int16(17762), int16(19982), int16(21617),
 18701  	int16(23371), int16(25083), int16(27656), int16(30172),
 18702  	int16(932), int16(1624), int16(2798), int16(4570),
 18703  	int16(8592), int16(9988), int16(11552), int16(13050),
 18704  	int16(16921), int16(18677), int16(20415), int16(22810),
 18705  	int16(24817), int16(26819), int16(28804), int16(30385),
 18706  	int16(2324), int16(2973), int16(4156), int16(5702),
 18707  	int16(6919), int16(8806), int16(10259), int16(12503),
 18708  	int16(15015), int16(16567), int16(19418), int16(21375),
 18709  	int16(22943), int16(24550), int16(27024), int16(29849),
 18710  	int16(1564), int16(2373), int16(3455), int16(4907),
 18711  	int16(5975), int16(7436), int16(11786), int16(14505),
 18712  	int16(16107), int16(18148), int16(20019), int16(21653),
 18713  	int16(23740), int16(25814), int16(28578), int16(30372),
 18714  	int16(3025), int16(3729), int16(4866), int16(6520),
 18715  	int16(9487), int16(10943), int16(12358), int16(14258),
 18716  	int16(16174), int16(17501), int16(19476), int16(21408),
 18717  	int16(23227), int16(24906), int16(27347), int16(29407),
 18718  	int16(1270), int16(1965), int16(6802), int16(7995),
 18719  	int16(9204), int16(10828), int16(12507), int16(14230),
 18720  	int16(15759), int16(17860), int16(20369), int16(22502),
 18721  	int16(24633), int16(26514), int16(28535), int16(30525),
 18722  	int16(2210), int16(2749), int16(4266), int16(7487),
 18723  	int16(9878), int16(11018), int16(12823), int16(14431),
 18724  	int16(16247), int16(18626), int16(20450), int16(22054),
 18725  	int16(23739), int16(25291), int16(27074), int16(29169),
 18726  	int16(1275), int16(1926), int16(4330), int16(6573),
 18727  	int16(8441), int16(10920), int16(13260), int16(15008),
 18728  	int16(16927), int16(18573), int16(20644), int16(22217),
 18729  	int16(23983), int16(25474), int16(27372), int16(28645),
 18730  	int16(3015), int16(3670), int16(5086), int16(6372),
 18731  	int16(7888), int16(9309), int16(10966), int16(12642),
 18732  	int16(14495), int16(16172), int16(18080), int16(19972),
 18733  	int16(22454), int16(24899), int16(27362), int16(29975),
 18734  	int16(2882), int16(3733), int16(5113), int16(6482),
 18735  	int16(8125), int16(9685), int16(11598), int16(13288),
 18736  	int16(15405), int16(17192), int16(20178), int16(22426),
 18737  	int16(24801), int16(27014), int16(29212), int16(30811),
 18738  	int16(2300), int16(2968), int16(4101), int16(5442),
 18739  	int16(6327), int16(7910), int16(12455), int16(13862),
 18740  	int16(15747), int16(17505), int16(19053), int16(20679),
 18741  	int16(22615), int16(24658), int16(27499), int16(30065),
 18742  	int16(2257), int16(2940), int16(4430), int16(5991),
 18743  	int16(7042), int16(8364), int16(9414), int16(11224),
 18744  	int16(15723), int16(17420), int16(19253), int16(21469),
 18745  	int16(23915), int16(26053), int16(28430), int16(30384),
 18746  	int16(1227), int16(2045), int16(3818), int16(5011),
 18747  	int16(6990), int16(9231), int16(11024), int16(13011),
 18748  	int16(17341), int16(19017), int16(20583), int16(22799),
 18749  	int16(25195), int16(26876), int16(29351), int16(30805),
 18750  	int16(1354), int16(1924), int16(3789), int16(8077),
 18751  	int16(10453), int16(11639), int16(13352), int16(14817),
 18752  	int16(16743), int16(18189), int16(20095), int16(22014),
 18753  	int16(24593), int16(26677), int16(28647), int16(30256),
 18754  	int16(3142), int16(4049), int16(6197), int16(7417),
 18755  	int16(8753), int16(10156), int16(11533), int16(13181),
 18756  	int16(15947), int16(17655), int16(19606), int16(21402),
 18757  	int16(23487), int16(25659), int16(28123), int16(30304),
 18758  	int16(1317), int16(2263), int16(4725), int16(7611),
 18759  	int16(9667), int16(11634), int16(14143), int16(16258),
 18760  	int16(18724), int16(20698), int16(22379), int16(24007),
 18761  	int16(25775), int16(27251), int16(28930), int16(30593),
 18762  	int16(1570), int16(2323), int16(3818), int16(6215),
 18763  	int16(9893), int16(11556), int16(13070), int16(14631),
 18764  	int16(16152), int16(18290), int16(21386), int16(23346),
 18765  	int16(25114), int16(26923), int16(28712), int16(30168),
 18766  	int16(2297), int16(3905), int16(6287), int16(8558),
 18767  	int16(10668), int16(12766), int16(15019), int16(17102),
 18768  	int16(19036), int16(20677), int16(22341), int16(23871),
 18769  	int16(25478), int16(27085), int16(28851), int16(30520),
 18770  	int16(1915), int16(2507), int16(4033), int16(5749),
 18771  	int16(7059), int16(8871), int16(10659), int16(12198),
 18772  	int16(13937), int16(15383), int16(16869), int16(18707),
 18773  	int16(23175), int16(25818), int16(28514), int16(30501),
 18774  	int16(2404), int16(2918), int16(5190), int16(6252),
 18775  	int16(7426), int16(9887), int16(12387), int16(14795),
 18776  	int16(16754), int16(18368), int16(20338), int16(22003),
 18777  	int16(24236), int16(26456), int16(28490), int16(30397),
 18778  	int16(1621), int16(2227), int16(3479), int16(5085),
 18779  	int16(9425), int16(12892), int16(14246), int16(15652),
 18780  	int16(17205), int16(18674), int16(20446), int16(22209),
 18781  	int16(23778), int16(25867), int16(27931), int16(30093),
 18782  	int16(1869), int16(2390), int16(4105), int16(7021),
 18783  	int16(11221), int16(12775), int16(14059), int16(15590),
 18784  	int16(17024), int16(18608), int16(20595), int16(22075),
 18785  	int16(23649), int16(25154), int16(26914), int16(28671),
 18786  	int16(2551), int16(3252), int16(4688), int16(6562),
 18787  	int16(7869), int16(9125), int16(10475), int16(11800),
 18788  	int16(15402), int16(18780), int16(20992), int16(22555),
 18789  	int16(24289), int16(25968), int16(27465), int16(29232),
 18790  	int16(2705), int16(3493), int16(4735), int16(6360),
 18791  	int16(7905), int16(9352), int16(11538), int16(13430),
 18792  	int16(15239), int16(16919), int16(18619), int16(20094),
 18793  	int16(21800), int16(23342), int16(25200), int16(29257),
 18794  	int16(2166), int16(2791), int16(4011), int16(5081),
 18795  	int16(5896), int16(9038), int16(13407), int16(14703),
 18796  	int16(16543), int16(18189), int16(19896), int16(21857),
 18797  	int16(24872), int16(26971), int16(28955), int16(30514),
 18798  	int16(1865), int16(3021), int16(4696), int16(6534),
 18799  	int16(8343), int16(9914), int16(12789), int16(14103),
 18800  	int16(16533), int16(17729), int16(21340), int16(22439),
 18801  	int16(24873), int16(26330), int16(28428), int16(30154),
 18802  	int16(3369), int16(4345), int16(6573), int16(8763),
 18803  	int16(10309), int16(11713), int16(13367), int16(14784),
 18804  	int16(16483), int16(18145), int16(19839), int16(21247),
 18805  	int16(23292), int16(25477), int16(27555), int16(29447),
 18806  	int16(1265), int16(2184), int16(5443), int16(7893),
 18807  	int16(10591), int16(13139), int16(15105), int16(16639),
 18808  	int16(18402), int16(19826), int16(21419), int16(22995),
 18809  	int16(24719), int16(26437), int16(28363), int16(30125),
 18810  	int16(1584), int16(2004), int16(3535), int16(4450),
 18811  	int16(8662), int16(10764), int16(12832), int16(14978),
 18812  	int16(16972), int16(18794), int16(20932), int16(22547),
 18813  	int16(24636), int16(26521), int16(28701), int16(30567),
 18814  	int16(3419), int16(4528), int16(6602), int16(7890),
 18815  	int16(9508), int16(10875), int16(12771), int16(14357),
 18816  	int16(16051), int16(18330), int16(20630), int16(22490),
 18817  	int16(25070), int16(26936), int16(28946), int16(30542),
 18818  	int16(1726), int16(2252), int16(4597), int16(6950),
 18819  	int16(8379), int16(9823), int16(11363), int16(12794),
 18820  	int16(14306), int16(15476), int16(16798), int16(18018),
 18821  	int16(21671), int16(25550), int16(28148), int16(30367),
 18822  	int16(3385), int16(3870), int16(5307), int16(6388),
 18823  	int16(7141), int16(8684), int16(12695), int16(14939),
 18824  	int16(16480), int16(18277), int16(20537), int16(22048),
 18825  	int16(23947), int16(25965), int16(28214), int16(29956),
 18826  	int16(2771), int16(3306), int16(4450), int16(5560),
 18827  	int16(6453), int16(9493), int16(13548), int16(14754),
 18828  	int16(16743), int16(18447), int16(20028), int16(21736),
 18829  	int16(23746), int16(25353), int16(27141), int16(29066),
 18830  	int16(3028), int16(3900), int16(6617), int16(7893),
 18831  	int16(9211), int16(10480), int16(12047), int16(13583),
 18832  	int16(15182), int16(16662), int16(18502), int16(20092),
 18833  	int16(22190), int16(24358), int16(26302), int16(28957),
 18834  	int16(2000), int16(2550), int16(4067), int16(6837),
 18835  	int16(9628), int16(11002), int16(12594), int16(14098),
 18836  	int16(15589), int16(17195), int16(18679), int16(20099),
 18837  	int16(21530), int16(23085), int16(24641), int16(29022),
 18838  	int16(2844), int16(3302), int16(5103), int16(6107),
 18839  	int16(6911), int16(8598), int16(12416), int16(14054),
 18840  	int16(16026), int16(18567), int16(20672), int16(22270),
 18841  	int16(23952), int16(25771), int16(27658), int16(30026),
 18842  	int16(4043), int16(5150), int16(7268), int16(9056),
 18843  	int16(10916), int16(12638), int16(14543), int16(16184),
 18844  	int16(17948), int16(19691), int16(21357), int16(22981),
 18845  	int16(24825), int16(26591), int16(28479), int16(30233),
 18846  	int16(2109), int16(2625), int16(4320), int16(5525),
 18847  	int16(7454), int16(10220), int16(12980), int16(14698),
 18848  	int16(17627), int16(19263), int16(20485), int16(22381),
 18849  	int16(24279), int16(25777), int16(27847), int16(30458),
 18850  	int16(1550), int16(2667), int16(6473), int16(9496),
 18851  	int16(10985), int16(12352), int16(13795), int16(15233),
 18852  	int16(17099), int16(18642), int16(20461), int16(22116),
 18853  	int16(24197), int16(26291), int16(28403), int16(30132),
 18854  	int16(2411), int16(3084), int16(4145), int16(5394),
 18855  	int16(6367), int16(8154), int16(13125), int16(16049),
 18856  	int16(17561), int16(19125), int16(21258), int16(22762),
 18857  	int16(24459), int16(26317), int16(28255), int16(29702),
 18858  	int16(4159), int16(4516), int16(5956), int16(7635),
 18859  	int16(8254), int16(8980), int16(11208), int16(14133),
 18860  	int16(16210), int16(17875), int16(20196), int16(21864),
 18861  	int16(23840), int16(25747), int16(28058), int16(30012),
 18862  	int16(2026), int16(2431), int16(2845), int16(3618),
 18863  	int16(7950), int16(9802), int16(12721), int16(14460),
 18864  	int16(16576), int16(18984), int16(21376), int16(23319),
 18865  	int16(24961), int16(26718), int16(28971), int16(30640),
 18866  	int16(3429), int16(3833), int16(4472), int16(4912),
 18867  	int16(7723), int16(10386), int16(12981), int16(15322),
 18868  	int16(16699), int16(18807), int16(20778), int16(22551),
 18869  	int16(24627), int16(26494), int16(28334), int16(30482),
 18870  	int16(4740), int16(5169), int16(5796), int16(6485),
 18871  	int16(6998), int16(8830), int16(11777), int16(14414),
 18872  	int16(16831), int16(18413), int16(20789), int16(22369),
 18873  	int16(24236), int16(25835), int16(27807), int16(30021),
 18874  	int16(150), int16(168), int16(-17), int16(-107),
 18875  	int16(-142), int16(-229), int16(-320), int16(-406),
 18876  	int16(-503), int16(-620), int16(-867), int16(-935),
 18877  	int16(-902), int16(-680), int16(-398), int16(-114),
 18878  	int16(-398), int16(-355), int16(49), int16(255),
 18879  	int16(114), int16(260), int16(399), int16(264),
 18880  	int16(317), int16(431), int16(514), int16(531),
 18881  	int16(435), int16(356), int16(238), int16(106),
 18882  	int16(-43), int16(-36), int16(-169), int16(-224),
 18883  	int16(-391), int16(-633), int16(-776), int16(-970),
 18884  	int16(-844), int16(-455), int16(-181), int16(-12),
 18885  	int16(85), int16(85), int16(164), int16(195),
 18886  	int16(122), int16(85), int16(-158), int16(-640),
 18887  	int16(-903), int16(9), int16(7), int16(-124),
 18888  	int16(149), int16(32), int16(220), int16(369),
 18889  	int16(242), int16(115), int16(79), int16(84),
 18890  	int16(-146), int16(-216), int16(-70), int16(1024),
 18891  	int16(751), int16(574), int16(440), int16(377),
 18892  	int16(352), int16(203), int16(30), int16(16),
 18893  	int16(-3), int16(81), int16(161), int16(100),
 18894  	int16(-148), int16(-176), int16(933), int16(750),
 18895  	int16(404), int16(171), int16(-2), int16(-146),
 18896  	int16(-411), int16(-442), int16(-541), int16(-552),
 18897  	int16(-442), int16(-269), int16(-240), int16(-52),
 18898  	int16(603), int16(635), int16(405), int16(178),
 18899  	int16(215), int16(19), int16(-153), int16(-167),
 18900  	int16(-290), int16(-219), int16(151), int16(271),
 18901  	int16(151), int16(119), int16(303), int16(266),
 18902  	int16(100), int16(69), int16(-293), int16(-657),
 18903  	int16(939), int16(659), int16(442), int16(351),
 18904  	int16(132), int16(98), int16(-16), int16(-1),
 18905  	int16(-135), int16(-200), int16(-223), int16(-89),
 18906  	int16(167), int16(154), int16(172), int16(237),
 18907  	int16(-45), int16(-183), int16(-228), int16(-486),
 18908  	int16(263), int16(608), int16(158), int16(-125),
 18909  	int16(-390), int16(-227), int16(-118), int16(43),
 18910  	int16(-457), int16(-392), int16(-769), int16(-840),
 18911  	int16(20), int16(-117), int16(-194), int16(-189),
 18912  	int16(-173), int16(-173), int16(-33), int16(32),
 18913  	int16(174), int16(144), int16(115), int16(167),
 18914  	int16(57), int16(44), int16(14), int16(147),
 18915  	int16(96), int16(-54), int16(-142), int16(-129),
 18916  	int16(-254), int16(-331), int16(304), int16(310),
 18917  	int16(-52), int16(-419), int16(-846), int16(-1060),
 18918  	int16(-88), int16(-123), int16(-202), int16(-343),
 18919  	int16(-554), int16(-961), int16(-951), int16(327),
 18920  	int16(159), int16(81), int16(255), int16(227),
 18921  	int16(120), int16(203), int16(256), int16(192),
 18922  	int16(164), int16(224), int16(290), int16(195),
 18923  	int16(216), int16(209), int16(128), int16(832),
 18924  	int16(1028), int16(889), int16(698), int16(504),
 18925  	int16(408), int16(355), int16(218), int16(32),
 18926  	int16(-115), int16(-84), int16(-276), int16(-100),
 18927  	int16(-312), int16(-484), int16(899), int16(682),
 18928  	int16(465), int16(456), int16(241), int16(-12),
 18929  	int16(-275), int16(-425), int16(-461), int16(-367),
 18930  	int16(-33), int16(-28), int16(-102), int16(-194),
 18931  	int16(-527), int16(863), int16(906), int16(463),
 18932  	int16(245), int16(13), int16(-212), int16(-305),
 18933  	int16(-105), int16(163), int16(279), int16(176),
 18934  	int16(93), int16(67), int16(115), int16(192),
 18935  	int16(61), int16(-50), int16(-132), int16(-175),
 18936  	int16(-224), int16(-271), int16(-629), int16(-252),
 18937  	int16(1158), int16(972), int16(638), int16(280),
 18938  	int16(300), int16(326), int16(143), int16(-152),
 18939  	int16(-214), int16(-287), int16(53), int16(-42),
 18940  	int16(-236), int16(-352), int16(-423), int16(-248),
 18941  	int16(-129), int16(-163), int16(-178), int16(-119),
 18942  	int16(85), int16(57), int16(514), int16(382),
 18943  	int16(374), int16(402), int16(424), int16(423),
 18944  	int16(271), int16(197), int16(97), int16(40),
 18945  	int16(39), int16(-97), int16(-191), int16(-164),
 18946  	int16(-230), int16(-256), int16(-410), int16(396),
 18947  	int16(327), int16(127), int16(10), int16(-119),
 18948  	int16(-167), int16(-291), int16(-274), int16(-141),
 18949  	int16(-99), int16(-226), int16(-218), int16(-139),
 18950  	int16(-224), int16(-209), int16(-268), int16(-442),
 18951  	int16(-413), int16(222), int16(58), int16(521),
 18952  	int16(344), int16(258), int16(76), int16(-42),
 18953  	int16(-142), int16(-165), int16(-123), int16(-92),
 18954  	int16(47), int16(8), int16(-3), int16(-191),
 18955  	int16(-11), int16(-164), int16(-167), int16(-351),
 18956  	int16(-740), int16(311), int16(538), int16(291),
 18957  	int16(184), int16(29), int16(-105), int16(9),
 18958  	int16(-30), int16(-54), int16(-17), int16(-77),
 18959  	int16(-271), int16(-412), int16(-622), int16(-648),
 18960  	int16(476), int16(186), int16(-66), int16(-197),
 18961  	int16(-73), int16(-94), int16(-15), int16(47),
 18962  	int16(28), int16(112), int16(-58), int16(-33),
 18963  	int16(65), int16(19), int16(84), int16(86),
 18964  	int16(276), int16(114), int16(472), int16(786),
 18965  	int16(799), int16(625), int16(415), int16(178),
 18966  	int16(-35), int16(-26), int16(5), int16(9),
 18967  	int16(83), int16(39), int16(37), int16(39),
 18968  	int16(-184), int16(-374), int16(-265), int16(-362),
 18969  	int16(-501), int16(337), int16(716), int16(478),
 18970  	int16(-60), int16(-125), int16(-163), int16(362),
 18971  	int16(17), int16(-122), int16(-233), int16(279),
 18972  	int16(138), int16(157), int16(318), int16(193),
 18973  	int16(189), int16(209), int16(266), int16(252),
 18974  	int16(-46), int16(-56), int16(-277), int16(-429),
 18975  	int16(464), int16(386), int16(142), int16(44),
 18976  	int16(-43), int16(66), int16(264), int16(182),
 18977  	int16(47), int16(14), int16(-26), int16(-79),
 18978  	int16(49), int16(15), int16(-128), int16(-203),
 18979  	int16(-400), int16(-478), int16(325), int16(27),
 18980  	int16(234), int16(411), int16(205), int16(129),
 18981  	int16(12), int16(58), int16(123), int16(57),
 18982  	int16(171), int16(137), int16(96), int16(128),
 18983  	int16(-32), int16(134), int16(-12), int16(57),
 18984  	int16(119), int16(26), int16(-22), int16(-165),
 18985  	int16(-500), int16(-701), int16(-528), int16(-116),
 18986  	int16(64), int16(-8), int16(97), int16(-9),
 18987  	int16(-162), int16(-66), int16(-156), int16(-194),
 18988  	int16(-303), int16(-546), int16(-341), int16(546),
 18989  	int16(358), int16(95), int16(45), int16(76),
 18990  	int16(270), int16(403), int16(205), int16(100),
 18991  	int16(123), int16(50), int16(-53), int16(-144),
 18992  	int16(-110), int16(-13), int16(32), int16(-228),
 18993  	int16(-130), int16(353), int16(296), int16(56),
 18994  	int16(-372), int16(-253), int16(365), int16(73),
 18995  	int16(10), int16(-34), int16(-139), int16(-191),
 18996  	int16(-96), int16(5), int16(44), int16(-85),
 18997  	int16(-179), int16(-129), int16(-192), int16(-246),
 18998  	int16(-85), int16(-110), int16(-155), int16(-44),
 18999  	int16(-27), int16(145), int16(138), int16(79),
 19000  	int16(32), int16(-148), int16(-577), int16(-634),
 19001  	int16(191), int16(94), int16(-9), int16(-35),
 19002  	int16(-77), int16(-84), int16(-56), int16(-171),
 19003  	int16(-298), int16(-271), int16(-243), int16(-156),
 19004  	int16(-328), int16(-235), int16(-76), int16(-128),
 19005  	int16(-121), int16(129), int16(13), int16(-22),
 19006  	int16(32), int16(45), int16(-248), int16(-65),
 19007  	int16(193), int16(-81), int16(299), int16(57),
 19008  	int16(-147), int16(192), int16(-165), int16(-354),
 19009  	int16(-334), int16(-106), int16(-156), int16(-40),
 19010  	int16(-3), int16(-68), int16(124), int16(-257),
 19011  	int16(78), int16(124), int16(170), int16(412),
 19012  	int16(227), int16(105), int16(-104), int16(12),
 19013  	int16(154), int16(250), int16(274), int16(258),
 19014  	int16(4), int16(-27), int16(235), int16(152),
 19015  	int16(51), int16(338), int16(300), int16(7),
 19016  	int16(-314), int16(-411), int16(215), int16(170),
 19017  	int16(-9), int16(-93), int16(-77), int16(76),
 19018  	int16(67), int16(54), int16(200), int16(315),
 19019  	int16(163), int16(72), int16(-91), int16(-402),
 19020  	int16(158), int16(187), int16(-156), int16(-91),
 19021  	int16(290), int16(267), int16(167), int16(91),
 19022  	int16(140), int16(171), int16(112), int16(9),
 19023  	int16(-42), int16(-177), int16(-440), int16(385),
 19024  	int16(80), int16(15), int16(172), int16(129),
 19025  	int16(41), int16(-129), int16(-372), int16(-24),
 19026  	int16(-75), int16(-30), int16(-170), int16(10),
 19027  	int16(-118), int16(57), int16(78), int16(-101),
 19028  	int16(232), int16(161), int16(123), int16(256),
 19029  	int16(277), int16(101), int16(-192), int16(-629),
 19030  	int16(-100), int16(-60), int16(-232), int16(66),
 19031  	int16(13), int16(-13), int16(-80), int16(-239),
 19032  	int16(239), int16(37), int16(32), int16(89),
 19033  	int16(-319), int16(-579), int16(450), int16(360),
 19034  	int16(3), int16(-29), int16(-299), int16(-89),
 19035  	int16(-54), int16(-110), int16(-246), int16(-164),
 19036  	int16(6), int16(-188), int16(338), int16(176),
 19037  	int16(-92), int16(197), int16(137), int16(134),
 19038  	int16(12), int16(-2), int16(56), int16(-183),
 19039  	int16(114), int16(-36), int16(-131), int16(-204),
 19040  	int16(75), int16(-25), int16(-174), int16(191),
 19041  	int16(-15), int16(-290), int16(-429), int16(-267),
 19042  	int16(79), int16(37), int16(106), int16(23),
 19043  	int16(-384), int16(425), int16(70), int16(-14),
 19044  	int16(212), int16(105), int16(15), int16(-2),
 19045  	int16(-42), int16(-37), int16(-123), int16(108),
 19046  	int16(28), int16(-48), int16(193), int16(197),
 19047  	int16(173), int16(-33), int16(37), int16(73),
 19048  	int16(-57), int16(256), int16(137), int16(-58),
 19049  	int16(-430), int16(-228), int16(217), int16(-51),
 19050  	int16(-10), int16(-58), int16(-6), int16(22),
 19051  	int16(104), int16(61), int16(-119), int16(169),
 19052  	int16(144), int16(16), int16(-46), int16(-394),
 19053  	int16(60), int16(454), int16(-80), int16(-298),
 19054  	int16(-65), int16(25), int16(0), int16(-24),
 19055  	int16(-65), int16(-417), int16(465), int16(276),
 19056  	int16(-3), int16(-194), int16(-13), int16(130),
 19057  	int16(19), int16(-6), int16(-21), int16(-24),
 19058  	int16(-180), int16(-53), int16(-85), int16(20),
 19059  	int16(118), int16(147), int16(113), int16(-75),
 19060  	int16(-289), int16(226), int16(-122), int16(227),
 19061  	int16(270), int16(125), int16(109), int16(197),
 19062  	int16(125), int16(138), int16(44), int16(60),
 19063  	int16(25), int16(-55), int16(-167), int16(-32),
 19064  	int16(-139), int16(-193), int16(-173), int16(-316),
 19065  	int16(287), int16(-208), int16(253), int16(239),
 19066  	int16(27), int16(-80), int16(-188), int16(-28),
 19067  	int16(-182), int16(-235), int16(156), int16(-117),
 19068  	int16(128), int16(-48), int16(-58), int16(-226),
 19069  	int16(172), int16(181), int16(167), int16(19),
 19070  	int16(62), int16(10), int16(2), int16(181),
 19071  	int16(151), int16(108), int16(-16), int16(-11),
 19072  	int16(-78), int16(-331), int16(411), int16(133),
 19073  	int16(17), int16(104), int16(64), int16(-184),
 19074  	int16(24), int16(-30), int16(-3), int16(-283),
 19075  	int16(121), int16(204), int16(-8), int16(-199),
 19076  	int16(-21), int16(-80), int16(-169), int16(-157),
 19077  	int16(-191), int16(-136), int16(81), int16(155),
 19078  	int16(14), int16(-131), int16(244), int16(74),
 19079  	int16(-57), int16(-47), int16(-280), int16(347),
 19080  	int16(111), int16(-77), int16(-128), int16(-142),
 19081  	int16(-194), int16(-125), int16(-6), int16(-68),
 19082  	int16(91), int16(1), int16(23), int16(14),
 19083  	int16(-154), int16(-34), int16(23), int16(-38),
 19084  	int16(-343), int16(503), int16(146), int16(-38),
 19085  	int16(-46), int16(-41), int16(58), int16(31),
 19086  	int16(63), int16(-48), int16(-117), int16(45),
 19087  	int16(28), int16(1), int16(-89), int16(-5),
 19088  	int16(-44), int16(-29), int16(-448), int16(487),
 19089  	int16(204), int16(81), int16(46), int16(-106),
 19090  	int16(-302), int16(380), int16(120), int16(-38),
 19091  	int16(-12), int16(-39), int16(70), int16(-3),
 19092  	int16(25), int16(-65), int16(30), int16(-11),
 19093  	int16(34), int16(-15), int16(22), int16(-115),
 19094  	int16(0), int16(-79), int16(-83), int16(45),
 19095  	int16(114), int16(43), int16(150), int16(36),
 19096  	int16(233), int16(149), int16(195), int16(5),
 19097  	int16(25), int16(-52), int16(-475), int16(274),
 19098  	int16(28), int16(-39), int16(-8), int16(-66),
 19099  	int16(-255), int16(258), int16(56), int16(143),
 19100  	int16(-45), int16(-190), int16(165), int16(-60),
 19101  	int16(20), int16(2), int16(125), int16(-129),
 19102  	int16(51), int16(-8), int16(-335), int16(288),
 19103  	int16(38), int16(59), int16(25), int16(-42),
 19104  	int16(23), int16(-118), int16(-112), int16(11),
 19105  	int16(-55), int16(-133), int16(-109), int16(24),
 19106  	int16(-105), int16(78), int16(-64), int16(-245),
 19107  	int16(202), int16(-65), int16(-127), int16(162),
 19108  	int16(40), int16(-94), int16(89), int16(-85),
 19109  	int16(-119), int16(-103), int16(97), int16(9),
 19110  	int16(-70), int16(-28), int16(194), int16(86),
 19111  	int16(-112), int16(-92), int16(-114), int16(74),
 19112  	int16(-49), int16(46), int16(-84), int16(-178),
 19113  	int16(113), int16(52), int16(-205), int16(333),
 19114  	int16(88), int16(222), int16(56), int16(-55),
 19115  	int16(13), int16(86), int16(4), int16(-77),
 19116  	int16(224), int16(114), int16(-105), int16(112),
 19117  	int16(125), int16(-29), int16(-18), int16(-144),
 19118  	int16(22), int16(-58), int16(-99), int16(28),
 19119  	int16(114), int16(-66), int16(-32), int16(-169),
 19120  	int16(-314), int16(285), int16(72), int16(-74),
 19121  	int16(179), int16(28), int16(-79), int16(-182),
 19122  	int16(13), int16(-55), int16(147), int16(13),
 19123  	int16(12), int16(-54), int16(31), int16(-84),
 19124  	int16(-17), int16(-75), int16(-228), int16(83),
 19125  	int16(-375), int16(436), int16(110), int16(-63),
 19126  	int16(-27), int16(-136), int16(169), int16(-56),
 19127  	int16(-8), int16(-171), int16(184), int16(-42),
 19128  	int16(148), int16(68), int16(204), int16(235),
 19129  	int16(110), int16(-229), int16(91), int16(171),
 19130  	int16(-43), int16(-3), int16(-26), int16(-99),
 19131  	int16(-111), int16(71), int16(-170), int16(202),
 19132  	int16(-67), int16(181), int16(-37), int16(109),
 19133  	int16(-120), int16(3), int16(-55), int16(-260),
 19134  	int16(-16), int16(152), int16(91), int16(142),
 19135  	int16(42), int16(44), int16(134), int16(47),
 19136  	int16(17), int16(-35), int16(22), int16(79),
 19137  	int16(-169), int16(41), int16(46), int16(277),
 19138  	int16(-93), int16(-49), int16(-126), int16(37),
 19139  	int16(-103), int16(-34), int16(-22), int16(-90),
 19140  	int16(-134), int16(-205), int16(92), int16(-9),
 19141  	int16(1), int16(-195), int16(-239), int16(45),
 19142  	int16(54), int16(18), int16(-23), int16(-1),
 19143  	int16(-80), int16(-98), int16(-20), int16(-261),
 19144  	int16(306), int16(72), int16(20), int16(-89),
 19145  	int16(-217), int16(11), int16(6), int16(-82),
 19146  	int16(89), int16(13), int16(-129), int16(-89),
 19147  	int16(83), int16(-71), int16(-55), int16(130),
 19148  	int16(-98), int16(-146), int16(-27), int16(-57),
 19149  	int16(53), int16(275), int16(17), int16(170),
 19150  	int16(-5), int16(-54), int16(132), int16(-64),
 19151  	int16(72), int16(160), int16(-125), int16(-168),
 19152  	int16(72), int16(40), int16(170), int16(78),
 19153  	int16(248), int16(116), int16(20), int16(84),
 19154  	int16(31), int16(-34), int16(190), int16(38),
 19155  	int16(13), int16(-106), int16(225), int16(27),
 19156  	int16(-168), int16(24), int16(-157), int16(-122),
 19157  	int16(165), int16(11), int16(-161), int16(-213),
 19158  	int16(-12), int16(-51), int16(-101), int16(42),
 19159  	int16(101), int16(27), int16(55), int16(111),
 19160  	int16(75), int16(71), int16(-96), int16(-1),
 19161  	int16(65), int16(-277), int16(393), int16(-26),
 19162  	int16(-44), int16(-68), int16(-84), int16(-66),
 19163  	int16(-95), int16(235), int16(179), int16(-25),
 19164  	int16(-41), int16(27), int16(-91), int16(-128),
 19165  	int16(-222), int16(146), int16(-72), int16(-30),
 19166  	int16(-24), int16(55), int16(-126), int16(-68),
 19167  	int16(-58), int16(-127), int16(13), int16(-97),
 19168  	int16(-106), int16(174), int16(-100), int16(155),
 19169  	int16(101), int16(-146), int16(-21), int16(261),
 19170  	int16(22), int16(38), int16(-66), int16(65),
 19171  	int16(4), int16(70), int16(64), int16(144),
 19172  	int16(59), int16(213), int16(71), int16(-337),
 19173  	int16(303), int16(-52), int16(51), int16(-56),
 19174  	int16(1), int16(10), int16(-15), int16(-5),
 19175  	int16(34), int16(52), int16(228), int16(131),
 19176  	int16(161), int16(-127), int16(-214), int16(238),
 19177  	int16(123), int16(64), int16(-147), int16(-50),
 19178  	int16(-34), int16(-127), int16(204), int16(162),
 19179  	int16(85), int16(41), int16(5), int16(-140),
 19180  	int16(73), int16(-150), int16(56), int16(-96),
 19181  	int16(-66), int16(-20), int16(2), int16(-235),
 19182  	int16(59), int16(-22), int16(-107), int16(150),
 19183  	int16(-16), int16(-47), int16(-4), int16(81),
 19184  	int16(-67), int16(167), int16(149), int16(149),
 19185  	int16(-157), int16(288), int16(-156), int16(-27),
 19186  	int16(-8), int16(18), int16(83), int16(-24),
 19187  	int16(-41), int16(-167), int16(158), int16(-100),
 19188  	int16(93), int16(53), int16(201), int16(15),
 19189  	int16(42), int16(266), int16(278), int16(-12),
 19190  	int16(-6), int16(-37), int16(85), int16(6),
 19191  	int16(20), int16(-188), int16(-271), int16(107),
 19192  	int16(-13), int16(-80), int16(51), int16(202),
 19193  	int16(173), int16(-69), int16(78), int16(-188),
 19194  	int16(46), int16(4), int16(153), int16(12),
 19195  	int16(-138), int16(169), int16(5), int16(-58),
 19196  	int16(-123), int16(-108), int16(-243), int16(150),
 19197  	int16(10), int16(-191), int16(246), int16(-15),
 19198  	int16(38), int16(25), int16(-10), int16(14),
 19199  	int16(61), int16(50), int16(-206), int16(-215),
 19200  	int16(-220), int16(90), int16(5), int16(-149),
 19201  	int16(-219), int16(56), int16(142), int16(24),
 19202  	int16(-376), int16(77), int16(-80), int16(75),
 19203  	int16(6), int16(42), int16(-101), int16(16),
 19204  	int16(56), int16(14), int16(-57), int16(3),
 19205  	int16(-17), int16(80), int16(57), int16(-36),
 19206  	int16(88), int16(-59), int16(-97), int16(-19),
 19207  	int16(-148), int16(46), int16(-219), int16(226),
 19208  	int16(114), int16(-4), int16(-72), int16(-15),
 19209  	int16(37), int16(-49), int16(-28), int16(247),
 19210  	int16(44), int16(123), int16(47), int16(-122),
 19211  	int16(-38), int16(17), int16(4), int16(-113),
 19212  	int16(-32), int16(-224), int16(154), int16(-134),
 19213  	int16(196), int16(71), int16(-267), int16(-85),
 19214  	int16(28), int16(-70), int16(89), int16(-120),
 19215  	int16(99), int16(-2), int16(64), int16(76),
 19216  	int16(-166), int16(-48), int16(189), int16(-35),
 19217  	int16(-92), int16(-169), int16(-123), int16(339),
 19218  	int16(38), int16(-25), int16(38), int16(-35),
 19219  	int16(225), int16(-139), int16(-50), int16(-63),
 19220  	int16(246), int16(60), int16(-185), int16(-109),
 19221  	int16(-49), int16(-53), int16(-167), int16(51),
 19222  	int16(149), int16(60), int16(-101), int16(-33),
 19223  	int16(25), int16(-76), int16(120), int16(32),
 19224  	int16(-30), int16(-83), int16(102), int16(91),
 19225  	int16(-186), int16(-261), int16(131), int16(-197),
 19226  } /* SKP_Silk_tables_NLSF_CB0_16.c:429:17 */
 19227  
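// Per-stage descriptors for the 10-stage MSVQ codebook CB0 used for 16-coefficient
// NLSF vectors: FnVectors is the number of codebook vectors in each stage.
// FCB_NLSF_Q15 and FRates_Q5 are pointers (uintptr in this ccgo translation) into the
// corresponding _Q15 and _rates_Q5 tables; they are zero here and are presumably
// patched by generated initialization code elsewhere in this file.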
 19228  var SKP_Silk_NLSF_CB0_16_Stage_info = [10]SKP_Silk_NLSF_CBS{
 19229  	{FnVectors: 128, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19230  	{FnVectors: 16, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19231  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19232  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19233  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19234  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19235  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19236  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19237  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19238  	{FnVectors: 16, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19239  } /* SKP_Silk_tables_NLSF_CB0_16.c:1297:25 */
 19240  
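// Top-level descriptor for NLSF codebook 0 (16 coefficients): FnStages gives the
// number of MSVQ stages; the remaining fields are pointers into the stage-info,
// delta-minimum, CDF, CDF-start-pointer and CDF-middle-index tables, left as zero
// placeholders in this translation.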
 19241  var SKP_Silk_NLSF_CB0_16 = SKP_Silk_NLSF_CB_struct{
 19242  	FnStages:       10,
 19243  	FCBStages:      0,
 19244  	FNDeltaMin_Q15: 0,
 19245  	FCDF:           0,
 19246  	FStartPtr:      0,
 19247  	FMiddleIx:      0,
 19248  } /* SKP_Silk_tables_NLSF_CB0_16.c:1311:31 */
 19249  
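// Cumulative distribution tables for the range coder, one concatenated segment per
// MSVQ stage of codebook CB1 for 10-coefficient NLSF vectors; each segment starts at 0
// and ends at 65535 (probability 1.0 in Q16).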
 19250  var SKP_Silk_NLSF_MSVQ_CB1_10_CDF = [78]uint16{
 19251  	uint16(0),
 19252  	uint16(17096),
 19253  	uint16(24130),
 19254  	uint16(28997),
 19255  	uint16(33179),
 19256  	uint16(36696),
 19257  	uint16(40213),
 19258  	uint16(42493),
 19259  	uint16(44252),
 19260  	uint16(45973),
 19261  	uint16(47551),
 19262  	uint16(49095),
 19263  	uint16(50542),
 19264  	uint16(51898),
 19265  	uint16(53196),
 19266  	uint16(54495),
 19267  	uint16(55685),
 19268  	uint16(56851),
 19269  	uint16(57749),
 19270  	uint16(58628),
 19271  	uint16(59435),
 19272  	uint16(60207),
 19273  	uint16(60741),
 19274  	uint16(61220),
 19275  	uint16(61700),
 19276  	uint16(62179),
 19277  	uint16(62659),
 19278  	uint16(63138),
 19279  	uint16(63617),
 19280  	uint16(64097),
 19281  	uint16(64576),
 19282  	uint16(65056),
 19283  	uint16(65535),
 19284  	uint16(0),
 19285  	uint16(20378),
 19286  	uint16(33032),
 19287  	uint16(40395),
 19288  	uint16(46721),
 19289  	uint16(51707),
 19290  	uint16(56585),
 19291  	uint16(61157),
 19292  	uint16(65535),
 19293  	uint16(0),
 19294  	uint16(15055),
 19295  	uint16(25472),
 19296  	uint16(35447),
 19297  	uint16(42501),
 19298  	uint16(48969),
 19299  	uint16(54773),
 19300  	uint16(60212),
 19301  	uint16(65535),
 19302  	uint16(0),
 19303  	uint16(12069),
 19304  	uint16(22440),
 19305  	uint16(32812),
 19306  	uint16(40145),
 19307  	uint16(46870),
 19308  	uint16(53595),
 19309  	uint16(59630),
 19310  	uint16(65535),
 19311  	uint16(0),
 19312  	uint16(10839),
 19313  	uint16(19954),
 19314  	uint16(27957),
 19315  	uint16(35961),
 19316  	uint16(43965),
 19317  	uint16(51465),
 19318  	uint16(58805),
 19319  	uint16(65535),
 19320  	uint16(0),
 19321  	uint16(8933),
 19322  	uint16(17674),
 19323  	uint16(26415),
 19324  	uint16(34785),
 19325  	uint16(42977),
 19326  	uint16(50820),
 19327  	uint16(58496),
 19328  	uint16(65535),
 19329  } /* SKP_Silk_tables_NLSF_CB1_10.c:38:18 */
 19330  
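// Pointers to the start of each stage's segment inside the CDF table above
// (zero placeholders here, presumably filled in at init time).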
 19331  var SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr = [6]uintptr{
 19332  	0,
 19333  	0,
 19334  	0,
 19335  	0,
 19336  	0,
 19337  	0,
 19338  } /* SKP_Silk_tables_NLSF_CB1_10.c:120:18 */
 19339  
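// Midpoint index of each stage's CDF segment; the range decoder appears to use it as
// the starting point when searching the CDF for the decoded symbol.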
 19340  var SKP_Silk_NLSF_MSVQ_CB1_10_CDF_middle_idx = [6]int32{
 19341  	5,
 19342  	3,
 19343  	4,
 19344  	4,
 19345  	5,
 19346  	5,
 19347  } /* SKP_Silk_tables_NLSF_CB1_10.c:130:15 */
 19348  
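// Per-vector coding rates in Q5 (units of 1/32 bit), one entry per codebook vector
// across all stages (32 + 5*8 = 72); presumably used by the encoder's
// rate-distortion search when selecting codebook indices.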
 19349  var SKP_Silk_NLSF_MSVQ_CB1_10_rates_Q5 = [72]int16{
 19350  	int16(62), int16(103),
 19351  	int16(120), int16(127),
 19352  	int16(135), int16(135),
 19353  	int16(155), int16(167),
 19354  	int16(168), int16(172),
 19355  	int16(173), int16(176),
 19356  	int16(179), int16(181),
 19357  	int16(181), int16(185),
 19358  	int16(186), int16(198),
 19359  	int16(199), int16(203),
 19360  	int16(205), int16(222),
 19361  	int16(227), int16(227),
 19362  	int16(227), int16(227),
 19363  	int16(227), int16(227),
 19364  	int16(227), int16(227),
 19365  	int16(227), int16(227),
 19366  	int16(54), int16(76),
 19367  	int16(101), int16(108),
 19368  	int16(119), int16(120),
 19369  	int16(123), int16(125),
 19370  	int16(68), int16(85),
 19371  	int16(87), int16(103),
 19372  	int16(107), int16(112),
 19373  	int16(115), int16(116),
 19374  	int16(78), int16(85),
 19375  	int16(85), int16(101),
 19376  	int16(105), int16(105),
 19377  	int16(110), int16(111),
 19378  	int16(83), int16(91),
 19379  	int16(97), int16(97),
 19380  	int16(97), int16(100),
 19381  	int16(101), int16(105),
 19382  	int16(92), int16(93),
 19383  	int16(93), int16(95),
 19384  	int16(96), int16(98),
 19385  	int16(99), int16(103),
 19386  } /* SKP_Silk_tables_NLSF_CB1_10.c:140:17 */
 19387  
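// Minimum allowed spacing between adjacent NLSF coefficients in Q15, one entry per
// coefficient boundary (order + 1 = 11 values); consumed by the NLSF stabilization
// code.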
 19388  var SKP_Silk_NLSF_MSVQ_CB1_10_ndelta_min_Q15 = [11]int32{
 19389  	462,
 19390  	3,
 19391  	64,
 19392  	74,
 19393  	98,
 19394  	50,
 19395  	97,
 19396  	68,
 19397  	120,
 19398  	53,
 19399  	639,
 19400  } /* SKP_Silk_tables_NLSF_CB1_10.c:180:15 */
 19401  
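// The codebook vectors themselves in Q15: 72 vectors of 10 coefficients each,
// concatenated stage by stage. The first stage appears to hold full NLSF vectors and
// the later stages residual corrections.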
 19402  var SKP_Silk_NLSF_MSVQ_CB1_10_Q15 = [720]int16{
 19403  	int16(1877), int16(4646),
 19404  	int16(7712), int16(10745),
 19405  	int16(13964), int16(17028),
 19406  	int16(20239), int16(23182),
 19407  	int16(26471), int16(29287),
 19408  	int16(1612), int16(3278),
 19409  	int16(7086), int16(9975),
 19410  	int16(13228), int16(16264),
 19411  	int16(19596), int16(22690),
 19412  	int16(26037), int16(28965),
 19413  	int16(2169), int16(3830),
 19414  	int16(6460), int16(8958),
 19415  	int16(11960), int16(14750),
 19416  	int16(18408), int16(21659),
 19417  	int16(25018), int16(28043),
 19418  	int16(3680), int16(6024),
 19419  	int16(8986), int16(12256),
 19420  	int16(15201), int16(18188),
 19421  	int16(21741), int16(24460),
 19422  	int16(27484), int16(30059),
 19423  	int16(2584), int16(5187),
 19424  	int16(7799), int16(10902),
 19425  	int16(13179), int16(15765),
 19426  	int16(19017), int16(22431),
 19427  	int16(25891), int16(28698),
 19428  	int16(3731), int16(5751),
 19429  	int16(8650), int16(11742),
 19430  	int16(15090), int16(17407),
 19431  	int16(20391), int16(23421),
 19432  	int16(26228), int16(29247),
 19433  	int16(2107), int16(6323),
 19434  	int16(8915), int16(12226),
 19435  	int16(14775), int16(17791),
 19436  	int16(20664), int16(23679),
 19437  	int16(26829), int16(29353),
 19438  	int16(1677), int16(2870),
 19439  	int16(5386), int16(8077),
 19440  	int16(11817), int16(15176),
 19441  	int16(18657), int16(22006),
 19442  	int16(25513), int16(28689),
 19443  	int16(2111), int16(3625),
 19444  	int16(7027), int16(10588),
 19445  	int16(14059), int16(17193),
 19446  	int16(21137), int16(24260),
 19447  	int16(27577), int16(30036),
 19448  	int16(2428), int16(4010),
 19449  	int16(5765), int16(9376),
 19450  	int16(13805), int16(15821),
 19451  	int16(19444), int16(22389),
 19452  	int16(25295), int16(29310),
 19453  	int16(2256), int16(4628),
 19454  	int16(8377), int16(12441),
 19455  	int16(15283), int16(19462),
 19456  	int16(22257), int16(25551),
 19457  	int16(28432), int16(30304),
 19458  	int16(2352), int16(3675),
 19459  	int16(6129), int16(11868),
 19460  	int16(14551), int16(16655),
 19461  	int16(19624), int16(21883),
 19462  	int16(26526), int16(28849),
 19463  	int16(5243), int16(7248),
 19464  	int16(10558), int16(13269),
 19465  	int16(15651), int16(17919),
 19466  	int16(21141), int16(23827),
 19467  	int16(27102), int16(29519),
 19468  	int16(4422), int16(6725),
 19469  	int16(10449), int16(13273),
 19470  	int16(16124), int16(19921),
 19471  	int16(22826), int16(26061),
 19472  	int16(28763), int16(30583),
 19473  	int16(4508), int16(6291),
 19474  	int16(9504), int16(11809),
 19475  	int16(13827), int16(15950),
 19476  	int16(19077), int16(22084),
 19477  	int16(25740), int16(28658),
 19478  	int16(2540), int16(4297),
 19479  	int16(8579), int16(13578),
 19480  	int16(16634), int16(19101),
 19481  	int16(21547), int16(23887),
 19482  	int16(26777), int16(29146),
 19483  	int16(3377), int16(6358),
 19484  	int16(10224), int16(14518),
 19485  	int16(17905), int16(21056),
 19486  	int16(23637), int16(25784),
 19487  	int16(28161), int16(30109),
 19488  	int16(4177), int16(5942),
 19489  	int16(8159), int16(10108),
 19490  	int16(12130), int16(15470),
 19491  	int16(20191), int16(23326),
 19492  	int16(26782), int16(29359),
 19493  	int16(2492), int16(3801),
 19494  	int16(6144), int16(9825),
 19495  	int16(16000), int16(18671),
 19496  	int16(20893), int16(23663),
 19497  	int16(25899), int16(28974),
 19498  	int16(3011), int16(4727),
 19499  	int16(6834), int16(10505),
 19500  	int16(12465), int16(14496),
 19501  	int16(17065), int16(20052),
 19502  	int16(25265), int16(28057),
 19503  	int16(4149), int16(7197),
 19504  	int16(12338), int16(15076),
 19505  	int16(18002), int16(20190),
 19506  	int16(22187), int16(24723),
 19507  	int16(27083), int16(29125),
 19508  	int16(2975), int16(4578),
 19509  	int16(6448), int16(8378),
 19510  	int16(9671), int16(13225),
 19511  	int16(19502), int16(22277),
 19512  	int16(26058), int16(28850),
 19513  	int16(4102), int16(5760),
 19514  	int16(7744), int16(9484),
 19515  	int16(10744), int16(12308),
 19516  	int16(14677), int16(19607),
 19517  	int16(24841), int16(28381),
 19518  	int16(4931), int16(9287),
 19519  	int16(12477), int16(13395),
 19520  	int16(13712), int16(14351),
 19521  	int16(16048), int16(19867),
 19522  	int16(24188), int16(28994),
 19523  	int16(4141), int16(7867),
 19524  	int16(13140), int16(17720),
 19525  	int16(20064), int16(21108),
 19526  	int16(21692), int16(22722),
 19527  	int16(23736), int16(27449),
 19528  	int16(4011), int16(8720),
 19529  	int16(13234), int16(16206),
 19530  	int16(17601), int16(18289),
 19531  	int16(18524), int16(19689),
 19532  	int16(23234), int16(27882),
 19533  	int16(3420), int16(5995),
 19534  	int16(11230), int16(15117),
 19535  	int16(15907), int16(16783),
 19536  	int16(17762), int16(23347),
 19537  	int16(26898), int16(29946),
 19538  	int16(3080), int16(6786),
 19539  	int16(10465), int16(13676),
 19540  	int16(18059), int16(23615),
 19541  	int16(27058), int16(29082),
 19542  	int16(29563), int16(29905),
 19543  	int16(3038), int16(5620),
 19544  	int16(9266), int16(12870),
 19545  	int16(18803), int16(19610),
 19546  	int16(20010), int16(20802),
 19547  	int16(23882), int16(29306),
 19548  	int16(3314), int16(6420),
 19549  	int16(9046), int16(13262),
 19550  	int16(15869), int16(23117),
 19551  	int16(23667), int16(24215),
 19552  	int16(24487), int16(25915),
 19553  	int16(3469), int16(6963),
 19554  	int16(10103), int16(15282),
 19555  	int16(20531), int16(23240),
 19556  	int16(25024), int16(26021),
 19557  	int16(26736), int16(27255),
 19558  	int16(3041), int16(6459),
 19559  	int16(9777), int16(12896),
 19560  	int16(16315), int16(19410),
 19561  	int16(24070), int16(29353),
 19562  	int16(31795), int16(32075),
 19563  	int16(-200), int16(-134),
 19564  	int16(-113), int16(-204),
 19565  	int16(-347), int16(-440),
 19566  	int16(-352), int16(-211),
 19567  	int16(-418), int16(-172),
 19568  	int16(-313), int16(59),
 19569  	int16(495), int16(772),
 19570  	int16(721), int16(614),
 19571  	int16(334), int16(444),
 19572  	int16(225), int16(242),
 19573  	int16(161), int16(16),
 19574  	int16(274), int16(564),
 19575  	int16(-73), int16(-188),
 19576  	int16(-395), int16(-171),
 19577  	int16(777), int16(508),
 19578  	int16(1340), int16(1145),
 19579  	int16(699), int16(196),
 19580  	int16(223), int16(173),
 19581  	int16(90), int16(25),
 19582  	int16(-26), int16(18),
 19583  	int16(133), int16(-105),
 19584  	int16(-360), int16(-277),
 19585  	int16(859), int16(634),
 19586  	int16(41), int16(-557),
 19587  	int16(-768), int16(-926),
 19588  	int16(-601), int16(-1021),
 19589  	int16(-1189), int16(-365),
 19590  	int16(225), int16(107),
 19591  	int16(374), int16(-50),
 19592  	int16(433), int16(417),
 19593  	int16(156), int16(39),
 19594  	int16(-597), int16(-1397),
 19595  	int16(-1594), int16(-592),
 19596  	int16(-485), int16(-292),
 19597  	int16(253), int16(87),
 19598  	int16(-0), int16(-6),
 19599  	int16(-25), int16(-345),
 19600  	int16(-240), int16(120),
 19601  	int16(1261), int16(946),
 19602  	int16(166), int16(-277),
 19603  	int16(241), int16(167),
 19604  	int16(170), int16(429),
 19605  	int16(518), int16(714),
 19606  	int16(602), int16(254),
 19607  	int16(134), int16(92),
 19608  	int16(-152), int16(-324),
 19609  	int16(-394), int16(49),
 19610  	int16(-151), int16(-304),
 19611  	int16(-724), int16(-657),
 19612  	int16(-162), int16(-369),
 19613  	int16(-35), int16(3),
 19614  	int16(-2), int16(-312),
 19615  	int16(-200), int16(-92),
 19616  	int16(-227), int16(242),
 19617  	int16(628), int16(565),
 19618  	int16(-124), int16(1056),
 19619  	int16(770), int16(101),
 19620  	int16(-84), int16(-33),
 19621  	int16(4), int16(-192),
 19622  	int16(-272), int16(5),
 19623  	int16(-627), int16(-977),
 19624  	int16(419), int16(472),
 19625  	int16(53), int16(-103),
 19626  	int16(145), int16(322),
 19627  	int16(-95), int16(-31),
 19628  	int16(-100), int16(-303),
 19629  	int16(-560), int16(-1067),
 19630  	int16(-413), int16(714),
 19631  	int16(283), int16(2),
 19632  	int16(-223), int16(-367),
 19633  	int16(523), int16(360),
 19634  	int16(-38), int16(-115),
 19635  	int16(378), int16(-591),
 19636  	int16(-718), int16(448),
 19637  	int16(-481), int16(-274),
 19638  	int16(180), int16(-88),
 19639  	int16(-581), int16(-157),
 19640  	int16(-696), int16(-1265),
 19641  	int16(394), int16(-479),
 19642  	int16(-23), int16(124),
 19643  	int16(-43), int16(19),
 19644  	int16(-113), int16(-236),
 19645  	int16(-412), int16(-659),
 19646  	int16(-200), int16(2),
 19647  	int16(-69), int16(-342),
 19648  	int16(199), int16(55),
 19649  	int16(58), int16(-36),
 19650  	int16(-51), int16(-62),
 19651  	int16(507), int16(507),
 19652  	int16(427), int16(442),
 19653  	int16(36), int16(601),
 19654  	int16(-141), int16(68),
 19655  	int16(274), int16(274),
 19656  	int16(68), int16(-12),
 19657  	int16(-4), int16(71),
 19658  	int16(-193), int16(-464),
 19659  	int16(-425), int16(-383),
 19660  	int16(408), int16(203),
 19661  	int16(-337), int16(236),
 19662  	int16(410), int16(-59),
 19663  	int16(-25), int16(-341),
 19664  	int16(-449), int16(28),
 19665  	int16(-9), int16(90),
 19666  	int16(332), int16(-14),
 19667  	int16(-905), int16(96),
 19668  	int16(-540), int16(-242),
 19669  	int16(679), int16(-59),
 19670  	int16(192), int16(-24),
 19671  	int16(60), int16(-217),
 19672  	int16(5), int16(-37),
 19673  	int16(179), int16(-20),
 19674  	int16(311), int16(519),
 19675  	int16(274), int16(72),
 19676  	int16(-326), int16(-1030),
 19677  	int16(-262), int16(213),
 19678  	int16(380), int16(82),
 19679  	int16(328), int16(411),
 19680  	int16(-540), int16(574),
 19681  	int16(-283), int16(151),
 19682  	int16(181), int16(-402),
 19683  	int16(-278), int16(-240),
 19684  	int16(-110), int16(-227),
 19685  	int16(-264), int16(-89),
 19686  	int16(-250), int16(-259),
 19687  	int16(-27), int16(106),
 19688  	int16(-239), int16(-98),
 19689  	int16(-390), int16(118),
 19690  	int16(61), int16(104),
 19691  	int16(294), int16(532),
 19692  	int16(92), int16(-13),
 19693  	int16(60), int16(-233),
 19694  	int16(335), int16(541),
 19695  	int16(307), int16(-26),
 19696  	int16(-110), int16(-91),
 19697  	int16(-231), int16(-460),
 19698  	int16(170), int16(201),
 19699  	int16(96), int16(-372),
 19700  	int16(132), int16(435),
 19701  	int16(-302), int16(216),
 19702  	int16(-279), int16(-41),
 19703  	int16(74), int16(190),
 19704  	int16(368), int16(273),
 19705  	int16(-186), int16(-608),
 19706  	int16(-157), int16(159),
 19707  	int16(12), int16(278),
 19708  	int16(245), int16(307),
 19709  	int16(25), int16(-187),
 19710  	int16(-16), int16(55),
 19711  	int16(30), int16(-163),
 19712  	int16(548), int16(-307),
 19713  	int16(106), int16(-5),
 19714  	int16(27), int16(330),
 19715  	int16(-416), int16(475),
 19716  	int16(438), int16(-235),
 19717  	int16(104), int16(137),
 19718  	int16(21), int16(-5),
 19719  	int16(-300), int16(-468),
 19720  	int16(521), int16(-347),
 19721  	int16(170), int16(-200),
 19722  	int16(-219), int16(308),
 19723  	int16(-122), int16(-133),
 19724  	int16(219), int16(-16),
 19725  	int16(359), int16(412),
 19726  	int16(-89), int16(-111),
 19727  	int16(48), int16(322),
 19728  	int16(142), int16(177),
 19729  	int16(-286), int16(-127),
 19730  	int16(-39), int16(-63),
 19731  	int16(-42), int16(-451),
 19732  	int16(160), int16(308),
 19733  	int16(-57), int16(193),
 19734  	int16(-48), int16(74),
 19735  	int16(-346), int16(59),
 19736  	int16(-27), int16(27),
 19737  	int16(-469), int16(-277),
 19738  	int16(-344), int16(282),
 19739  	int16(262), int16(122),
 19740  	int16(171), int16(-249),
 19741  	int16(27), int16(258),
 19742  	int16(188), int16(-3),
 19743  	int16(67), int16(-206),
 19744  	int16(-284), int16(291),
 19745  	int16(-117), int16(-88),
 19746  	int16(-477), int16(375),
 19747  	int16(50), int16(106),
 19748  	int16(99), int16(-182),
 19749  	int16(438), int16(-376),
 19750  	int16(-401), int16(-49),
 19751  	int16(119), int16(-23),
 19752  	int16(-10), int16(-48),
 19753  	int16(-116), int16(-200),
 19754  	int16(-310), int16(121),
 19755  	int16(73), int16(7),
 19756  	int16(237), int16(-226),
 19757  	int16(139), int16(-456),
 19758  	int16(397), int16(35),
 19759  	int16(3), int16(-108),
 19760  	int16(323), int16(-75),
 19761  	int16(332), int16(198),
 19762  	int16(-99), int16(-21),
 19763  } /* SKP_Silk_tables_NLSF_CB1_10.c:195:17 */
 19764  
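// Per-stage descriptors for the 6-stage MSVQ codebook CB1 (10 coefficients), with the
// same pointer-placeholder convention as SKP_Silk_NLSF_CB0_16_Stage_info above.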
 19765  var SKP_Silk_NLSF_CB1_10_Stage_info = [6]SKP_Silk_NLSF_CBS{
 19766  	{FnVectors: 32, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19767  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19768  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19769  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19770  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19771  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 19772  } /* SKP_Silk_tables_NLSF_CB1_10.c:559:25 */
 19773  
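// Top-level descriptor for NLSF codebook 1 (10 coefficients); the fields mirror
// SKP_Silk_NLSF_CB0_16 above, with pointer fields left as zero placeholders.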
 19774  var SKP_Silk_NLSF_CB1_10 = SKP_Silk_NLSF_CB_struct{
 19775  	FnStages:       6,
 19776  	FCBStages:      0,
 19777  	FNDeltaMin_Q15: 0,
 19778  	FCDF:           0,
 19779  	FStartPtr:      0,
 19780  	FMiddleIx:      0,
 19781  } /* SKP_Silk_tables_NLSF_CB1_10.c:569:31 */
 19782  
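// The tables below are the 16-coefficient counterpart of the CB1_10 set above:
// range-coder CDFs, per-stage CDF start pointers and middle indices, per-vector rates
// in Q5, minimum NLSF deltas in Q15, and the Q15 codebook vectors for the 10-stage
// codebook CB1 used with order-16 NLSF vectors.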
 19783  var SKP_Silk_NLSF_MSVQ_CB1_16_CDF = [114]uint16{
 19784  	uint16(0),
 19785  	uint16(19099),
 19786  	uint16(26957),
 19787  	uint16(30639),
 19788  	uint16(34242),
 19789  	uint16(37546),
 19790  	uint16(40447),
 19791  	uint16(43287),
 19792  	uint16(46005),
 19793  	uint16(48445),
 19794  	uint16(49865),
 19795  	uint16(51284),
 19796  	uint16(52673),
 19797  	uint16(53975),
 19798  	uint16(55221),
 19799  	uint16(56441),
 19800  	uint16(57267),
 19801  	uint16(58025),
 19802  	uint16(58648),
 19803  	uint16(59232),
 19804  	uint16(59768),
 19805  	uint16(60248),
 19806  	uint16(60729),
 19807  	uint16(61210),
 19808  	uint16(61690),
 19809  	uint16(62171),
 19810  	uint16(62651),
 19811  	uint16(63132),
 19812  	uint16(63613),
 19813  	uint16(64093),
 19814  	uint16(64574),
 19815  	uint16(65054),
 19816  	uint16(65535),
 19817  	uint16(0),
 19818  	uint16(28808),
 19819  	uint16(38775),
 19820  	uint16(46801),
 19821  	uint16(51785),
 19822  	uint16(55886),
 19823  	uint16(59410),
 19824  	uint16(62572),
 19825  	uint16(65535),
 19826  	uint16(0),
 19827  	uint16(27376),
 19828  	uint16(38639),
 19829  	uint16(45052),
 19830  	uint16(51465),
 19831  	uint16(55448),
 19832  	uint16(59021),
 19833  	uint16(62594),
 19834  	uint16(65535),
 19835  	uint16(0),
 19836  	uint16(33403),
 19837  	uint16(39569),
 19838  	uint16(45102),
 19839  	uint16(49961),
 19840  	uint16(54047),
 19841  	uint16(57959),
 19842  	uint16(61788),
 19843  	uint16(65535),
 19844  	uint16(0),
 19845  	uint16(25851),
 19846  	uint16(43356),
 19847  	uint16(47828),
 19848  	uint16(52204),
 19849  	uint16(55964),
 19850  	uint16(59413),
 19851  	uint16(62507),
 19852  	uint16(65535),
 19853  	uint16(0),
 19854  	uint16(34277),
 19855  	uint16(40337),
 19856  	uint16(45432),
 19857  	uint16(50311),
 19858  	uint16(54326),
 19859  	uint16(58171),
 19860  	uint16(61853),
 19861  	uint16(65535),
 19862  	uint16(0),
 19863  	uint16(33538),
 19864  	uint16(39865),
 19865  	uint16(45302),
 19866  	uint16(50076),
 19867  	uint16(54549),
 19868  	uint16(58478),
 19869  	uint16(62159),
 19870  	uint16(65535),
 19871  	uint16(0),
 19872  	uint16(27445),
 19873  	uint16(35258),
 19874  	uint16(40665),
 19875  	uint16(46072),
 19876  	uint16(51362),
 19877  	uint16(56540),
 19878  	uint16(61086),
 19879  	uint16(65535),
 19880  	uint16(0),
 19881  	uint16(22080),
 19882  	uint16(30779),
 19883  	uint16(37065),
 19884  	uint16(43085),
 19885  	uint16(48849),
 19886  	uint16(54613),
 19887  	uint16(60133),
 19888  	uint16(65535),
 19889  	uint16(0),
 19890  	uint16(13417),
 19891  	uint16(21748),
 19892  	uint16(30078),
 19893  	uint16(38231),
 19894  	uint16(46383),
 19895  	uint16(53091),
 19896  	uint16(59515),
 19897  	uint16(65535),
 19898  } /* SKP_Silk_tables_NLSF_CB1_16.c:38:18 */
 19899  
 19900  var SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr = [10]uintptr{
 19901  	0,
 19902  	0,
 19903  	0,
 19904  	0,
 19905  	0,
 19906  	0,
 19907  	0,
 19908  	0,
 19909  	0,
 19910  	0,
 19911  } /* SKP_Silk_tables_NLSF_CB1_16.c:156:18 */
 19912  
 19913  var SKP_Silk_NLSF_MSVQ_CB1_16_CDF_middle_idx = [10]int32{
 19914  	5,
 19915  	2,
 19916  	2,
 19917  	2,
 19918  	2,
 19919  	2,
 19920  	2,
 19921  	3,
 19922  	3,
 19923  	4,
 19924  } /* SKP_Silk_tables_NLSF_CB1_16.c:170:15 */
 19925  
 19926  var SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5 = [104]int16{
 19927  	int16(57), int16(98),
 19928  	int16(133), int16(134),
 19929  	int16(138), int16(144),
 19930  	int16(145), int16(147),
 19931  	int16(152), int16(177),
 19932  	int16(177), int16(178),
 19933  	int16(181), int16(183),
 19934  	int16(184), int16(202),
 19935  	int16(206), int16(215),
 19936  	int16(218), int16(222),
 19937  	int16(227), int16(227),
 19938  	int16(227), int16(227),
 19939  	int16(227), int16(227),
 19940  	int16(227), int16(227),
 19941  	int16(227), int16(227),
 19942  	int16(227), int16(227),
 19943  	int16(38), int16(87),
 19944  	int16(97), int16(119),
 19945  	int16(128), int16(135),
 19946  	int16(140), int16(143),
 19947  	int16(40), int16(81),
 19948  	int16(107), int16(107),
 19949  	int16(129), int16(134),
 19950  	int16(134), int16(143),
 19951  	int16(31), int16(109),
 19952  	int16(114), int16(120),
 19953  	int16(128), int16(130),
 19954  	int16(131), int16(132),
 19955  	int16(43), int16(61),
 19956  	int16(124), int16(125),
 19957  	int16(132), int16(136),
 19958  	int16(141), int16(142),
 19959  	int16(30), int16(110),
 19960  	int16(118), int16(120),
 19961  	int16(129), int16(131),
 19962  	int16(133), int16(133),
 19963  	int16(31), int16(108),
 19964  	int16(115), int16(121),
 19965  	int16(124), int16(130),
 19966  	int16(133), int16(137),
 19967  	int16(40), int16(98),
 19968  	int16(115), int16(115),
 19969  	int16(116), int16(117),
 19970  	int16(123), int16(124),
 19971  	int16(50), int16(93),
 19972  	int16(108), int16(110),
 19973  	int16(112), int16(112),
 19974  	int16(114), int16(115),
 19975  	int16(73), int16(95),
 19976  	int16(95), int16(96),
 19977  	int16(96), int16(105),
 19978  	int16(107), int16(110),
 19979  } /* SKP_Silk_tables_NLSF_CB1_16.c:184:17 */
 19980  
 19981  var SKP_Silk_NLSF_MSVQ_CB1_16_ndelta_min_Q15 = [17]int32{
 19982  	148,
 19983  	3,
 19984  	60,
 19985  	68,
 19986  	117,
 19987  	86,
 19988  	121,
 19989  	124,
 19990  	152,
 19991  	153,
 19992  	207,
 19993  	151,
 19994  	225,
 19995  	239,
 19996  	126,
 19997  	183,
 19998  	792,
 19999  } /* SKP_Silk_tables_NLSF_CB1_16.c:240:15 */
 20000  
 20001  var SKP_Silk_NLSF_MSVQ_CB1_16_Q15 = [1664]int16{
 20002  	int16(1309), int16(3060), int16(5071), int16(6996),
 20003  	int16(9028), int16(10938), int16(12934), int16(14891),
 20004  	int16(16933), int16(18854), int16(20792), int16(22764),
 20005  	int16(24753), int16(26659), int16(28626), int16(30501),
 20006  	int16(1264), int16(2745), int16(4610), int16(6408),
 20007  	int16(8286), int16(10043), int16(12084), int16(14108),
 20008  	int16(16118), int16(18163), int16(20095), int16(22164),
 20009  	int16(24264), int16(26316), int16(28329), int16(30251),
 20010  	int16(1044), int16(2080), int16(3672), int16(5179),
 20011  	int16(7140), int16(9100), int16(11070), int16(13065),
 20012  	int16(15423), int16(17790), int16(19931), int16(22101),
 20013  	int16(24290), int16(26361), int16(28499), int16(30418),
 20014  	int16(1131), int16(2476), int16(4478), int16(6149),
 20015  	int16(7902), int16(9875), int16(11938), int16(13809),
 20016  	int16(15869), int16(17730), int16(19948), int16(21707),
 20017  	int16(23761), int16(25535), int16(27426), int16(28917),
 20018  	int16(1040), int16(2004), int16(4026), int16(6100),
 20019  	int16(8432), int16(10494), int16(12610), int16(14694),
 20020  	int16(16797), int16(18775), int16(20799), int16(22782),
 20021  	int16(24772), int16(26682), int16(28631), int16(30516),
 20022  	int16(2310), int16(3812), int16(5913), int16(7933),
 20023  	int16(10033), int16(11881), int16(13885), int16(15798),
 20024  	int16(17751), int16(19576), int16(21482), int16(23276),
 20025  	int16(25157), int16(27010), int16(28833), int16(30623),
 20026  	int16(1254), int16(2847), int16(5013), int16(6781),
 20027  	int16(8626), int16(10370), int16(12726), int16(14633),
 20028  	int16(16281), int16(17852), int16(19870), int16(21472),
 20029  	int16(23002), int16(24629), int16(26710), int16(27960),
 20030  	int16(1468), int16(3059), int16(4987), int16(7026),
 20031  	int16(8741), int16(10412), int16(12281), int16(14020),
 20032  	int16(15970), int16(17723), int16(19640), int16(21522),
 20033  	int16(23472), int16(25661), int16(27986), int16(30225),
 20034  	int16(2171), int16(3566), int16(5605), int16(7384),
 20035  	int16(9404), int16(11220), int16(13030), int16(14758),
 20036  	int16(16687), int16(18417), int16(20346), int16(22091),
 20037  	int16(24055), int16(26212), int16(28356), int16(30397),
 20038  	int16(2409), int16(4676), int16(7543), int16(9786),
 20039  	int16(11419), int16(12935), int16(14368), int16(15653),
 20040  	int16(17366), int16(18943), int16(20762), int16(22477),
 20041  	int16(24440), int16(26327), int16(28284), int16(30242),
 20042  	int16(2354), int16(4222), int16(6820), int16(9107),
 20043  	int16(11596), int16(13934), int16(15973), int16(17682),
 20044  	int16(19158), int16(20517), int16(21991), int16(23420),
 20045  	int16(25178), int16(26936), int16(28794), int16(30527),
 20046  	int16(1323), int16(2414), int16(4184), int16(6039),
 20047  	int16(7534), int16(9398), int16(11099), int16(13097),
 20048  	int16(14799), int16(16451), int16(18434), int16(20887),
 20049  	int16(23490), int16(25838), int16(28046), int16(30225),
 20050  	int16(1361), int16(3243), int16(6048), int16(8511),
 20051  	int16(11001), int16(13145), int16(15073), int16(16608),
 20052  	int16(18126), int16(19381), int16(20912), int16(22607),
 20053  	int16(24660), int16(26668), int16(28663), int16(30566),
 20054  	int16(1216), int16(2648), int16(5901), int16(8422),
 20055  	int16(10037), int16(11425), int16(12973), int16(14603),
 20056  	int16(16686), int16(18600), int16(20555), int16(22415),
 20057  	int16(24450), int16(26280), int16(28206), int16(30077),
 20058  	int16(2417), int16(4048), int16(6316), int16(8433),
 20059  	int16(10510), int16(12757), int16(15072), int16(17295),
 20060  	int16(19573), int16(21503), int16(23329), int16(24782),
 20061  	int16(26235), int16(27689), int16(29214), int16(30819),
 20062  	int16(1012), int16(2345), int16(4991), int16(7377),
 20063  	int16(9465), int16(11916), int16(14296), int16(16566),
 20064  	int16(18672), int16(20544), int16(22292), int16(23838),
 20065  	int16(25415), int16(27050), int16(28848), int16(30551),
 20066  	int16(1937), int16(3693), int16(6267), int16(8019),
 20067  	int16(10372), int16(12194), int16(14287), int16(15657),
 20068  	int16(17431), int16(18864), int16(20769), int16(22206),
 20069  	int16(24037), int16(25463), int16(27383), int16(28602),
 20070  	int16(1969), int16(3305), int16(5017), int16(6726),
 20071  	int16(8375), int16(9993), int16(11634), int16(13280),
 20072  	int16(15078), int16(16751), int16(18464), int16(20119),
 20073  	int16(21959), int16(23858), int16(26224), int16(29298),
 20074  	int16(1198), int16(2647), int16(5428), int16(7423),
 20075  	int16(9775), int16(12155), int16(14665), int16(16344),
 20076  	int16(18121), int16(19790), int16(21557), int16(22847),
 20077  	int16(24484), int16(25742), int16(27639), int16(28711),
 20078  	int16(1636), int16(3353), int16(5447), int16(7597),
 20079  	int16(9837), int16(11647), int16(13964), int16(16019),
 20080  	int16(17862), int16(20116), int16(22319), int16(24037),
 20081  	int16(25966), int16(28086), int16(29914), int16(31294),
 20082  	int16(2676), int16(4105), int16(6378), int16(8223),
 20083  	int16(10058), int16(11549), int16(13072), int16(14453),
 20084  	int16(15956), int16(17355), int16(18931), int16(20402),
 20085  	int16(22183), int16(23884), int16(25717), int16(27723),
 20086  	int16(1373), int16(2593), int16(4449), int16(5633),
 20087  	int16(7300), int16(8425), int16(9474), int16(10818),
 20088  	int16(12769), int16(15722), int16(19002), int16(21429),
 20089  	int16(23682), int16(25924), int16(28135), int16(30333),
 20090  	int16(1596), int16(3183), int16(5378), int16(7164),
 20091  	int16(8670), int16(10105), int16(11470), int16(12834),
 20092  	int16(13991), int16(15042), int16(16642), int16(17903),
 20093  	int16(20759), int16(25283), int16(27770), int16(30240),
 20094  	int16(2037), int16(3987), int16(6237), int16(8117),
 20095  	int16(9954), int16(12245), int16(14217), int16(15892),
 20096  	int16(17775), int16(20114), int16(22314), int16(25942),
 20097  	int16(26305), int16(26483), int16(26796), int16(28561),
 20098  	int16(2181), int16(3858), int16(5760), int16(7924),
 20099  	int16(10041), int16(11577), int16(13769), int16(15700),
 20100  	int16(17429), int16(19879), int16(23583), int16(24538),
 20101  	int16(25212), int16(25693), int16(28688), int16(30507),
 20102  	int16(1992), int16(3882), int16(6474), int16(7883),
 20103  	int16(9381), int16(12672), int16(14340), int16(15701),
 20104  	int16(16658), int16(17832), int16(20850), int16(22885),
 20105  	int16(24677), int16(26457), int16(28491), int16(30460),
 20106  	int16(2391), int16(3988), int16(5448), int16(7432),
 20107  	int16(11014), int16(12579), int16(13140), int16(14146),
 20108  	int16(15898), int16(18592), int16(21104), int16(22993),
 20109  	int16(24673), int16(27186), int16(28142), int16(29612),
 20110  	int16(1713), int16(5102), int16(6989), int16(7798),
 20111  	int16(8670), int16(10110), int16(12746), int16(14881),
 20112  	int16(16709), int16(18407), int16(20126), int16(22107),
 20113  	int16(24181), int16(26198), int16(28237), int16(30137),
 20114  	int16(1612), int16(3617), int16(6148), int16(8359),
 20115  	int16(9576), int16(11528), int16(14936), int16(17809),
 20116  	int16(18287), int16(18729), int16(19001), int16(21111),
 20117  	int16(24631), int16(26596), int16(28740), int16(30643),
 20118  	int16(2266), int16(4168), int16(7862), int16(9546),
 20119  	int16(9618), int16(9703), int16(10134), int16(13897),
 20120  	int16(16265), int16(18432), int16(20587), int16(22605),
 20121  	int16(24754), int16(26994), int16(29125), int16(30840),
 20122  	int16(1840), int16(3917), int16(6272), int16(7809),
 20123  	int16(9714), int16(11438), int16(13767), int16(15799),
 20124  	int16(19244), int16(21972), int16(22980), int16(23180),
 20125  	int16(23723), int16(25650), int16(29117), int16(31085),
 20126  	int16(1458), int16(3612), int16(6008), int16(7488),
 20127  	int16(9827), int16(11893), int16(14086), int16(15734),
 20128  	int16(17440), int16(19535), int16(22424), int16(24767),
 20129  	int16(29246), int16(29928), int16(30516), int16(30947),
 20130  	int16(-102), int16(-121), int16(-31), int16(-6),
 20131  	int16(5), int16(-2), int16(8), int16(-18),
 20132  	int16(-4), int16(6), int16(14), int16(-2),
 20133  	int16(-12), int16(-16), int16(-12), int16(-60),
 20134  	int16(-126), int16(-353), int16(-574), int16(-677),
 20135  	int16(-657), int16(-617), int16(-498), int16(-393),
 20136  	int16(-348), int16(-277), int16(-225), int16(-164),
 20137  	int16(-102), int16(-70), int16(-31), int16(33),
 20138  	int16(4), int16(379), int16(387), int16(551),
 20139  	int16(605), int16(620), int16(532), int16(482),
 20140  	int16(442), int16(454), int16(385), int16(347),
 20141  	int16(322), int16(299), int16(266), int16(200),
 20142  	int16(1168), int16(951), int16(672), int16(246),
 20143  	int16(60), int16(-161), int16(-259), int16(-234),
 20144  	int16(-253), int16(-282), int16(-203), int16(-187),
 20145  	int16(-155), int16(-176), int16(-198), int16(-178),
 20146  	int16(10), int16(170), int16(393), int16(609),
 20147  	int16(555), int16(208), int16(-330), int16(-571),
 20148  	int16(-769), int16(-633), int16(-319), int16(-43),
 20149  	int16(95), int16(105), int16(106), int16(116),
 20150  	int16(-152), int16(-140), int16(-125), int16(5),
 20151  	int16(173), int16(274), int16(264), int16(331),
 20152  	int16(-37), int16(-293), int16(-609), int16(-786),
 20153  	int16(-959), int16(-814), int16(-645), int16(-238),
 20154  	int16(-91), int16(36), int16(-11), int16(-101),
 20155  	int16(-279), int16(-227), int16(-40), int16(90),
 20156  	int16(530), int16(677), int16(890), int16(1104),
 20157  	int16(999), int16(835), int16(564), int16(295),
 20158  	int16(-280), int16(-364), int16(-340), int16(-331),
 20159  	int16(-284), int16(288), int16(761), int16(880),
 20160  	int16(988), int16(627), int16(146), int16(-226),
 20161  	int16(-203), int16(-181), int16(-142), int16(39),
 20162  	int16(24), int16(-26), int16(-107), int16(-92),
 20163  	int16(-161), int16(-135), int16(-131), int16(-88),
 20164  	int16(-160), int16(-156), int16(-75), int16(-43),
 20165  	int16(-36), int16(-6), int16(-33), int16(33),
 20166  	int16(-324), int16(-415), int16(-108), int16(124),
 20167  	int16(157), int16(191), int16(203), int16(197),
 20168  	int16(144), int16(109), int16(152), int16(176),
 20169  	int16(190), int16(122), int16(101), int16(159),
 20170  	int16(663), int16(668), int16(480), int16(400),
 20171  	int16(379), int16(444), int16(446), int16(458),
 20172  	int16(343), int16(351), int16(310), int16(228),
 20173  	int16(133), int16(44), int16(75), int16(63),
 20174  	int16(-84), int16(39), int16(-29), int16(35),
 20175  	int16(-94), int16(-233), int16(-261), int16(-354),
 20176  	int16(77), int16(262), int16(-24), int16(-145),
 20177  	int16(-333), int16(-409), int16(-404), int16(-597),
 20178  	int16(-488), int16(-300), int16(910), int16(592),
 20179  	int16(412), int16(120), int16(130), int16(-51),
 20180  	int16(-37), int16(-77), int16(-172), int16(-181),
 20181  	int16(-159), int16(-148), int16(-72), int16(-62),
 20182  	int16(510), int16(516), int16(113), int16(-585),
 20183  	int16(-1075), int16(-957), int16(-417), int16(-195),
 20184  	int16(9), int16(7), int16(-88), int16(-173),
 20185  	int16(-91), int16(54), int16(98), int16(95),
 20186  	int16(-28), int16(197), int16(-527), int16(-621),
 20187  	int16(157), int16(122), int16(-168), int16(147),
 20188  	int16(309), int16(300), int16(336), int16(315),
 20189  	int16(396), int16(408), int16(376), int16(106),
 20190  	int16(-162), int16(-170), int16(-315), int16(98),
 20191  	int16(821), int16(908), int16(570), int16(-33),
 20192  	int16(-312), int16(-568), int16(-572), int16(-378),
 20193  	int16(-107), int16(23), int16(156), int16(93),
 20194  	int16(-129), int16(-87), int16(20), int16(-72),
 20195  	int16(-37), int16(40), int16(21), int16(27),
 20196  	int16(48), int16(75), int16(77), int16(65),
 20197  	int16(46), int16(71), int16(66), int16(47),
 20198  	int16(136), int16(344), int16(236), int16(322),
 20199  	int16(170), int16(283), int16(269), int16(291),
 20200  	int16(162), int16(-43), int16(-204), int16(-259),
 20201  	int16(-240), int16(-305), int16(-350), int16(-312),
 20202  	int16(447), int16(348), int16(345), int16(257),
 20203  	int16(71), int16(-131), int16(-77), int16(-190),
 20204  	int16(-202), int16(-40), int16(35), int16(133),
 20205  	int16(261), int16(365), int16(438), int16(303),
 20206  	int16(-8), int16(22), int16(140), int16(137),
 20207  	int16(-300), int16(-641), int16(-764), int16(-268),
 20208  	int16(-23), int16(-25), int16(73), int16(-162),
 20209  	int16(-150), int16(-212), int16(-72), int16(6),
 20210  	int16(39), int16(78), int16(104), int16(-93),
 20211  	int16(-308), int16(-136), int16(117), int16(-71),
 20212  	int16(-513), int16(-820), int16(-700), int16(-450),
 20213  	int16(-161), int16(-23), int16(29), int16(78),
 20214  	int16(337), int16(106), int16(-406), int16(-782),
 20215  	int16(-112), int16(233), int16(383), int16(62),
 20216  	int16(-126), int16(6), int16(-77), int16(-29),
 20217  	int16(-146), int16(-123), int16(-51), int16(-27),
 20218  	int16(-27), int16(-381), int16(-641), int16(402),
 20219  	int16(539), int16(8), int16(-207), int16(-366),
 20220  	int16(-36), int16(-27), int16(-204), int16(-227),
 20221  	int16(-237), int16(-189), int16(-64), int16(51),
 20222  	int16(-92), int16(-137), int16(-281), int16(62),
 20223  	int16(233), int16(92), int16(148), int16(294),
 20224  	int16(363), int16(416), int16(564), int16(625),
 20225  	int16(370), int16(-36), int16(-469), int16(-462),
 20226  	int16(102), int16(168), int16(32), int16(117),
 20227  	int16(-21), int16(97), int16(139), int16(89),
 20228  	int16(104), int16(35), int16(4), int16(82),
 20229  	int16(66), int16(58), int16(73), int16(93),
 20230  	int16(-76), int16(-320), int16(-236), int16(-189),
 20231  	int16(-203), int16(-142), int16(-27), int16(-73),
 20232  	int16(9), int16(-9), int16(-25), int16(12),
 20233  	int16(-15), int16(4), int16(4), int16(-50),
 20234  	int16(314), int16(180), int16(162), int16(-49),
 20235  	int16(199), int16(-108), int16(-227), int16(-66),
 20236  	int16(-447), int16(-67), int16(-264), int16(-394),
 20237  	int16(5), int16(55), int16(-133), int16(-176),
 20238  	int16(-116), int16(-241), int16(272), int16(109),
 20239  	int16(282), int16(262), int16(192), int16(-64),
 20240  	int16(-392), int16(-514), int16(156), int16(203),
 20241  	int16(154), int16(72), int16(-34), int16(-160),
 20242  	int16(-73), int16(3), int16(-33), int16(-431),
 20243  	int16(321), int16(18), int16(-567), int16(-590),
 20244  	int16(-108), int16(88), int16(66), int16(51),
 20245  	int16(-31), int16(-193), int16(-46), int16(65),
 20246  	int16(-29), int16(-23), int16(215), int16(-31),
 20247  	int16(101), int16(-113), int16(32), int16(304),
 20248  	int16(88), int16(320), int16(448), int16(5),
 20249  	int16(-439), int16(-562), int16(-508), int16(-135),
 20250  	int16(-13), int16(-171), int16(-8), int16(182),
 20251  	int16(-99), int16(-181), int16(-149), int16(376),
 20252  	int16(476), int16(64), int16(-396), int16(-652),
 20253  	int16(-150), int16(176), int16(222), int16(65),
 20254  	int16(-590), int16(719), int16(271), int16(399),
 20255  	int16(245), int16(72), int16(-156), int16(-152),
 20256  	int16(-176), int16(59), int16(94), int16(125),
 20257  	int16(-9), int16(-7), int16(9), int16(1),
 20258  	int16(-61), int16(-116), int16(-82), int16(1),
 20259  	int16(79), int16(22), int16(-44), int16(-15),
 20260  	int16(-48), int16(-65), int16(-62), int16(-101),
 20261  	int16(-102), int16(-54), int16(-70), int16(-78),
 20262  	int16(-80), int16(-25), int16(398), int16(71),
 20263  	int16(139), int16(38), int16(90), int16(194),
 20264  	int16(222), int16(249), int16(165), int16(94),
 20265  	int16(221), int16(262), int16(163), int16(91),
 20266  	int16(-206), int16(573), int16(200), int16(-287),
 20267  	int16(-147), int16(5), int16(-18), int16(-85),
 20268  	int16(-74), int16(-125), int16(-87), int16(85),
 20269  	int16(141), int16(4), int16(-4), int16(28),
 20270  	int16(234), int16(48), int16(-150), int16(-111),
 20271  	int16(-506), int16(237), int16(-209), int16(345),
 20272  	int16(94), int16(-124), int16(77), int16(121),
 20273  	int16(143), int16(12), int16(-80), int16(-48),
 20274  	int16(191), int16(144), int16(-93), int16(-65),
 20275  	int16(-151), int16(-643), int16(435), int16(106),
 20276  	int16(87), int16(7), int16(65), int16(102),
 20277  	int16(94), int16(68), int16(5), int16(99),
 20278  	int16(222), int16(93), int16(94), int16(355),
 20279  	int16(-13), int16(-89), int16(-228), int16(-503),
 20280  	int16(287), int16(109), int16(108), int16(449),
 20281  	int16(253), int16(-29), int16(-109), int16(-116),
 20282  	int16(15), int16(-73), int16(-20), int16(131),
 20283  	int16(-147), int16(72), int16(59), int16(-150),
 20284  	int16(-594), int16(273), int16(316), int16(132),
 20285  	int16(199), int16(106), int16(198), int16(212),
 20286  	int16(220), int16(82), int16(45), int16(-13),
 20287  	int16(223), int16(137), int16(270), int16(38),
 20288  	int16(252), int16(135), int16(-177), int16(-207),
 20289  	int16(-360), int16(-102), int16(403), int16(406),
 20290  	int16(-14), int16(83), int16(64), int16(51),
 20291  	int16(-7), int16(-99), int16(-97), int16(-88),
 20292  	int16(-124), int16(-65), int16(42), int16(32),
 20293  	int16(28), int16(29), int16(12), int16(20),
 20294  	int16(119), int16(-26), int16(-212), int16(-201),
 20295  	int16(373), int16(251), int16(141), int16(103),
 20296  	int16(36), int16(-52), int16(66), int16(18),
 20297  	int16(-6), int16(-95), int16(-196), int16(5),
 20298  	int16(98), int16(-85), int16(-108), int16(218),
 20299  	int16(-164), int16(20), int16(356), int16(172),
 20300  	int16(37), int16(266), int16(23), int16(112),
 20301  	int16(-24), int16(-99), int16(-92), int16(-178),
 20302  	int16(29), int16(-278), int16(388), int16(-60),
 20303  	int16(-220), int16(300), int16(-13), int16(154),
 20304  	int16(191), int16(15), int16(-37), int16(-110),
 20305  	int16(-153), int16(-150), int16(-114), int16(-7),
 20306  	int16(-94), int16(-31), int16(-62), int16(-177),
 20307  	int16(4), int16(-70), int16(35), int16(453),
 20308  	int16(147), int16(-247), int16(-328), int16(101),
 20309  	int16(20), int16(-114), int16(147), int16(108),
 20310  	int16(-119), int16(-109), int16(-102), int16(-238),
 20311  	int16(55), int16(-102), int16(173), int16(-89),
 20312  	int16(129), int16(138), int16(-330), int16(-160),
 20313  	int16(485), int16(154), int16(-59), int16(-170),
 20314  	int16(-20), int16(-34), int16(-261), int16(-40),
 20315  	int16(-129), int16(77), int16(-84), int16(69),
 20316  	int16(83), int16(160), int16(169), int16(63),
 20317  	int16(-516), int16(30), int16(336), int16(52),
 20318  	int16(-0), int16(-52), int16(-124), int16(158),
 20319  	int16(19), int16(197), int16(-10), int16(-375),
 20320  	int16(405), int16(285), int16(114), int16(-395),
 20321  	int16(-47), int16(196), int16(62), int16(87),
 20322  	int16(-106), int16(-65), int16(-75), int16(-69),
 20323  	int16(-13), int16(34), int16(99), int16(59),
 20324  	int16(83), int16(98), int16(44), int16(0),
 20325  	int16(24), int16(18), int16(17), int16(70),
 20326  	int16(-22), int16(194), int16(208), int16(144),
 20327  	int16(-79), int16(-15), int16(32), int16(-104),
 20328  	int16(-28), int16(-105), int16(-186), int16(-212),
 20329  	int16(-228), int16(-79), int16(-76), int16(51),
 20330  	int16(-71), int16(72), int16(118), int16(-34),
 20331  	int16(-3), int16(-171), int16(5), int16(2),
 20332  	int16(-108), int16(-125), int16(62), int16(-58),
 20333  	int16(58), int16(-121), int16(73), int16(-466),
 20334  	int16(92), int16(63), int16(-94), int16(-78),
 20335  	int16(-76), int16(212), int16(36), int16(-225),
 20336  	int16(-71), int16(-354), int16(152), int16(143),
 20337  	int16(-79), int16(-246), int16(-51), int16(-31),
 20338  	int16(-6), int16(-270), int16(240), int16(210),
 20339  	int16(30), int16(-157), int16(-231), int16(74),
 20340  	int16(-146), int16(88), int16(-273), int16(156),
 20341  	int16(92), int16(56), int16(71), int16(2),
 20342  	int16(318), int16(164), int16(32), int16(-110),
 20343  	int16(-35), int16(-41), int16(-95), int16(-106),
 20344  	int16(11), int16(132), int16(-68), int16(55),
 20345  	int16(123), int16(-83), int16(-149), int16(212),
 20346  	int16(132), int16(0), int16(-194), int16(55),
 20347  	int16(206), int16(-108), int16(-353), int16(289),
 20348  	int16(-195), int16(1), int16(233), int16(-22),
 20349  	int16(-60), int16(20), int16(26), int16(68),
 20350  	int16(166), int16(27), int16(-58), int16(130),
 20351  	int16(112), int16(107), int16(27), int16(-165),
 20352  	int16(115), int16(-93), int16(-37), int16(38),
 20353  	int16(83), int16(483), int16(65), int16(-229),
 20354  	int16(-13), int16(157), int16(85), int16(50),
 20355  	int16(136), int16(10), int16(32), int16(83),
 20356  	int16(82), int16(55), int16(5), int16(-9),
 20357  	int16(-52), int16(-78), int16(-81), int16(-51),
 20358  	int16(40), int16(18), int16(-127), int16(-224),
 20359  	int16(-41), int16(53), int16(-210), int16(-113),
 20360  	int16(24), int16(-17), int16(-187), int16(-89),
 20361  	int16(8), int16(121), int16(83), int16(77),
 20362  	int16(91), int16(-74), int16(-35), int16(-112),
 20363  	int16(-161), int16(-173), int16(102), int16(132),
 20364  	int16(-125), int16(-61), int16(103), int16(-260),
 20365  	int16(52), int16(166), int16(-32), int16(-156),
 20366  	int16(-87), int16(-56), int16(60), int16(-70),
 20367  	int16(-124), int16(242), int16(114), int16(-251),
 20368  	int16(-166), int16(201), int16(127), int16(28),
 20369  	int16(-11), int16(23), int16(-80), int16(-115),
 20370  	int16(-20), int16(-51), int16(-348), int16(340),
 20371  	int16(-34), int16(133), int16(13), int16(92),
 20372  	int16(-124), int16(-136), int16(-120), int16(-26),
 20373  	int16(-6), int16(17), int16(28), int16(21),
 20374  	int16(120), int16(-168), int16(160), int16(-35),
 20375  	int16(115), int16(28), int16(9), int16(7),
 20376  	int16(-56), int16(39), int16(156), int16(256),
 20377  	int16(-18), int16(1), int16(277), int16(82),
 20378  	int16(-70), int16(-144), int16(-88), int16(-13),
 20379  	int16(-59), int16(-157), int16(8), int16(-134),
 20380  	int16(21), int16(-40), int16(58), int16(-21),
 20381  	int16(194), int16(-276), int16(97), int16(279),
 20382  	int16(-56), int16(-140), int16(125), int16(57),
 20383  	int16(-184), int16(-204), int16(-70), int16(-2),
 20384  	int16(128), int16(-202), int16(-78), int16(230),
 20385  	int16(-23), int16(161), int16(-102), int16(1),
 20386  	int16(1), int16(180), int16(-31), int16(-86),
 20387  	int16(-167), int16(-57), int16(-60), int16(27),
 20388  	int16(-13), int16(99), int16(108), int16(111),
 20389  	int16(76), int16(69), int16(34), int16(-21),
 20390  	int16(53), int16(38), int16(34), int16(78),
 20391  	int16(73), int16(219), int16(51), int16(15),
 20392  	int16(-72), int16(-103), int16(-207), int16(30),
 20393  	int16(213), int16(-14), int16(31), int16(-94),
 20394  	int16(-40), int16(-144), int16(67), int16(4),
 20395  	int16(105), int16(59), int16(-240), int16(25),
 20396  	int16(244), int16(69), int16(58), int16(23),
 20397  	int16(-24), int16(-5), int16(-15), int16(-133),
 20398  	int16(-71), int16(-67), int16(181), int16(29),
 20399  	int16(-45), int16(121), int16(96), int16(51),
 20400  	int16(-72), int16(-53), int16(56), int16(-153),
 20401  	int16(-27), int16(85), int16(183), int16(211),
 20402  	int16(105), int16(-34), int16(-46), int16(43),
 20403  	int16(-72), int16(-93), int16(36), int16(-128),
 20404  	int16(29), int16(111), int16(-95), int16(-156),
 20405  	int16(-179), int16(-235), int16(21), int16(-39),
 20406  	int16(-71), int16(-33), int16(-61), int16(-252),
 20407  	int16(230), int16(-131), int16(157), int16(-21),
 20408  	int16(-85), int16(-28), int16(-123), int16(80),
 20409  	int16(-160), int16(63), int16(47), int16(-6),
 20410  	int16(-49), int16(-96), int16(-19), int16(17),
 20411  	int16(-58), int16(17), int16(-0), int16(-13),
 20412  	int16(-170), int16(25), int16(-35), int16(59),
 20413  	int16(10), int16(-31), int16(-413), int16(81),
 20414  	int16(62), int16(18), int16(-164), int16(245),
 20415  	int16(92), int16(-165), int16(42), int16(26),
 20416  	int16(126), int16(-248), int16(193), int16(-55),
 20417  	int16(16), int16(39), int16(14), int16(50),
 20418  } /* SKP_Silk_tables_NLSF_CB1_16.c:261:17 */
 20419  
 20420  var SKP_Silk_NLSF_CB1_16_Stage_info = [10]SKP_Silk_NLSF_CBS{
 20421  	{FnVectors: 32, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20422  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20423  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20424  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20425  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20426  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20427  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20428  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20429  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20430  	{FnVectors: 8, FCB_NLSF_Q15: 0, FRates_Q5: 0},
 20431  } /* SKP_Silk_tables_NLSF_CB1_16.c:681:25 */
 20432  
 20433  var SKP_Silk_NLSF_CB1_16 = SKP_Silk_NLSF_CB_struct{
 20434  	FnStages:       10,
 20435  	FCBStages:      0,
 20436  	FNDeltaMin_Q15: 0,
 20437  	FCDF:           0,
 20438  	FStartPtr:      0,
 20439  	FMiddleIx:      0,
 20440  } /* SKP_Silk_tables_NLSF_CB1_16.c:695:31 */
 20441  
 20442  /* Piece-wise linear mapping from bitrate in bps to coding quality in dB SNR (an illustrative lookup sketch follows these tables) */
 20443  var TargetRate_table_NB = [8]int32{
 20444  	0, 8000, 9000, 11000, 13000, 16000, 22000, 100000,
 20445  } /* SKP_Silk_tables_other.c:37:17 */
 20446  var TargetRate_table_MB = [8]int32{
 20447  	0, 10000, 12000, 14000, 17000, 21000, 28000, 100000,
 20448  } /* SKP_Silk_tables_other.c:40:17 */
 20449  var TargetRate_table_WB = [8]int32{
 20450  	0, 11000, 14000, 17000, 21000, 26000, 36000, 100000,
 20451  } /* SKP_Silk_tables_other.c:43:17 */
 20452  var TargetRate_table_SWB = [8]int32{
 20453  	0, 13000, 16000, 19000, 25000, 32000, 46000, 100000,
 20454  } /* SKP_Silk_tables_other.c:46:17 */
 20455  var SNR_table_Q1 = [8]int32{
 20456  	19, 31, 35, 39, 43, 47, 54, 64,
 20457  } /* SKP_Silk_tables_other.c:49:17 */
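
// Illustrative sketch, not part of the ccgo-generated translation unit: one way
// the piece-wise linear tables above can be read. The helper name and the
// clamping assumption (targetRateBps kept within the first and last table
// entries) are hypothetical; the generated encoder control code performs its
// own fixed-point version of this mapping.
// Usage example: exampleRateToSNR_Q1(&TargetRate_table_WB, 20000)
func exampleRateToSNR_Q1(rateTable *[8]int32, targetRateBps int32) int32 {
	// Find the segment rateTable[k] .. rateTable[k+1] that brackets the target rate.
	k := 0
	for k < 6 && targetRateBps > rateTable[k+1] {
		k++
	}
	// Linearly interpolate the Q1 SNR values over that segment.
	lo, hi := rateTable[k], rateTable[k+1]
	return SNR_table_Q1[k] + (SNR_table_Q1[k+1]-SNR_table_Q1[k])*(targetRateBps-lo)/(hi-lo)
}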
 20458  
 20459  var SNR_table_one_bit_per_sample_Q7 = [4]int32{
 20460  	1984, 2240, 2408, 2708,
 20461  } /* SKP_Silk_tables_other.c:53:17 */
 20462  
 20463  /* Filter coefficients for HP filter: 4th order filter implemented as two biquad filters */
 20464  var SKP_Silk_SWB_detect_B_HP_Q13 = [3][3]int16{
 20465  	//{400, -550, 400}, {400, 130, 400}, {400, 390, 400}
 20466  	{int16(575), int16(-948), int16(575)}, {int16(575), int16(-221), int16(575)}, {int16(575), int16(104), int16(575)},
 20467  } /* SKP_Silk_tables_other.c:58:17 */
 20468  var SKP_Silk_SWB_detect_A_HP_Q13 = [3][2]int16{
 20469  	{int16(14613), int16(6868)}, {int16(12883), int16(7337)}, {int16(11586), int16(7911)},
 20470  	//{14880, 6900}, {14400, 7300}, {13700, 7800}
 20471  } /* SKP_Silk_tables_other.c:62:17 */
 20472  
 20473  /* Decoder high-pass filter coefficients for 24 kHz sampling, -6 dB @ 44 Hz */
 20474  var SKP_Silk_Dec_A_HP_24 = [2]int16{int16(-16220), int16(8030)}              /* SKP_Silk_tables_other.c:68:17 */ // second order AR coefs, Q13
 20475  var SKP_Silk_Dec_B_HP_24 = [3]int16{int16(8000), int16(-16000), int16(8000)} /* SKP_Silk_tables_other.c:69:17 */ // second order MA coefs, Q13
 20476  
 20477  /* Decoder high-pass filter coefficients for 16 kHz sampling, -6 dB @ 46 Hz */
 20478  var SKP_Silk_Dec_A_HP_16 = [2]int16{int16(-16127), int16(7940)}              /* SKP_Silk_tables_other.c:72:17 */ // second order AR coefs, Q13
 20479  var SKP_Silk_Dec_B_HP_16 = [3]int16{int16(8000), int16(-16000), int16(8000)} /* SKP_Silk_tables_other.c:73:17 */ // second order MA coefs, Q13
 20480  
 20481  /* Decoder high-pass filter coefficients for 12 kHz sampling, -6 dB @ 44 Hz */
 20482  var SKP_Silk_Dec_A_HP_12 = [2]int16{int16(-16043), int16(7859)}              /* SKP_Silk_tables_other.c:76:17 */ // second order AR coefs, Q13
 20483  var SKP_Silk_Dec_B_HP_12 = [3]int16{int16(8000), int16(-16000), int16(8000)} /* SKP_Silk_tables_other.c:77:17 */ // second order MA coefs, Q13
 20484  
 20485  /* Decoder high-pass filter coefficients for 8 kHz sampling, -6 dB @ 43 Hz */
 20486  var SKP_Silk_Dec_A_HP_8 = [2]int16{int16(-15885), int16(7710)}              /* SKP_Silk_tables_other.c:80:17 */ // second order AR coefs, Q13
 20487  var SKP_Silk_Dec_B_HP_8 = [3]int16{int16(8000), int16(-16000), int16(8000)} /* SKP_Silk_tables_other.c:81:17 */ // second order MA coefs, Q13
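
// Illustrative sketch, not part of the ccgo-generated translation unit: the Q13
// coefficient sets above describe second-order sections. The convention assumed
// here (B = numerator taps b0, b1, b2; A = the two non-unity denominator taps
// a1, a2 of 1 + a1*z^-1 + a2*z^-2) and the helper name are for illustration
// only; the generated biquad routines work in fixed point, not float64.
// Usage example: exampleBiquadQ13(samples, &SKP_Silk_Dec_B_HP_16, &SKP_Silk_Dec_A_HP_16)
func exampleBiquadQ13(in []float64, B *[3]int16, A *[2]int16) []float64 {
	b0 := float64(B[0]) / 8192.0 // Q13 -> float
	b1 := float64(B[1]) / 8192.0
	b2 := float64(B[2]) / 8192.0
	a1 := float64(A[0]) / 8192.0
	a2 := float64(A[1]) / 8192.0
	out := make([]float64, len(in))
	var s1, s2 float64 // transposed direct-form II state
	for n, x := range in {
		y := b0*x + s1
		s1 = b1*x - a1*y + s2
		s2 = b2*x - a2*y
		out[n] = y
	}
	return out
}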
 20488  
 20489  /* table for LSB coding */
 20490  var SKP_Silk_lsb_CDF = [3]uint16{uint16(0), uint16(40000), uint16(65535)} /* SKP_Silk_tables_other.c:84:18 */
 20491  
 20492  /* tables for LTPScale */
 20493  var SKP_Silk_LTPscale_CDF = [4]uint16{uint16(0), uint16(32000), uint16(48000), uint16(65535)} /* SKP_Silk_tables_other.c:87:18 */
 20494  var SKP_Silk_LTPscale_offset int32 = 2                                                        /* SKP_Silk_tables_other.c:88:18 */
 20495  
 20496  /* tables for VAD flag */
 20497  var SKP_Silk_vadflag_CDF = [3]uint16{uint16(0), uint16(22000), uint16(65535)} /* SKP_Silk_tables_other.c:91:18 */ // 66% for speech, 33% for no speech
 20498  var SKP_Silk_vadflag_offset int32 = 1                                         /* SKP_Silk_tables_other.c:92:18 */
 20499  
 20500  /* tables for sampling rate */
 20501  var SKP_Silk_SamplingRates_table = [4]int32{8, 12, 16, 24}                                                        /* SKP_Silk_tables_other.c:95:18 */
 20502  var SKP_Silk_SamplingRates_CDF = [5]uint16{uint16(0), uint16(16000), uint16(32000), uint16(48000), uint16(65535)} /* SKP_Silk_tables_other.c:96:18 */
 20503  var SKP_Silk_SamplingRates_offset int32 = 2                                                                       /* SKP_Silk_tables_other.c:97:18 */
 20504  
 20505  /* tables for NLSF interpolation factor */
 20506  var SKP_Silk_NLSF_interpolation_factor_CDF = [6]uint16{uint16(0), uint16(3706), uint16(8703), uint16(19226), uint16(30926), uint16(65535)} /* SKP_Silk_tables_other.c:100:18 */
 20507  var SKP_Silk_NLSF_interpolation_factor_offset int32 = 4                                                                                    /* SKP_Silk_tables_other.c:101:18 */
 20508  
 20509  /* Table for frame termination indication */
 20510  var SKP_Silk_FrameTermination_CDF = [5]uint16{uint16(0), uint16(20000), uint16(45000), uint16(56000), uint16(65535)} /* SKP_Silk_tables_other.c:104:18 */
 20511  var SKP_Silk_FrameTermination_offset int32 = 2                                                                       /* SKP_Silk_tables_other.c:105:18 */
 20512  
 20513  /* Table for random seed */
 20514  var SKP_Silk_Seed_CDF = [5]uint16{uint16(0), uint16(16384), uint16(32768), uint16(49152), uint16(65535)} /* SKP_Silk_tables_other.c:108:18 */
 20515  var SKP_Silk_Seed_offset int32 = 2                                                                       /* SKP_Silk_tables_other.c:109:18 */
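
// Illustrative sketch, not part of the ccgo-generated translation unit: each of
// the *_CDF tables above partitions the range coder's interval [0, 65535] into
// monotone sub-intervals, one per symbol; a decoded cumulative frequency falls
// into exactly one of them. The paired *_offset constants mark the table index
// used as the reference symbol; how the real decoder applies the offsets is not
// shown here, and the helper name is hypothetical.
// Usage example: exampleSymbolFromCDF(SKP_Silk_Seed_CDF[:], 40000) == 2
func exampleSymbolFromCDF(cdf []uint16, cumFreq uint16) int {
	// Find i with cdf[i] <= cumFreq < cdf[i+1] by linear scan.
	i := 0
	for i+2 < len(cdf) && cumFreq >= cdf[i+1] {
		i++
	}
	return i
}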
 20516  
 20517  /* Quantization offsets */
 20518  var SKP_Silk_Quantization_Offsets_Q10 = [2][2]int16{
 20519  	{int16(32), int16(100)}, {int16(100), int16(256)},
 20520  } /* SKP_Silk_tables_other.c:112:18 */
 20521  
 20522  /* Table for LTPScale */
 20523  var SKP_Silk_LTPScales_table_Q14 = [3]int16{int16(15565), int16(11469), int16(8192)} /* SKP_Silk_tables_other.c:117:17 */
 20524  
 20525  /*  Elliptic/Cauer filters designed with 0.1 dB passband ripple,
 20526      80 dB minimum stopband attenuation, and
 20527      [0.95 : 0.15 : 0.35] normalized cutoff frequencies. */
 20528  
 20529  /* Interpolation points for filter coefficients used in the bandwidth transition smoother */
 20530  var SKP_Silk_Transition_LP_B_Q28 = [5][3]int32{
 20531  	{250767114, 501534038, 250767114},
 20532  	{209867381, 419732057, 209867381},
 20533  	{170987846, 341967853, 170987846},
 20534  	{131531482, 263046905, 131531482},
 20535  	{89306658, 178584282, 89306658},
 20536  } /* SKP_Silk_tables_other.c:125:17 */
 20537  
 20538  /* Interpolation points for filter coefficients used in the bandwidth transition smoother */
 20539  var SKP_Silk_Transition_LP_A_Q28 = [5][2]int32{
 20540  	{506393414, 239854379},
 20541  	{411067935, 169683996},
 20542  	{306733530, 116694253},
 20543  	{185807084, 77959395},
 20544  	{35497197, 57401098},
 20545  } /* SKP_Silk_tables_other.c:135:17 */
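
// Illustrative sketch, not part of the ccgo-generated translation unit: the five
// rows above are interpolation points; the bandwidth transition smoother blends
// two adjacent rows according to a fractional cutoff position. The index/fraction
// convention assumed here (ind in 0..3, fracQ16 in 0..65536) and the helper name
// are illustrative only; the generated smoother code interpolates in fixed point.
func exampleTransitionLPCoefs(ind int, fracQ16 int64) (b [3]int32, a [2]int32) {
	for i := range b {
		d := int64(SKP_Silk_Transition_LP_B_Q28[ind+1][i] - SKP_Silk_Transition_LP_B_Q28[ind][i])
		b[i] = SKP_Silk_Transition_LP_B_Q28[ind][i] + int32((d*fracQ16)>>16)
	}
	for i := range a {
		d := int64(SKP_Silk_Transition_LP_A_Q28[ind+1][i] - SKP_Silk_Transition_LP_A_Q28[ind][i])
		a[i] = SKP_Silk_Transition_LP_A_Q28[ind][i] + int32((d*fracQ16)>>16)
	}
	return b, a
}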
 20546  
 20547  var SKP_Silk_pitch_lag_NB_CDF = [130]uint16{
 20548  	uint16(0), uint16(194), uint16(395), uint16(608), uint16(841), uint16(1099), uint16(1391), uint16(1724),
 20549  	uint16(2105), uint16(2544), uint16(3047), uint16(3624), uint16(4282), uint16(5027), uint16(5865), uint16(6799),
 20550  	uint16(7833), uint16(8965), uint16(10193), uint16(11510), uint16(12910), uint16(14379), uint16(15905), uint16(17473),
 20551  	uint16(19065), uint16(20664), uint16(22252), uint16(23814), uint16(25335), uint16(26802), uint16(28206), uint16(29541),
 20552  	uint16(30803), uint16(31992), uint16(33110), uint16(34163), uint16(35156), uint16(36098), uint16(36997), uint16(37861),
 20553  	uint16(38698), uint16(39515), uint16(40319), uint16(41115), uint16(41906), uint16(42696), uint16(43485), uint16(44273),
 20554  	uint16(45061), uint16(45847), uint16(46630), uint16(47406), uint16(48175), uint16(48933), uint16(49679), uint16(50411),
 20555  	uint16(51126), uint16(51824), uint16(52502), uint16(53161), uint16(53799), uint16(54416), uint16(55011), uint16(55584),
 20556  	uint16(56136), uint16(56666), uint16(57174), uint16(57661), uint16(58126), uint16(58570), uint16(58993), uint16(59394),
 20557  	uint16(59775), uint16(60134), uint16(60472), uint16(60790), uint16(61087), uint16(61363), uint16(61620), uint16(61856),
 20558  	uint16(62075), uint16(62275), uint16(62458), uint16(62625), uint16(62778), uint16(62918), uint16(63045), uint16(63162),
 20559  	uint16(63269), uint16(63368), uint16(63459), uint16(63544), uint16(63623), uint16(63698), uint16(63769), uint16(63836),
 20560  	uint16(63901), uint16(63963), uint16(64023), uint16(64081), uint16(64138), uint16(64194), uint16(64248), uint16(64301),
 20561  	uint16(64354), uint16(64406), uint16(64457), uint16(64508), uint16(64558), uint16(64608), uint16(64657), uint16(64706),
 20562  	uint16(64754), uint16(64803), uint16(64851), uint16(64899), uint16(64946), uint16(64994), uint16(65041), uint16(65088),
 20563  	uint16(65135), uint16(65181), uint16(65227), uint16(65272), uint16(65317), uint16(65361), uint16(65405), uint16(65449),
 20564  	uint16(65492), uint16(65535),
 20565  } /* SKP_Silk_tables_pitch_lag.c:30:18 */
 20566  
 20567  var SKP_Silk_pitch_lag_NB_CDF_offset int32 = 43 /* SKP_Silk_tables_pitch_lag.c:50:15 */
 20568  
 20569  var SKP_Silk_pitch_contour_NB_CDF = [12]uint16{
 20570  	uint16(0), uint16(14445), uint16(18587), uint16(25628), uint16(30013), uint16(34859), uint16(40597), uint16(48426),
 20571  	uint16(54460), uint16(59033), uint16(62990), uint16(65535),
 20572  } /* SKP_Silk_tables_pitch_lag.c:52:18 */
 20573  
 20574  var SKP_Silk_pitch_contour_NB_CDF_offset int32 = 5 /* SKP_Silk_tables_pitch_lag.c:57:15 */
 20575  
 20576  var SKP_Silk_pitch_lag_MB_CDF = [194]uint16{
 20577  	uint16(0), uint16(132), uint16(266), uint16(402), uint16(542), uint16(686), uint16(838), uint16(997),
 20578  	uint16(1167), uint16(1349), uint16(1546), uint16(1760), uint16(1993), uint16(2248), uint16(2528), uint16(2835),
 20579  	uint16(3173), uint16(3544), uint16(3951), uint16(4397), uint16(4882), uint16(5411), uint16(5984), uint16(6604),
 20580  	uint16(7270), uint16(7984), uint16(8745), uint16(9552), uint16(10405), uint16(11300), uint16(12235), uint16(13206),
 20581  	uint16(14209), uint16(15239), uint16(16289), uint16(17355), uint16(18430), uint16(19507), uint16(20579), uint16(21642),
 20582  	uint16(22688), uint16(23712), uint16(24710), uint16(25677), uint16(26610), uint16(27507), uint16(28366), uint16(29188),
 20583  	uint16(29971), uint16(30717), uint16(31427), uint16(32104), uint16(32751), uint16(33370), uint16(33964), uint16(34537),
 20584  	uint16(35091), uint16(35630), uint16(36157), uint16(36675), uint16(37186), uint16(37692), uint16(38195), uint16(38697),
 20585  	uint16(39199), uint16(39701), uint16(40206), uint16(40713), uint16(41222), uint16(41733), uint16(42247), uint16(42761),
 20586  	uint16(43277), uint16(43793), uint16(44309), uint16(44824), uint16(45336), uint16(45845), uint16(46351), uint16(46851),
 20587  	uint16(47347), uint16(47836), uint16(48319), uint16(48795), uint16(49264), uint16(49724), uint16(50177), uint16(50621),
 20588  	uint16(51057), uint16(51484), uint16(51902), uint16(52312), uint16(52714), uint16(53106), uint16(53490), uint16(53866),
 20589  	uint16(54233), uint16(54592), uint16(54942), uint16(55284), uint16(55618), uint16(55944), uint16(56261), uint16(56571),
 20590  	uint16(56873), uint16(57167), uint16(57453), uint16(57731), uint16(58001), uint16(58263), uint16(58516), uint16(58762),
 20591  	uint16(58998), uint16(59226), uint16(59446), uint16(59656), uint16(59857), uint16(60050), uint16(60233), uint16(60408),
 20592  	uint16(60574), uint16(60732), uint16(60882), uint16(61024), uint16(61159), uint16(61288), uint16(61410), uint16(61526),
 20593  	uint16(61636), uint16(61742), uint16(61843), uint16(61940), uint16(62033), uint16(62123), uint16(62210), uint16(62293),
 20594  	uint16(62374), uint16(62452), uint16(62528), uint16(62602), uint16(62674), uint16(62744), uint16(62812), uint16(62879),
 20595  	uint16(62945), uint16(63009), uint16(63072), uint16(63135), uint16(63196), uint16(63256), uint16(63316), uint16(63375),
 20596  	uint16(63434), uint16(63491), uint16(63549), uint16(63605), uint16(63661), uint16(63717), uint16(63772), uint16(63827),
 20597  	uint16(63881), uint16(63935), uint16(63988), uint16(64041), uint16(64094), uint16(64147), uint16(64199), uint16(64252),
 20598  	uint16(64304), uint16(64356), uint16(64409), uint16(64461), uint16(64513), uint16(64565), uint16(64617), uint16(64669),
 20599  	uint16(64721), uint16(64773), uint16(64824), uint16(64875), uint16(64925), uint16(64975), uint16(65024), uint16(65072),
 20600  	uint16(65121), uint16(65168), uint16(65215), uint16(65262), uint16(65308), uint16(65354), uint16(65399), uint16(65445),
 20601  	uint16(65490), uint16(65535),
 20602  } /* SKP_Silk_tables_pitch_lag.c:59:18 */
 20603  
 20604  var SKP_Silk_pitch_lag_MB_CDF_offset int32 = 64 /* SKP_Silk_tables_pitch_lag.c:87:15 */
 20605  
 20606  var SKP_Silk_pitch_lag_WB_CDF = [258]uint16{
 20607  	uint16(0), uint16(106), uint16(213), uint16(321), uint16(429), uint16(539), uint16(651), uint16(766),
 20608  	uint16(884), uint16(1005), uint16(1132), uint16(1264), uint16(1403), uint16(1549), uint16(1705), uint16(1870),
 20609  	uint16(2047), uint16(2236), uint16(2439), uint16(2658), uint16(2893), uint16(3147), uint16(3420), uint16(3714),
 20610  	uint16(4030), uint16(4370), uint16(4736), uint16(5127), uint16(5546), uint16(5993), uint16(6470), uint16(6978),
 20611  	uint16(7516), uint16(8086), uint16(8687), uint16(9320), uint16(9985), uint16(10680), uint16(11405), uint16(12158),
 20612  	uint16(12938), uint16(13744), uint16(14572), uint16(15420), uint16(16286), uint16(17166), uint16(18057), uint16(18955),
 20613  	uint16(19857), uint16(20759), uint16(21657), uint16(22547), uint16(23427), uint16(24293), uint16(25141), uint16(25969),
 20614  	uint16(26774), uint16(27555), uint16(28310), uint16(29037), uint16(29736), uint16(30406), uint16(31048), uint16(31662),
 20615  	uint16(32248), uint16(32808), uint16(33343), uint16(33855), uint16(34345), uint16(34815), uint16(35268), uint16(35704),
 20616  	uint16(36127), uint16(36537), uint16(36938), uint16(37330), uint16(37715), uint16(38095), uint16(38471), uint16(38844),
 20617  	uint16(39216), uint16(39588), uint16(39959), uint16(40332), uint16(40707), uint16(41084), uint16(41463), uint16(41844),
 20618  	uint16(42229), uint16(42615), uint16(43005), uint16(43397), uint16(43791), uint16(44186), uint16(44583), uint16(44982),
 20619  	uint16(45381), uint16(45780), uint16(46179), uint16(46578), uint16(46975), uint16(47371), uint16(47765), uint16(48156),
 20620  	uint16(48545), uint16(48930), uint16(49312), uint16(49690), uint16(50064), uint16(50433), uint16(50798), uint16(51158),
 20621  	uint16(51513), uint16(51862), uint16(52206), uint16(52544), uint16(52877), uint16(53204), uint16(53526), uint16(53842),
 20622  	uint16(54152), uint16(54457), uint16(54756), uint16(55050), uint16(55338), uint16(55621), uint16(55898), uint16(56170),
 20623  	uint16(56436), uint16(56697), uint16(56953), uint16(57204), uint16(57449), uint16(57689), uint16(57924), uint16(58154),
 20624  	uint16(58378), uint16(58598), uint16(58812), uint16(59022), uint16(59226), uint16(59426), uint16(59620), uint16(59810),
 20625  	uint16(59994), uint16(60173), uint16(60348), uint16(60517), uint16(60681), uint16(60840), uint16(60993), uint16(61141),
 20626  	uint16(61284), uint16(61421), uint16(61553), uint16(61679), uint16(61800), uint16(61916), uint16(62026), uint16(62131),
 20627  	uint16(62231), uint16(62326), uint16(62417), uint16(62503), uint16(62585), uint16(62663), uint16(62737), uint16(62807),
 20628  	uint16(62874), uint16(62938), uint16(62999), uint16(63057), uint16(63113), uint16(63166), uint16(63217), uint16(63266),
 20629  	uint16(63314), uint16(63359), uint16(63404), uint16(63446), uint16(63488), uint16(63528), uint16(63567), uint16(63605),
 20630  	uint16(63642), uint16(63678), uint16(63713), uint16(63748), uint16(63781), uint16(63815), uint16(63847), uint16(63879),
 20631  	uint16(63911), uint16(63942), uint16(63973), uint16(64003), uint16(64033), uint16(64063), uint16(64092), uint16(64121),
 20632  	uint16(64150), uint16(64179), uint16(64207), uint16(64235), uint16(64263), uint16(64291), uint16(64319), uint16(64347),
 20633  	uint16(64374), uint16(64401), uint16(64428), uint16(64455), uint16(64481), uint16(64508), uint16(64534), uint16(64560),
 20634  	uint16(64585), uint16(64610), uint16(64635), uint16(64660), uint16(64685), uint16(64710), uint16(64734), uint16(64758),
 20635  	uint16(64782), uint16(64807), uint16(64831), uint16(64855), uint16(64878), uint16(64902), uint16(64926), uint16(64950),
 20636  	uint16(64974), uint16(64998), uint16(65022), uint16(65045), uint16(65069), uint16(65093), uint16(65116), uint16(65139),
 20637  	uint16(65163), uint16(65186), uint16(65209), uint16(65231), uint16(65254), uint16(65276), uint16(65299), uint16(65321),
 20638  	uint16(65343), uint16(65364), uint16(65386), uint16(65408), uint16(65429), uint16(65450), uint16(65471), uint16(65493),
 20639  	uint16(65514), uint16(65535),
 20640  } /* SKP_Silk_tables_pitch_lag.c:89:18 */
 20641  
 20642  var SKP_Silk_pitch_lag_WB_CDF_offset int32 = 86 /* SKP_Silk_tables_pitch_lag.c:125:15 */
 20643  
 20644  var SKP_Silk_pitch_lag_SWB_CDF = [386]uint16{
 20645  	uint16(0), uint16(253), uint16(505), uint16(757), uint16(1008), uint16(1258), uint16(1507), uint16(1755),
 20646  	uint16(2003), uint16(2249), uint16(2494), uint16(2738), uint16(2982), uint16(3225), uint16(3469), uint16(3713),
 20647  	uint16(3957), uint16(4202), uint16(4449), uint16(4698), uint16(4949), uint16(5203), uint16(5460), uint16(5720),
 20648  	uint16(5983), uint16(6251), uint16(6522), uint16(6798), uint16(7077), uint16(7361), uint16(7650), uint16(7942),
 20649  	uint16(8238), uint16(8539), uint16(8843), uint16(9150), uint16(9461), uint16(9775), uint16(10092), uint16(10411),
 20650  	uint16(10733), uint16(11057), uint16(11383), uint16(11710), uint16(12039), uint16(12370), uint16(12701), uint16(13034),
 20651  	uint16(13368), uint16(13703), uint16(14040), uint16(14377), uint16(14716), uint16(15056), uint16(15398), uint16(15742),
 20652  	uint16(16087), uint16(16435), uint16(16785), uint16(17137), uint16(17492), uint16(17850), uint16(18212), uint16(18577),
 20653  	uint16(18946), uint16(19318), uint16(19695), uint16(20075), uint16(20460), uint16(20849), uint16(21243), uint16(21640),
 20654  	uint16(22041), uint16(22447), uint16(22856), uint16(23269), uint16(23684), uint16(24103), uint16(24524), uint16(24947),
 20655  	uint16(25372), uint16(25798), uint16(26225), uint16(26652), uint16(27079), uint16(27504), uint16(27929), uint16(28352),
 20656  	uint16(28773), uint16(29191), uint16(29606), uint16(30018), uint16(30427), uint16(30831), uint16(31231), uint16(31627),
 20657  	uint16(32018), uint16(32404), uint16(32786), uint16(33163), uint16(33535), uint16(33902), uint16(34264), uint16(34621),
 20658  	uint16(34973), uint16(35320), uint16(35663), uint16(36000), uint16(36333), uint16(36662), uint16(36985), uint16(37304),
 20659  	uint16(37619), uint16(37929), uint16(38234), uint16(38535), uint16(38831), uint16(39122), uint16(39409), uint16(39692),
 20660  	uint16(39970), uint16(40244), uint16(40513), uint16(40778), uint16(41039), uint16(41295), uint16(41548), uint16(41796),
 20661  	uint16(42041), uint16(42282), uint16(42520), uint16(42754), uint16(42985), uint16(43213), uint16(43438), uint16(43660),
 20662  	uint16(43880), uint16(44097), uint16(44312), uint16(44525), uint16(44736), uint16(44945), uint16(45153), uint16(45359),
 20663  	uint16(45565), uint16(45769), uint16(45972), uint16(46175), uint16(46377), uint16(46578), uint16(46780), uint16(46981),
 20664  	uint16(47182), uint16(47383), uint16(47585), uint16(47787), uint16(47989), uint16(48192), uint16(48395), uint16(48599),
 20665  	uint16(48804), uint16(49009), uint16(49215), uint16(49422), uint16(49630), uint16(49839), uint16(50049), uint16(50259),
 20666  	uint16(50470), uint16(50682), uint16(50894), uint16(51107), uint16(51320), uint16(51533), uint16(51747), uint16(51961),
 20667  	uint16(52175), uint16(52388), uint16(52601), uint16(52813), uint16(53025), uint16(53236), uint16(53446), uint16(53655),
 20668  	uint16(53863), uint16(54069), uint16(54274), uint16(54477), uint16(54679), uint16(54879), uint16(55078), uint16(55274),
 20669  	uint16(55469), uint16(55662), uint16(55853), uint16(56042), uint16(56230), uint16(56415), uint16(56598), uint16(56779),
 20670  	uint16(56959), uint16(57136), uint16(57311), uint16(57484), uint16(57654), uint16(57823), uint16(57989), uint16(58152),
 20671  	uint16(58314), uint16(58473), uint16(58629), uint16(58783), uint16(58935), uint16(59084), uint16(59230), uint16(59373),
 20672  	uint16(59514), uint16(59652), uint16(59787), uint16(59919), uint16(60048), uint16(60174), uint16(60297), uint16(60417),
 20673  	uint16(60533), uint16(60647), uint16(60757), uint16(60865), uint16(60969), uint16(61070), uint16(61167), uint16(61262),
 20674  	uint16(61353), uint16(61442), uint16(61527), uint16(61609), uint16(61689), uint16(61765), uint16(61839), uint16(61910),
 20675  	uint16(61979), uint16(62045), uint16(62109), uint16(62170), uint16(62230), uint16(62287), uint16(62343), uint16(62396),
 20676  	uint16(62448), uint16(62498), uint16(62547), uint16(62594), uint16(62640), uint16(62685), uint16(62728), uint16(62770),
 20677  	uint16(62811), uint16(62852), uint16(62891), uint16(62929), uint16(62967), uint16(63004), uint16(63040), uint16(63075),
 20678  	uint16(63110), uint16(63145), uint16(63178), uint16(63212), uint16(63244), uint16(63277), uint16(63308), uint16(63340),
 20679  	uint16(63371), uint16(63402), uint16(63432), uint16(63462), uint16(63491), uint16(63521), uint16(63550), uint16(63578),
 20680  	uint16(63607), uint16(63635), uint16(63663), uint16(63690), uint16(63718), uint16(63744), uint16(63771), uint16(63798),
 20681  	uint16(63824), uint16(63850), uint16(63875), uint16(63900), uint16(63925), uint16(63950), uint16(63975), uint16(63999),
 20682  	uint16(64023), uint16(64046), uint16(64069), uint16(64092), uint16(64115), uint16(64138), uint16(64160), uint16(64182),
 20683  	uint16(64204), uint16(64225), uint16(64247), uint16(64268), uint16(64289), uint16(64310), uint16(64330), uint16(64351),
 20684  	uint16(64371), uint16(64391), uint16(64411), uint16(64431), uint16(64450), uint16(64470), uint16(64489), uint16(64508),
 20685  	uint16(64527), uint16(64545), uint16(64564), uint16(64582), uint16(64600), uint16(64617), uint16(64635), uint16(64652),
 20686  	uint16(64669), uint16(64686), uint16(64702), uint16(64719), uint16(64735), uint16(64750), uint16(64766), uint16(64782),
 20687  	uint16(64797), uint16(64812), uint16(64827), uint16(64842), uint16(64857), uint16(64872), uint16(64886), uint16(64901),
 20688  	uint16(64915), uint16(64930), uint16(64944), uint16(64959), uint16(64974), uint16(64988), uint16(65003), uint16(65018),
 20689  	uint16(65033), uint16(65048), uint16(65063), uint16(65078), uint16(65094), uint16(65109), uint16(65125), uint16(65141),
 20690  	uint16(65157), uint16(65172), uint16(65188), uint16(65204), uint16(65220), uint16(65236), uint16(65252), uint16(65268),
 20691  	uint16(65283), uint16(65299), uint16(65314), uint16(65330), uint16(65345), uint16(65360), uint16(65375), uint16(65390),
 20692  	uint16(65405), uint16(65419), uint16(65434), uint16(65449), uint16(65463), uint16(65477), uint16(65492), uint16(65506),
 20693  	uint16(65521), uint16(65535),
 20694  } /* SKP_Silk_tables_pitch_lag.c:128:18 */
 20695  
 20696  var SKP_Silk_pitch_lag_SWB_CDF_offset int32 = 128 /* SKP_Silk_tables_pitch_lag.c:180:15 */
 20697  
 20698  var SKP_Silk_pitch_contour_CDF = [35]uint16{
 20699  	uint16(0), uint16(372), uint16(843), uint16(1315), uint16(1836), uint16(2644), uint16(3576), uint16(4719),
 20700  	uint16(6088), uint16(7621), uint16(9396), uint16(11509), uint16(14245), uint16(17618), uint16(20777), uint16(24294),
 20701  	uint16(27992), uint16(33116), uint16(40100), uint16(44329), uint16(47558), uint16(50679), uint16(53130), uint16(55557),
 20702  	uint16(57510), uint16(59022), uint16(60285), uint16(61345), uint16(62316), uint16(63140), uint16(63762), uint16(64321),
 20703  	uint16(64729), uint16(65099), uint16(65535),
 20704  } /* SKP_Silk_tables_pitch_lag.c:183:18 */
 20705  
 20706  var SKP_Silk_pitch_contour_CDF_offset int32 = 17 /* SKP_Silk_tables_pitch_lag.c:191:15 */
 20707  
 20708  var SKP_Silk_pitch_delta_CDF = [23]uint16{
 20709  	uint16(0), uint16(343), uint16(740), uint16(1249), uint16(1889), uint16(2733), uint16(3861), uint16(5396),
 20710  	uint16(7552), uint16(10890), uint16(16053), uint16(24152), uint16(30220), uint16(34680), uint16(37973), uint16(40405),
 20711  	uint16(42243), uint16(43708), uint16(44823), uint16(45773), uint16(46462), uint16(47055), uint16(65535),
 20712  } /* SKP_Silk_tables_pitch_lag.c:193:18 */
 20713  
 20714  var SKP_Silk_pitch_delta_CDF_offset int32 = 11 /* SKP_Silk_tables_pitch_lag.c:199:15 */
 20715  
 20716  var SKP_Silk_max_pulses_table = [4]int32{
 20717  	6, 8, 12, 18,
 20718  } /* SKP_Silk_tables_pulses_per_block.c:30:15 */
 20719  
 20720  var SKP_Silk_pulses_per_block_CDF = [10][21]uint16{
 20721  	{
 20722  		uint16(0), uint16(47113), uint16(61501), uint16(64590), uint16(65125), uint16(65277), uint16(65352), uint16(65407),
 20723  		uint16(65450), uint16(65474), uint16(65488), uint16(65501), uint16(65508), uint16(65514), uint16(65516), uint16(65520),
 20724  		uint16(65521), uint16(65523), uint16(65524), uint16(65526), uint16(65535),
 20725  	},
 20726  	{
 20727  		uint16(0), uint16(26368), uint16(47760), uint16(58803), uint16(63085), uint16(64567), uint16(65113), uint16(65333),
 20728  		uint16(65424), uint16(65474), uint16(65498), uint16(65511), uint16(65517), uint16(65520), uint16(65523), uint16(65525),
 20729  		uint16(65526), uint16(65528), uint16(65529), uint16(65530), uint16(65535),
 20730  	},
 20731  	{
 20732  		uint16(0), uint16(9601), uint16(28014), uint16(45877), uint16(57210), uint16(62560), uint16(64611), uint16(65260),
 20733  		uint16(65447), uint16(65500), uint16(65511), uint16(65519), uint16(65521), uint16(65525), uint16(65526), uint16(65529),
 20734  		uint16(65530), uint16(65531), uint16(65532), uint16(65534), uint16(65535),
 20735  	},
 20736  	{
 20737  		uint16(0), uint16(3351), uint16(12462), uint16(25972), uint16(39782), uint16(50686), uint16(57644), uint16(61525),
 20738  		uint16(63521), uint16(64506), uint16(65009), uint16(65255), uint16(65375), uint16(65441), uint16(65471), uint16(65488),
 20739  		uint16(65497), uint16(65505), uint16(65509), uint16(65512), uint16(65535),
 20740  	},
 20741  	{
 20742  		uint16(0), uint16(488), uint16(2944), uint16(9295), uint16(19712), uint16(32160), uint16(43976), uint16(53121),
 20743  		uint16(59144), uint16(62518), uint16(64213), uint16(65016), uint16(65346), uint16(65470), uint16(65511), uint16(65515),
 20744  		uint16(65525), uint16(65529), uint16(65531), uint16(65534), uint16(65535),
 20745  	},
 20746  	{
 20747  		uint16(0), uint16(17013), uint16(30405), uint16(40812), uint16(48142), uint16(53466), uint16(57166), uint16(59845),
 20748  		uint16(61650), uint16(62873), uint16(63684), uint16(64223), uint16(64575), uint16(64811), uint16(64959), uint16(65051),
 20749  		uint16(65111), uint16(65143), uint16(65165), uint16(65183), uint16(65535),
 20750  	},
 20751  	{
 20752  		uint16(0), uint16(2994), uint16(8323), uint16(15845), uint16(24196), uint16(32300), uint16(39340), uint16(45140),
 20753  		uint16(49813), uint16(53474), uint16(56349), uint16(58518), uint16(60167), uint16(61397), uint16(62313), uint16(62969),
 20754  		uint16(63410), uint16(63715), uint16(63906), uint16(64056), uint16(65535),
 20755  	},
 20756  	{
 20757  		uint16(0), uint16(88), uint16(721), uint16(2795), uint16(7542), uint16(14888), uint16(24420), uint16(34593),
 20758  		uint16(43912), uint16(51484), uint16(56962), uint16(60558), uint16(62760), uint16(64037), uint16(64716), uint16(65069),
 20759  		uint16(65262), uint16(65358), uint16(65398), uint16(65420), uint16(65535),
 20760  	},
 20761  	{
 20762  		uint16(0), uint16(287), uint16(789), uint16(2064), uint16(4398), uint16(8174), uint16(13534), uint16(20151),
 20763  		uint16(27347), uint16(34533), uint16(41295), uint16(47242), uint16(52070), uint16(55772), uint16(58458), uint16(60381),
 20764  		uint16(61679), uint16(62533), uint16(63109), uint16(63519), uint16(65535),
 20765  	},
 20766  	{
 20767  		uint16(0), uint16(1), uint16(3), uint16(91), uint16(4521), uint16(14708), uint16(28329), uint16(41955),
 20768  		uint16(52116), uint16(58375), uint16(61729), uint16(63534), uint16(64459), uint16(64924), uint16(65092), uint16(65164),
 20769  		uint16(65182), uint16(65198), uint16(65203), uint16(65211), uint16(65535),
 20770  	},
 20771  } /* SKP_Silk_tables_pulses_per_block.c:34:18 */
 20772  
 20773  var SKP_Silk_pulses_per_block_CDF_offset int32 = 6 /* SKP_Silk_tables_pulses_per_block.c:88:15 */
 20774  
 20775  var SKP_Silk_pulses_per_block_BITS_Q6 = [9][20]int16{
 20776  	{
 20777  		int16(30), int16(140), int16(282), int16(444), int16(560), int16(625), int16(654), int16(677),
 20778  		int16(731), int16(780), int16(787), int16(844), int16(859), int16(960), int16(896), int16(1024),
 20779  		int16(960), int16(1024), int16(960), int16(821),
 20780  	},
 20781  	{
 20782  		int16(84), int16(103), int16(164), int16(252), int16(350), int16(442), int16(526), int16(607),
 20783  		int16(663), int16(731), int16(787), int16(859), int16(923), int16(923), int16(960), int16(1024),
 20784  		int16(960), int16(1024), int16(1024), int16(875),
 20785  	},
 20786  	{
 20787  		int16(177), int16(117), int16(120), int16(162), int16(231), int16(320), int16(426), int16(541),
 20788  		int16(657), int16(803), int16(832), int16(960), int16(896), int16(1024), int16(923), int16(1024),
 20789  		int16(1024), int16(1024), int16(960), int16(1024),
 20790  	},
 20791  	{
 20792  		int16(275), int16(182), int16(146), int16(144), int16(166), int16(207), int16(261), int16(322),
 20793  		int16(388), int16(450), int16(516), int16(582), int16(637), int16(710), int16(762), int16(821),
 20794  		int16(832), int16(896), int16(923), int16(734),
 20795  	},
 20796  	{
 20797  		int16(452), int16(303), int16(216), int16(170), int16(153), int16(158), int16(182), int16(220),
 20798  		int16(274), int16(337), int16(406), int16(489), int16(579), int16(681), int16(896), int16(811),
 20799  		int16(896), int16(960), int16(923), int16(1024),
 20800  	},
 20801  	{
 20802  		int16(125), int16(147), int16(170), int16(202), int16(232), int16(265), int16(295), int16(332),
 20803  		int16(368), int16(406), int16(443), int16(483), int16(520), int16(563), int16(606), int16(646),
 20804  		int16(704), int16(739), int16(757), int16(483),
 20805  	},
 20806  	{
 20807  		int16(285), int16(232), int16(200), int16(190), int16(193), int16(206), int16(224), int16(244),
 20808  		int16(266), int16(289), int16(315), int16(340), int16(367), int16(394), int16(425), int16(462),
 20809  		int16(496), int16(539), int16(561), int16(350),
 20810  	},
 20811  	{
 20812  		int16(611), int16(428), int16(319), int16(242), int16(202), int16(178), int16(172), int16(180),
 20813  		int16(199), int16(229), int16(268), int16(313), int16(364), int16(422), int16(482), int16(538),
 20814  		int16(603), int16(683), int16(739), int16(586),
 20815  	},
 20816  	{
 20817  		int16(501), int16(450), int16(364), int16(308), int16(264), int16(231), int16(212), int16(204),
 20818  		int16(204), int16(210), int16(222), int16(241), int16(265), int16(295), int16(326), int16(362),
 20819  		int16(401), int16(437), int16(469), int16(321),
 20820  	},
 20821  } /* SKP_Silk_tables_pulses_per_block.c:91:17 */
 20822  
 20823  var SKP_Silk_rate_levels_CDF = [2][10]uint16{
 20824  	{
 20825  		uint16(0), uint16(2005), uint16(12717), uint16(20281), uint16(31328), uint16(36234), uint16(45816), uint16(57753),
 20826  		uint16(63104), uint16(65535),
 20827  	},
 20828  	{
 20829  		uint16(0), uint16(8553), uint16(23489), uint16(36031), uint16(46295), uint16(53519), uint16(56519), uint16(59151),
 20830  		uint16(64185), uint16(65535),
 20831  	},
 20832  } /* SKP_Silk_tables_pulses_per_block.c:140:18 */
 20833  
 20834  var SKP_Silk_rate_levels_CDF_offset int32 = 4 /* SKP_Silk_tables_pulses_per_block.c:152:15 */
 20835  
 20836  var SKP_Silk_rate_levels_BITS_Q6 = [2][9]int16{
 20837  	{
 20838  		int16(322), int16(167), int16(199), int16(164), int16(239), int16(178), int16(157), int16(231),
 20839  		int16(304),
 20840  	},
 20841  	{
 20842  		int16(188), int16(137), int16(153), int16(171), int16(204), int16(285), int16(297), int16(237),
 20843  		int16(358),
 20844  	},
 20845  } /* SKP_Silk_tables_pulses_per_block.c:155:17 */
 20846  
 20847  var SKP_Silk_shell_code_table0 = [33]uint16{
 20848  	uint16(0), uint16(32748), uint16(65535), uint16(0), uint16(9505), uint16(56230), uint16(65535), uint16(0),
 20849  	uint16(4093), uint16(32204), uint16(61720), uint16(65535), uint16(0), uint16(2285), uint16(16207), uint16(48750),
 20850  	uint16(63424), uint16(65535), uint16(0), uint16(1709), uint16(9446), uint16(32026), uint16(55752), uint16(63876),
 20851  	uint16(65535), uint16(0), uint16(1623), uint16(6986), uint16(21845), uint16(45381), uint16(59147), uint16(64186),
 20852  	uint16(65535),
 20853  } /* SKP_Silk_tables_pulses_per_block.c:167:18 */
 20854  
 20855  var SKP_Silk_shell_code_table1 = [52]uint16{
 20856  	uint16(0), uint16(32691), uint16(65535), uint16(0), uint16(12782), uint16(52752), uint16(65535), uint16(0),
 20857  	uint16(4847), uint16(32665), uint16(60899), uint16(65535), uint16(0), uint16(2500), uint16(17305), uint16(47989),
 20858  	uint16(63369), uint16(65535), uint16(0), uint16(1843), uint16(10329), uint16(32419), uint16(55433), uint16(64277),
 20859  	uint16(65535), uint16(0), uint16(1485), uint16(7062), uint16(21465), uint16(43414), uint16(59079), uint16(64623),
 20860  	uint16(65535), uint16(0), uint16(0), uint16(4841), uint16(14797), uint16(31799), uint16(49667), uint16(61309),
 20861  	uint16(65535), uint16(65535), uint16(0), uint16(0), uint16(0), uint16(8032), uint16(21695), uint16(41078),
 20862  	uint16(56317), uint16(65535), uint16(65535), uint16(65535),
 20863  } /* SKP_Silk_tables_pulses_per_block.c:175:18 */
 20864  
 20865  var SKP_Silk_shell_code_table2 = [102]uint16{
 20866  	uint16(0), uint16(32615), uint16(65535), uint16(0), uint16(14447), uint16(50912), uint16(65535), uint16(0),
 20867  	uint16(6301), uint16(32587), uint16(59361), uint16(65535), uint16(0), uint16(3038), uint16(18640), uint16(46809),
 20868  	uint16(62852), uint16(65535), uint16(0), uint16(1746), uint16(10524), uint16(32509), uint16(55273), uint16(64278),
 20869  	uint16(65535), uint16(0), uint16(1234), uint16(6360), uint16(21259), uint16(43712), uint16(59651), uint16(64805),
 20870  	uint16(65535), uint16(0), uint16(1020), uint16(4461), uint16(14030), uint16(32286), uint16(51249), uint16(61904),
 20871  	uint16(65100), uint16(65535), uint16(0), uint16(851), uint16(3435), uint16(10006), uint16(23241), uint16(40797),
 20872  	uint16(55444), uint16(63009), uint16(65252), uint16(65535), uint16(0), uint16(0), uint16(2075), uint16(7137),
 20873  	uint16(17119), uint16(31499), uint16(46982), uint16(58723), uint16(63976), uint16(65535), uint16(65535), uint16(0),
 20874  	uint16(0), uint16(0), uint16(3820), uint16(11572), uint16(23038), uint16(37789), uint16(51969), uint16(61243),
 20875  	uint16(65535), uint16(65535), uint16(65535), uint16(0), uint16(0), uint16(0), uint16(0), uint16(6882),
 20876  	uint16(16828), uint16(30444), uint16(44844), uint16(57365), uint16(65535), uint16(65535), uint16(65535), uint16(65535),
 20877  	uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(10093), uint16(22963), uint16(38779),
 20878  	uint16(54426), uint16(65535), uint16(65535), uint16(65535), uint16(65535), uint16(65535),
 20879  } /* SKP_Silk_tables_pulses_per_block.c:185:18 */
 20880  
 20881  var SKP_Silk_shell_code_table3 = [207]uint16{
 20882  	uint16(0), uint16(32324), uint16(65535), uint16(0), uint16(15328), uint16(49505), uint16(65535), uint16(0),
 20883  	uint16(7474), uint16(32344), uint16(57955), uint16(65535), uint16(0), uint16(3944), uint16(19450), uint16(45364),
 20884  	uint16(61873), uint16(65535), uint16(0), uint16(2338), uint16(11698), uint16(32435), uint16(53915), uint16(63734),
 20885  	uint16(65535), uint16(0), uint16(1506), uint16(7074), uint16(21778), uint16(42972), uint16(58861), uint16(64590),
 20886  	uint16(65535), uint16(0), uint16(1027), uint16(4490), uint16(14383), uint16(32264), uint16(50980), uint16(61712),
 20887  	uint16(65043), uint16(65535), uint16(0), uint16(760), uint16(3022), uint16(9696), uint16(23264), uint16(41465),
 20888  	uint16(56181), uint16(63253), uint16(65251), uint16(65535), uint16(0), uint16(579), uint16(2256), uint16(6873),
 20889  	uint16(16661), uint16(31951), uint16(48250), uint16(59403), uint16(64198), uint16(65360), uint16(65535), uint16(0),
 20890  	uint16(464), uint16(1783), uint16(5181), uint16(12269), uint16(24247), uint16(39877), uint16(53490), uint16(61502),
 20891  	uint16(64591), uint16(65410), uint16(65535), uint16(0), uint16(366), uint16(1332), uint16(3880), uint16(9273),
 20892  	uint16(18585), uint16(32014), uint16(45928), uint16(56659), uint16(62616), uint16(64899), uint16(65483), uint16(65535),
 20893  	uint16(0), uint16(286), uint16(1065), uint16(3089), uint16(6969), uint16(14148), uint16(24859), uint16(38274),
 20894  	uint16(50715), uint16(59078), uint16(63448), uint16(65091), uint16(65481), uint16(65535), uint16(0), uint16(0),
 20895  	uint16(482), uint16(2010), uint16(5302), uint16(10408), uint16(18988), uint16(30698), uint16(43634), uint16(54233),
 20896  	uint16(60828), uint16(64119), uint16(65288), uint16(65535), uint16(65535), uint16(0), uint16(0), uint16(0),
 20897  	uint16(1006), uint16(3531), uint16(7857), uint16(14832), uint16(24543), uint16(36272), uint16(47547), uint16(56883),
 20898  	uint16(62327), uint16(64746), uint16(65535), uint16(65535), uint16(65535), uint16(0), uint16(0), uint16(0),
 20899  	uint16(0), uint16(1863), uint16(4950), uint16(10730), uint16(19284), uint16(29397), uint16(41382), uint16(52335),
 20900  	uint16(59755), uint16(63834), uint16(65535), uint16(65535), uint16(65535), uint16(65535), uint16(0), uint16(0),
 20901  	uint16(0), uint16(0), uint16(0), uint16(2513), uint16(7290), uint16(14487), uint16(24275), uint16(35312),
 20902  	uint16(46240), uint16(55841), uint16(62007), uint16(65535), uint16(65535), uint16(65535), uint16(65535), uint16(65535),
 20903  	uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0), uint16(3606), uint16(9573),
 20904  	uint16(18764), uint16(28667), uint16(40220), uint16(51290), uint16(59924), uint16(65535), uint16(65535), uint16(65535),
 20905  	uint16(65535), uint16(65535), uint16(65535), uint16(0), uint16(0), uint16(0), uint16(0), uint16(0),
 20906  	uint16(0), uint16(0), uint16(4879), uint16(13091), uint16(23376), uint16(36061), uint16(49395), uint16(59315),
 20907  	uint16(65535), uint16(65535), uint16(65535), uint16(65535), uint16(65535), uint16(65535), uint16(65535),
 20908  } /* SKP_Silk_tables_pulses_per_block.c:201:18 */
 20909  
 20910  var SKP_Silk_shell_code_table_offsets = [19]uint16{
 20911  	uint16(0), uint16(0), uint16(3), uint16(7), uint16(12), uint16(18), uint16(25), uint16(33),
 20912  	uint16(42), uint16(52), uint16(63), uint16(75), uint16(88), uint16(102), uint16(117), uint16(133),
 20913  	uint16(150), uint16(168), uint16(187),
 20914  } /* SKP_Silk_tables_pulses_per_block.c:230:18 */
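        /* Each shell code table above is a concatenation of CDFs used by the shell coder:
           splitting n pulses between the two halves of an interval has n+1 outcomes, coded
           with an (n+2)-entry CDF starting at SKP_Silk_shell_code_table_offsets[n]; the four
           tables serve the successive split levels (larger intervals permit larger n, hence
           the longer tables). */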
 20915  
 20916  var SKP_Silk_sign_CDF = [36]uint16{
 20917  	uint16(37840), uint16(36944), uint16(36251), uint16(35304),
 20918  	uint16(34715), uint16(35503), uint16(34529), uint16(34296),
 20919  	uint16(34016), uint16(47659), uint16(44945), uint16(42503),
 20920  	uint16(40235), uint16(38569), uint16(40254), uint16(37851),
 20921  	uint16(37243), uint16(36595), uint16(43410), uint16(44121),
 20922  	uint16(43127), uint16(40978), uint16(38845), uint16(40433),
 20923  	uint16(38252), uint16(37795), uint16(36637), uint16(59159),
 20924  	uint16(55630), uint16(51806), uint16(48073), uint16(45036),
 20925  	uint16(48416), uint16(43857), uint16(42678), uint16(41146),
 20926  } /* SKP_Silk_tables_sign.c:30:18 */
 20927  
 20928  var SKP_Silk_type_offset_CDF = [5]uint16{
 20929  	uint16(0), uint16(37522), uint16(41030), uint16(44212), uint16(65535),
 20930  } /* SKP_Silk_tables_type_offset.c:30:18 */
 20931  
 20932  var SKP_Silk_type_offset_CDF_offset int32 = 2 /* SKP_Silk_tables_type_offset.c:34:15 */
 20933  
 20934  var SKP_Silk_type_offset_joint_CDF = [4][5]uint16{
 20935  	{
 20936  		uint16(0), uint16(57686), uint16(61230), uint16(62358), uint16(65535),
 20937  	},
 20938  	{
 20939  		uint16(0), uint16(18346), uint16(40067), uint16(43659), uint16(65535),
 20940  	},
 20941  	{
 20942  		uint16(0), uint16(22694), uint16(24279), uint16(35507), uint16(65535),
 20943  	},
 20944  	{
 20945  		uint16(0), uint16(6067), uint16(7215), uint16(13010), uint16(65535),
 20946  	},
 20947  } /* SKP_Silk_tables_type_offset.c:37:18 */
 20948  
 20949  /***********************************************************************
 20950  Copyright (c) 2006-2012, Skype Limited. All rights reserved.
 20951  Redistribution and use in source and binary forms, with or without
 20952  modification, (subject to the limitations in the disclaimer below)
 20953  are permitted provided that the following conditions are met:
 20954  - Redistributions of source code must retain the above copyright notice,
 20955  this list of conditions and the following disclaimer.
 20956  - Redistributions in binary form must reproduce the above copyright
 20957  notice, this list of conditions and the following disclaimer in the
 20958  documentation and/or other materials provided with the distribution.
 20959  - Neither the name of Skype Limited, nor the names of specific
 20960  contributors, may be used to endorse or promote products derived from
 20961  this software without specific prior written permission.
 20962  NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED
 20963  BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 20964  CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
 20965  BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 20966  FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 20967  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 20968  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 20969  NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 20970  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 20971  ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 20972  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 20973  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 20974  ***********************************************************************/
 20975  
 20976  /*******************/
 20977  /* Pitch estimator */
 20978  /*******************/
 20979  
 20980  /* Level of noise floor for whitening filter LPC analysis in pitch analysis */
 20981  
 20982  /* Bandwidth expansion for whitening filter in pitch analysis */
 20983  
 20984  /* Threshold used by pitch estimator for early escape */
 20985  
 20986  /*********************/
 20987  /* Linear prediction */
 20988  /*********************/
 20989  
 20990  /* LPC analysis defines: regularization and bandwidth expansion */
 20991  
 20992  /* LTP analysis defines */
 20993  
 20994  /* LTP quantization settings */
 20995  
 20996  /***********************/
 20997  /* High pass filtering */
 20998  /***********************/
 20999  
 21000  /* Smoothing parameters for low end of pitch frequency range estimation */
 21001  
 21002  /* Min and max values for low end of pitch frequency range estimation */
 21003  
 21004  /* Max absolute difference between log2 of pitch frequency and smoother state, to enter the smoother */
 21005  
 21006  /***********/
 21007  /* Various */
 21008  /***********/
 21009  
 21010  /* Required speech activity for counting frame as active */
 21011  
 21012  /* Speech Activity LBRR enable threshold (needs tuning) */
 21013  
 21014  /*************************/
 21015  /* Perceptual parameters */
 21016  /*************************/
 21017  
 21018  /* reduction in coding SNR during low speech activity */
 21019  
 21020  /* factor for reducing quantization noise during voiced speech */
 21021  
 21022  /* factor for reducing quantization noise for unvoiced sparse signals */
 21023  
 21024  /* threshold for sparseness measure above which to use lower quantization offset during unvoiced */
 21025  
 21026  /* warping control */
 21027  
 21028  /* fraction added to first autocorrelation value */
 21029  
 21030  /* noise shaping filter chirp factor */
 21031  
 21032  /* difference between chirp factors for analysis and synthesis noise shaping filters at low bitrates */
 21033  
 21034  /* gain reduction for fricatives */
 21035  
 21036  /* extra harmonic boosting (signal shaping) at low bitrates */
 21037  
 21038  /* extra harmonic boosting (signal shaping) for noisy input signals */
 21039  
 21040  /* harmonic noise shaping */
 21041  
 21042  /* extra harmonic noise shaping for high bitrates or noisy input */
 21043  
 21044  /* parameter for shaping noise towards higher frequencies */
 21045  
 21046  /* parameter for shaping noise even more towards higher frequencies during voiced speech */
 21047  
 21048  /* parameter for applying a high-pass tilt to the input signal */
 21049  
 21050  /* parameter for extra high-pass tilt to the input signal at high rates */
 21051  
 21052  /* parameter for reducing noise at the very low frequencies */
 21053  
 21054  /* less reduction of noise at the very low frequencies for signals with low SNR at low frequencies */
 21055  
 21056  /* noise floor to put a lower limit on the quantization step size */
 21057  
 21058  /* noise floor relative to active speech gain level */
 21059  
 21060  /* subframe smoothing coefficient for determining active speech gain level (lower -> more smoothing) */
 21061  
 21062  /* subframe smoothing coefficient for HarmBoost, HarmShapeGain, Tilt (lower -> more smoothing) */
 21063  
 21064  /* parameters defining the R/D tradeoff in the residual quantizer */
 21065  
 21066  /**********************************/
 21067  /* Initialization of the Silk VAD */
 21068  /**********************************/
 21069  func SKP_Silk_VAD_Init(tls *libc.TLS, psSilk_VAD uintptr) int32 { /* SKP_Silk_VAD.c:39:9: */
 21070  	var b int32
 21071  	var ret int32 = 0
 21072  
 21073  	/* reset state memory */
 21074  	libc.Xmemset(tls, psSilk_VAD, 0, uint32(unsafe.Sizeof(SKP_Silk_VAD_state{})))
 21075  
 21076  	/* init noise levels */
 21077  	/* Initialize array with approx pink noise levels (psd proportional to inverse of frequency) */
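        	/* With the bias constant of 50 this yields NoiseLevelBias = 50, 25, 16, 12 for the four bands */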
 21078  	for b = 0; b < 4; b++ {
 21079  		*(*int32)(unsafe.Pointer((psSilk_VAD + 92 /* &.NoiseLevelBias */) + uintptr(b)*4)) = SKP_max_32(tls, ((50) / (b + 1)), 1)
 21080  	}
 21081  
 21082  	/* Initialize state */
 21083  	for b = 0; b < 4; b++ {
 21084  		*(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(b)*4)) = ((100) * (*(*int32)(unsafe.Pointer((psSilk_VAD + 92 /* &.NoiseLevelBias */) + uintptr(b)*4))))
 21085  		*(*int32)(unsafe.Pointer((psSilk_VAD + 76 /* &.inv_NL */) + uintptr(b)*4)) = ((0x7FFFFFFF) / (*(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(b)*4))))
 21086  	}
 21087  	(*SKP_Silk_VAD_state)(unsafe.Pointer(psSilk_VAD)).Fcounter = 15
 21088  
 21089  	/* init smoothed energy-to-noise ratio*/
 21090  	for b = 0; b < 4; b++ {
 21091  		*(*int32)(unsafe.Pointer((psSilk_VAD + 40 /* &.NrgRatioSmth_Q8 */) + uintptr(b)*4)) = (100 * 256) /* 100 * 256 --> 20 dB SNR */
 21092  	}
 21093  
 21094  	return ret
 21095  }
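
        /* A minimal usage sketch (illustrative only, not part of the generated code), assuming a
           modernc.org/libc TLS and a 20 ms frame at 24 kHz (480 samples); the frame size, the
           buffer layout and pIn below are assumptions:

        	tls := libc.NewTLS()
        	vad := tls.Alloc(int(unsafe.Sizeof(SKP_Silk_VAD_state{})))
        	SKP_Silk_VAD_Init(tls, vad)
        	// pIn: tls-allocated buffer holding 480 int16 PCM samples (assumed)
        	out := tls.Alloc(28) // SA_Q8, SNR_dB_Q7, Quality_Q15[4], Tilt_Q15 (all int32)
        	SKP_Silk_VAD_GetSA_Q8(tls, vad, out, out+4, out+8, out+24, pIn, 480)
        */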
 21096  
 21097  /* Weighting factors for tilt measure */
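        /* Ordered from the lowest to the highest band: the positive low-band weights and negative
           high-band weights make input_tilt (below) positive when the SNR is concentrated at low
           frequencies. */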
 21098  var tiltWeights = [4]int32{30000, 6000, -12000, -12000} /* SKP_Silk_VAD.c:70:24 */
 21099  
 21100  /***************************************/
 21101  /* Get the speech activity level in Q8 */
 21102  /***************************************/
 21103  func SKP_Silk_VAD_GetSA_Q8(tls *libc.TLS, psSilk_VAD uintptr, pSA_Q8 uintptr, pSNR_dB_Q7 uintptr, pQuality_Q15 uintptr, pTilt_Q15 uintptr, pIn uintptr, framelength int32) int32 { /* SKP_Silk_VAD.c:75:9: */
 21104  	bp := tls.Alloc(4832)
 21105  	defer tls.Free(4832)
 21106  
 21107  	var SA_Q15 int32
 21108  	var input_tilt int32
 21109  	// var scratch [720]int32 at bp+1920, 2880
 21110  
 21111  	var decimated_framelength int32
 21112  	var dec_subframe_length int32
 21113  	var dec_subframe_offset int32
 21114  	var SNR_Q7 int32
 21115  	var i int32
 21116  	var b int32
 21117  	var s int32
 21118  	var sumSquared int32
 21119  	var smooth_coef_Q16 int32
 21120  	var HPstateTmp int16
 21121  	// var X [4][240]int16 at bp, 1920
 21122  
 21123  	// var Xnrg [4]int32 at bp+4800, 16
 21124  
 21125  	// var NrgToNoiseRatio_Q8 [4]int32 at bp+4816, 16
 21126  
 21127  	var speech_nrg int32
 21128  	var x_tmp int32
 21129  	var ret int32 = 0
 21130  
 21131  	/* Safety checks */
 21132  
 21133  	/***********************/
 21134  	/* Filter and Decimate */
 21135  	/***********************/
 21136  	/* 0-8 kHz to 0-4 kHz and 4-8 kHz */
 21137  	SKP_Silk_ana_filt_bank_1(tls, pIn, (psSilk_VAD /* &.AnaState */), (bp /* &X */), (bp /* &X */ + 3*480), (bp + 1920 /* &scratch */), framelength)
 21138  
 21139  	/* 0-4 kHz to 0-2 kHz and 2-4 kHz */
 21140  	SKP_Silk_ana_filt_bank_1(tls, (bp /* &X */), (psSilk_VAD + 8 /* &.AnaState1 */), (bp /* &X */), (bp /* &X */ + 2*480), (bp + 1920 /* &scratch */), ((framelength) >> (1)))
 21141  
 21142  	/* 0-2 kHz to 0-1 kHz and 1-2 kHz */
 21143  	SKP_Silk_ana_filt_bank_1(tls, (bp /* &X */), (psSilk_VAD + 16 /* &.AnaState2 */), (bp /* &X */), (bp /* &X */ + 1*480), (bp + 1920 /* &scratch */), ((framelength) >> (2)))
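        	/* X[0..3] now hold the 0-1, 1-2, 2-4 and 4-8 kHz bands, decimated by 8, 8, 4 and 2 respectively */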
 21144  
 21145  	/*********************************************/
 21146  	/* HP filter on lowest band (differentiator) */
 21147  	/*********************************************/
 21148  	decimated_framelength = ((framelength) >> (3))
 21149  	*(*int16)(unsafe.Pointer((bp /* &X[0] */) + uintptr((decimated_framelength-1))*2)) = (int16((int32(*(*int16)(unsafe.Pointer((bp /* &X[0] */) + uintptr((decimated_framelength-1))*2)))) >> (1)))
 21150  	HPstateTmp = *(*int16)(unsafe.Pointer((bp /* &X[0] */) + uintptr((decimated_framelength-1))*2))
 21151  	for i = (decimated_framelength - 1); i > 0; i-- {
 21152  		*(*int16)(unsafe.Pointer((bp /* &X[0] */) + uintptr((i-1))*2)) = (int16((int32(*(*int16)(unsafe.Pointer((bp /* &X[0] */) + uintptr((i-1))*2)))) >> (1)))
 21153  		*(*int16)(unsafe.Pointer((bp /* &X */) + uintptr(i)*2)) -= int16((int32(*(*int16)(unsafe.Pointer((bp /* &X[0] */) + uintptr((i-1))*2)))))
 21154  	}
 21155  	*(*int16)(unsafe.Pointer((bp /* &X */))) -= int16((int32((*SKP_Silk_VAD_state)(unsafe.Pointer(psSilk_VAD)).FHPstate)))
 21156  	(*SKP_Silk_VAD_state)(unsafe.Pointer(psSilk_VAD)).FHPstate = HPstateTmp
 21157  
 21158  	/*************************************/
 21159  	/* Calculate the energy in each band */
 21160  	/*************************************/
 21161  	for b = 0; b < 4; b++ {
 21162  		/* Find the decimated framelength in the non-uniformly divided bands */
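        		/* b = 0,1: framelength/8; b = 2: framelength/4; b = 3: framelength/2 */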
 21163  		decimated_framelength = ((framelength) >> (SKP_min_int(tls, (4 - b), (4 - 1))))
 21164  
 21165  		/* Split length into subframe lengths */
 21166  		dec_subframe_length = ((decimated_framelength) >> (2))
 21167  		dec_subframe_offset = 0
 21168  
 21169  		/* Compute energy per sub-frame */
 21170  		/* initialize with summed energy of last subframe */
 21171  		*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4)) = *(*int32)(unsafe.Pointer((psSilk_VAD + 24 /* &.XnrgSubfr */) + uintptr(b)*4))
 21172  		for s = 0; s < (int32(1) << 2); s++ {
 21173  			sumSquared = 0
 21174  			for i = 0; i < dec_subframe_length; i++ {
 21175  				/* The energy will be less than dec_subframe_length * ( SKP_int16_MIN / 8 ) ^ 2.            */
 21176  				/* Therefore we can accumulate with no risk of overflow (unless dec_subframe_length > 128)  */
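        				/* (|SKP_int16_MIN| / 8)^2 = 4096^2 = 2^24, and 128 * 2^24 = 2^31, the int32 limit     */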
 21177  				x_tmp = ((int32(*(*int16)(unsafe.Pointer((bp /* &X[0] */ + uintptr(b)*480) + uintptr((i+dec_subframe_offset))*2)))) >> (3))
 21178  				sumSquared = ((sumSquared) + ((int32(int16(x_tmp))) * (int32(int16(x_tmp)))))
 21179  
 21180  				/* Safety check */
 21181  
 21182  			}
 21183  
 21184  			/* Add/saturate summed energy of current subframe */
 21185  			if s < ((int32(1) << 2) - 1) {
 21186  				*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4)) = func() int32 {
 21187  					if ((uint32((*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4))) + (sumSquared))) & 0x80000000) != 0 {
 21188  						return 0x7FFFFFFF
 21189  					}
 21190  					return ((*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4))) + (sumSquared))
 21191  				}()
 21192  			} else {
 21193  				/* Look-ahead subframe */
 21194  				*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4)) = func() int32 {
 21195  					if ((uint32((*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4))) + ((sumSquared) >> (1)))) & 0x80000000) != 0 {
 21196  						return 0x7FFFFFFF
 21197  					}
 21198  					return ((*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4))) + ((sumSquared) >> (1)))
 21199  				}()
 21200  			}
 21201  
 21202  			dec_subframe_offset = dec_subframe_offset + (dec_subframe_length)
 21203  		}
 21204  		*(*int32)(unsafe.Pointer((psSilk_VAD + 24 /* &.XnrgSubfr */) + uintptr(b)*4)) = sumSquared
 21205  	}
 21206  
 21207  	/********************/
 21208  	/* Noise estimation */
 21209  	/********************/
 21210  	SKP_Silk_VAD_GetNoiseLevels(tls, (bp + 4800 /* &Xnrg */), psSilk_VAD)
 21211  
 21212  	/***********************************************/
 21213  	/* Signal-plus-noise to noise ratio estimation */
 21214  	/***********************************************/
 21215  	sumSquared = 0
 21216  	input_tilt = 0
 21217  	for b = 0; b < 4; b++ {
 21218  		speech_nrg = (*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4)) - *(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(b)*4)))
 21219  		if speech_nrg > 0 {
 21220  			/* Divide, with sufficient resolution */
 21221  			if (uint32(*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4))) & 0xFF800000) == uint32(0) {
 21222  				*(*int32)(unsafe.Pointer(bp + 4816 /* &NrgToNoiseRatio_Q8[0] */ + uintptr(b)*4)) = (((*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4))) << (8)) / (*(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(b)*4)) + 1))
 21223  			} else {
 21224  				*(*int32)(unsafe.Pointer(bp + 4816 /* &NrgToNoiseRatio_Q8[0] */ + uintptr(b)*4)) = ((*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4))) / (((*(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(b)*4))) >> (8)) + 1))
 21225  			}
 21226  
 21227  			/* Convert to log domain */
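        			/* SKP_Silk_lin2log() returns 128 * log2(); subtracting 8*128 removes the log2(256) contributed by the Q8 scaling of the ratio */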
 21228  			SNR_Q7 = (SKP_Silk_lin2log(tls, *(*int32)(unsafe.Pointer(bp + 4816 /* &NrgToNoiseRatio_Q8[0] */ + uintptr(b)*4))) - (8 * 128))
 21229  
 21230  			/* Sum-of-squares */
 21231  			sumSquared = ((sumSquared) + ((int32(int16(SNR_Q7))) * (int32(int16(SNR_Q7))))) /* Q14 */
 21232  
 21233  			/* Tilt measure */
 21234  			if speech_nrg < (int32(1) << 20) {
 21235  				/* Scale down SNR value for small subband speech energies */
 21236  				SNR_Q7 = (((((SKP_Silk_SQRT_APPROX(tls, speech_nrg)) << (6)) >> 16) * (int32(int16(SNR_Q7)))) + (((((SKP_Silk_SQRT_APPROX(tls, speech_nrg)) << (6)) & 0x0000FFFF) * (int32(int16(SNR_Q7)))) >> 16))
 21237  			}
 21238  			input_tilt = ((input_tilt) + ((((tiltWeights[b]) >> 16) * (int32(int16(SNR_Q7)))) + ((((tiltWeights[b]) & 0x0000FFFF) * (int32(int16(SNR_Q7)))) >> 16)))
 21239  		} else {
 21240  			*(*int32)(unsafe.Pointer(bp + 4816 /* &NrgToNoiseRatio_Q8[0] */ + uintptr(b)*4)) = 256
 21241  		}
 21242  	}
 21243  
 21244  	/* Mean-of-squares */
 21245  	sumSquared = ((sumSquared) / (4)) /* Q14 */
 21246  
 21247  	/* Root-mean-square approximation, scale to dBs, and write to output pointer */
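        	/* sumSquared is in Q14, so its square root is in Q7; the factor 3 approximates 10*log10(2), converting log2 units to dB */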
 21248  	*(*int32)(unsafe.Pointer(pSNR_dB_Q7)) = int32((int16(3 * SKP_Silk_SQRT_APPROX(tls, sumSquared)))) /* Q7 */
 21249  
 21250  	/*********************************/
 21251  	/* Speech Probability Estimation */
 21252  	/*********************************/
 21253  	SA_Q15 = SKP_Silk_sigm_Q15(tls, ((((int32((45000)) >> 16) * (int32(int16(*(*int32)(unsafe.Pointer(pSNR_dB_Q7)))))) + ((((45000) & 0x0000FFFF) * (int32(int16(*(*int32)(unsafe.Pointer(pSNR_dB_Q7)))))) >> 16)) - 128))
 21254  
 21255  	/**************************/
 21256  	/* Frequency Tilt Measure */
 21257  	/**************************/
 21258  	*(*int32)(unsafe.Pointer(pTilt_Q15)) = ((SKP_Silk_sigm_Q15(tls, input_tilt) - 16384) << (1))
 21259  
 21260  	/**************************************************/
 21261  	/* Scale the sigmoid output based on power levels */
 21262  	/**************************************************/
 21263  	speech_nrg = 0
 21264  	for b = 0; b < 4; b++ {
 21265  		/* Accumulate signal-without-noise energies, higher frequency bands have more weight */
 21266  		speech_nrg = speech_nrg + ((b + 1) * ((*(*int32)(unsafe.Pointer(bp + 4800 /* &Xnrg[0] */ + uintptr(b)*4)) - *(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(b)*4))) >> (4)))
 21267  	}
 21268  
 21269  	/* Power scaling */
 21270  	if speech_nrg <= 0 {
 21271  		SA_Q15 = ((SA_Q15) >> (1))
 21272  	} else if speech_nrg < 32768 {
 21273  		/* square-root */
 21274  		speech_nrg = SKP_Silk_SQRT_APPROX(tls, ((speech_nrg) << (15)))
 21275  		SA_Q15 = ((((32768 + speech_nrg) >> 16) * (int32(int16(SA_Q15)))) + ((((32768 + speech_nrg) & 0x0000FFFF) * (int32(int16(SA_Q15)))) >> 16))
 21276  	}
 21277  
 21278  	/* Copy the resulting speech activity in Q8 to *pSA_Q8 */
 21279  	*(*int32)(unsafe.Pointer(pSA_Q8)) = SKP_min_int(tls, ((SA_Q15) >> (7)), 0xFF)
 21280  
 21281  	/***********************************/
 21282  	/* Energy Level and SNR estimation */
 21283  	/***********************************/
 21284  	/* Smoothing coefficient */
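        	/* Proportional to the squared speech activity, so the smoothed ratios below adapt faster during clear speech */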
 21285  	smooth_coef_Q16 = (((int32((4096)) >> 16) * (int32((int16((((SA_Q15) >> 16) * (int32(int16(SA_Q15)))) + ((((SA_Q15) & 0x0000FFFF) * (int32(int16(SA_Q15)))) >> 16)))))) + ((((4096) & 0x0000FFFF) * (int32((int16((((SA_Q15) >> 16) * (int32(int16(SA_Q15)))) + ((((SA_Q15) & 0x0000FFFF) * (int32(int16(SA_Q15)))) >> 16)))))) >> 16))
 21286  	for b = 0; b < 4; b++ {
 21287  		/* compute smoothed energy-to-noise ratio per band */
 21288  		*(*int32)(unsafe.Pointer((psSilk_VAD + 40 /* &.NrgRatioSmth_Q8 */) + uintptr(b)*4)) = ((*(*int32)(unsafe.Pointer((psSilk_VAD + 40 /* &.NrgRatioSmth_Q8 */) + uintptr(b)*4))) + ((((*(*int32)(unsafe.Pointer(bp + 4816 /* &NrgToNoiseRatio_Q8[0] */ + uintptr(b)*4)) - *(*int32)(unsafe.Pointer((psSilk_VAD + 40 /* &.NrgRatioSmth_Q8 */) + uintptr(b)*4))) >> 16) * (int32(int16(smooth_coef_Q16)))) + ((((*(*int32)(unsafe.Pointer(bp + 4816 /* &NrgToNoiseRatio_Q8[0] */ + uintptr(b)*4)) - *(*int32)(unsafe.Pointer((psSilk_VAD + 40 /* &.NrgRatioSmth_Q8 */) + uintptr(b)*4))) & 0x0000FFFF) * (int32(int16(smooth_coef_Q16)))) >> 16)))
 21289  
 21290  		/* signal to noise ratio in dB per band */
 21291  		SNR_Q7 = (3 * (SKP_Silk_lin2log(tls, *(*int32)(unsafe.Pointer((psSilk_VAD + 40 /* &.NrgRatioSmth_Q8 */) + uintptr(b)*4))) - (8 * 128)))
 21292  		/* quality = sigmoid( 0.25 * ( SNR_dB - 16 ) ); */
 21293  		*(*int32)(unsafe.Pointer(pQuality_Q15 + uintptr(b)*4)) = SKP_Silk_sigm_Q15(tls, ((SNR_Q7 - (16 * 128)) >> (4)))
 21294  	}
 21295  
 21296  	return ret
 21297  }
 21298  
 21299  /**************************/
 21300  /* Noise level estimation */
 21301  /**************************/
 21302  func SKP_Silk_VAD_GetNoiseLevels(tls *libc.TLS, pX uintptr, psSilk_VAD uintptr) { /* SKP_Silk_VAD.c:262:6: */
 21303  	var k int32
 21304  	var nl int32
 21305  	var nrg int32
 21306  	var inv_nrg int32
 21307  	var coef int32
 21308  	var min_coef int32
 21309  
 21310  	/* Initially faster smoothing */
 21311  	if (*SKP_Silk_VAD_state)(unsafe.Pointer(psSilk_VAD)).Fcounter < 1000 { /* 1000 = 20 sec */
 21312  		min_coef = ((0x7FFF) / ((((*SKP_Silk_VAD_state)(unsafe.Pointer(psSilk_VAD)).Fcounter) >> (4)) + 1))
 21313  	} else {
 21314  		min_coef = 0
 21315  	}
 21316  
 21317  	for k = 0; k < 4; k++ {
 21318  		/* Get old noise level estimate for current band */
 21319  		nl = *(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(k)*4))
 21320  
 21321  		/* Add bias */
 21322  		nrg = func() int32 {
 21323  			if ((uint32((*(*int32)(unsafe.Pointer(pX + uintptr(k)*4))) + (*(*int32)(unsafe.Pointer((psSilk_VAD + 92 /* &.NoiseLevelBias */) + uintptr(k)*4))))) & 0x80000000) != 0 {
 21324  				return 0x7FFFFFFF
 21325  			}
 21326  			return ((*(*int32)(unsafe.Pointer(pX + uintptr(k)*4))) + (*(*int32)(unsafe.Pointer((psSilk_VAD + 92 /* &.NoiseLevelBias */) + uintptr(k)*4))))
 21327  		}()
 21328  
 21329  		/* Invert energies */
 21330  		inv_nrg = ((0x7FFFFFFF) / (nrg))
 21331  
 21332  		/* Less update when subband energy is high */
 21333  		if nrg > ((nl) << (3)) {
 21334  			coef = (int32(1024) >> 3)
 21335  		} else if nrg < nl {
 21336  			coef = 1024
 21337  		} else {
 21338  			coef = ((((((((inv_nrg) >> 16) * (int32(int16(nl)))) + ((((inv_nrg) & 0x0000FFFF) * (int32(int16(nl)))) >> 16)) + ((inv_nrg) * (func() int32 {
 21339  				if (16) == 1 {
 21340  					return (((nl) >> 1) + ((nl) & 1))
 21341  				}
 21342  				return ((((nl) >> ((16) - 1)) + 1) >> 1)
 21343  			}()))) >> 16) * (int32((int16(int32(1024) << 1))))) + ((((((((inv_nrg) >> 16) * (int32(int16(nl)))) + ((((inv_nrg) & 0x0000FFFF) * (int32(int16(nl)))) >> 16)) + ((inv_nrg) * (func() int32 {
 21344  				if (16) == 1 {
 21345  					return (((nl) >> 1) + ((nl) & 1))
 21346  				}
 21347  				return ((((nl) >> ((16) - 1)) + 1) >> 1)
 21348  			}()))) & 0x0000FFFF) * (int32((int16(int32(1024) << 1))))) >> 16))
 21349  		}
 21350  
 21351  		/* Initially faster smoothing */
 21352  		coef = SKP_max_int(tls, coef, min_coef)
 21353  
 21354  		/* Smooth inverse energies */
 21355  		*(*int32)(unsafe.Pointer((psSilk_VAD + 76 /* &.inv_NL */) + uintptr(k)*4)) = ((*(*int32)(unsafe.Pointer((psSilk_VAD + 76 /* &.inv_NL */) + uintptr(k)*4))) + ((((inv_nrg - *(*int32)(unsafe.Pointer((psSilk_VAD + 76 /* &.inv_NL */) + uintptr(k)*4))) >> 16) * (int32(int16(coef)))) + ((((inv_nrg - *(*int32)(unsafe.Pointer((psSilk_VAD + 76 /* &.inv_NL */) + uintptr(k)*4))) & 0x0000FFFF) * (int32(int16(coef)))) >> 16)))
 21356  
 21357  		/* Compute noise level by inverting again */
 21358  		nl = ((0x7FFFFFFF) / (*(*int32)(unsafe.Pointer((psSilk_VAD + 76 /* &.inv_NL */) + uintptr(k)*4))))
 21359  
 21360  		/* Limit noise levels (guarantee 7 bits of head room) */
 21361  		nl = func() int32 {
 21362  			if (nl) < (0x00FFFFFF) {
 21363  				return nl
 21364  			}
 21365  			return 0x00FFFFFF
 21366  		}()
 21367  
 21368  		/* Store as part of state */
 21369  		*(*int32)(unsafe.Pointer((psSilk_VAD + 60 /* &.NL */) + uintptr(k)*4)) = nl
 21370  	}
 21371  
 21372  	/* Increment frame counter */
 21373  	(*SKP_Silk_VAD_state)(unsafe.Pointer(psSilk_VAD)).Fcounter++
 21374  }
 21375  
 21376  /* Entropy constrained MATRIX-weighted VQ, hard-coded to 5-element vectors, for a single input data vector */
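        /* For each of the L rows c_k of cb_Q14 this evaluates, in fixed point,
           rate_dist = mu_Q8 * cl_Q6[k] + d' * W * d  with  d = in_Q14 - c_k.
           W_Q18 is treated as symmetric: only its upper triangle is read and the
           off-diagonal contributions are doubled (the << 1 below). The index of the
           minimum is returned in *ind and its cost in *rate_dist_Q14. */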
 21377  func SKP_Silk_VQ_WMat_EC_FIX(tls *libc.TLS, ind uintptr, rate_dist_Q14 uintptr, in_Q14 uintptr, W_Q18 uintptr, cb_Q14 uintptr, cl_Q6 uintptr, mu_Q8 int32, L int32) { /* SKP_Silk_VQ_nearest_neighbor_FIX.c:31:6: */
 21378  	bp := tls.Alloc(10)
 21379  	defer tls.Free(10)
 21380  
 21381  	var k int32
 21382  	var cb_row_Q14 uintptr
 21383  	// var diff_Q14 [5]int16 at bp, 10
 21384  
 21385  	var sum1_Q14 int32
 21386  	var sum2_Q16 int32
 21387  
 21388  	/* Loop over codebook */
 21389  	*(*int32)(unsafe.Pointer(rate_dist_Q14)) = 0x7FFFFFFF
 21390  	cb_row_Q14 = cb_Q14
 21391  	for k = 0; k < L; k++ {
 21392  		*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */)) = (int16(int32(*(*int16)(unsafe.Pointer(in_Q14))) - int32(*(*int16)(unsafe.Pointer(cb_row_Q14)))))
 21393  		*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 1*2)) = (int16(int32(*(*int16)(unsafe.Pointer(in_Q14 + 1*2))) - int32(*(*int16)(unsafe.Pointer(cb_row_Q14 + 1*2)))))
 21394  		*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2)) = (int16(int32(*(*int16)(unsafe.Pointer(in_Q14 + 2*2))) - int32(*(*int16)(unsafe.Pointer(cb_row_Q14 + 2*2)))))
 21395  		*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2)) = (int16(int32(*(*int16)(unsafe.Pointer(in_Q14 + 3*2))) - int32(*(*int16)(unsafe.Pointer(cb_row_Q14 + 3*2)))))
 21396  		*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2)) = (int16(int32(*(*int16)(unsafe.Pointer(in_Q14 + 4*2))) - int32(*(*int16)(unsafe.Pointer(cb_row_Q14 + 4*2)))))
 21397  
 21398  		/* Weighted rate */
 21399  		sum1_Q14 = ((int32(int16(mu_Q8))) * (int32(*(*int16)(unsafe.Pointer(cl_Q6 + uintptr(k)*2)))))
 21400  
 21401  		/* first row of W_Q18 */
 21402  		sum2_Q16 = ((((*(*int32)(unsafe.Pointer(W_Q18 + 1*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 1*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 1*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 1*2))))) >> 16))
 21403  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 2*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 2*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) >> 16)))
 21404  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 3*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 3*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) >> 16)))
 21405  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 4*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 4*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) >> 16)))
 21406  		sum2_Q16 = ((sum2_Q16) << (1))
 21407  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */))))) + ((((*(*int32)(unsafe.Pointer(W_Q18))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */))))) >> 16)))
 21408  		sum1_Q14 = ((sum1_Q14) + ((((sum2_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */))))) + ((((sum2_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */))))) >> 16)))
 21409  
 21410  		/* second row of W_Q18 */
 21411  		sum2_Q16 = ((((*(*int32)(unsafe.Pointer(W_Q18 + 7*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 7*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) >> 16))
 21412  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 8*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 8*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) >> 16)))
 21413  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 9*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 9*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) >> 16)))
 21414  		sum2_Q16 = ((sum2_Q16) << (1))
 21415  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 6*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 1*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 6*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 1*2))))) >> 16)))
 21416  		sum1_Q14 = ((sum1_Q14) + ((((sum2_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 1*2))))) + ((((sum2_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 1*2))))) >> 16)))
 21417  
 21418  		/* third row of W_Q18 */
 21419  		sum2_Q16 = ((((*(*int32)(unsafe.Pointer(W_Q18 + 13*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 13*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) >> 16))
 21420  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 14*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 14*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) >> 16)))
 21421  		sum2_Q16 = ((sum2_Q16) << (1))
 21422  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 12*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 12*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) >> 16)))
 21423  		sum1_Q14 = ((sum1_Q14) + ((((sum2_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) + ((((sum2_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 2*2))))) >> 16)))
 21424  
 21425  		/* fourth row of W_Q18 */
 21426  		sum2_Q16 = ((((*(*int32)(unsafe.Pointer(W_Q18 + 19*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 19*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) >> 16))
 21427  		sum2_Q16 = ((sum2_Q16) << (1))
 21428  		sum2_Q16 = ((sum2_Q16) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 18*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 18*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) >> 16)))
 21429  		sum1_Q14 = ((sum1_Q14) + ((((sum2_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) + ((((sum2_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 3*2))))) >> 16)))
 21430  
 21431  		/* last row of W_Q18 */
 21432  		sum2_Q16 = ((((*(*int32)(unsafe.Pointer(W_Q18 + 24*4))) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) + ((((*(*int32)(unsafe.Pointer(W_Q18 + 24*4))) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) >> 16))
 21433  		sum1_Q14 = ((sum1_Q14) + ((((sum2_Q16) >> 16) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) + ((((sum2_Q16) & 0x0000FFFF) * (int32(*(*int16)(unsafe.Pointer(bp /* &diff_Q14[0] */ + 4*2))))) >> 16)))
 21434  
 21435  		/* find best */
 21436  		if sum1_Q14 < *(*int32)(unsafe.Pointer(rate_dist_Q14)) {
 21437  			*(*int32)(unsafe.Pointer(rate_dist_Q14)) = sum1_Q14
 21438  			*(*int32)(unsafe.Pointer(ind)) = k
 21439  		}
 21440  
 21441  		/* Go to next cbk vector */
 21442  		cb_row_Q14 += 2 * (uintptr(5))
 21443  	}
 21444  }
 21445  
 21446  /* Autocorrelations for a warped frequency axis */
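        /* The input is passed through a chain of `order` first-order allpass sections with
           coefficient warping_Q16; corr_QC[i] accumulates, in 64 bits, the product of the
           current input sample with the output of the i-th section, i.e. autocorrelations on
           a warped frequency axis. The accumulators are then shifted into int32 and *scale
           reports the scaling that was applied. */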
 21447  func SKP_Silk_warped_autocorrelation_FIX(tls *libc.TLS, corr uintptr, scale uintptr, input uintptr, warping_Q16 int16, length int32, order int32) { /* SKP_Silk_warped_autocorrelation_FIX.c:35:6: */
 21448  	bp := tls.Alloc(208)
 21449  	defer tls.Free(208)
 21450  
 21451  	var n int32
 21452  	var i int32
 21453  	var lsh int32
 21454  	var tmp1_QS int32
 21455  	var tmp2_QS int32
 21456  	*(*[17]int32)(unsafe.Pointer(bp /* state_QS */)) = [17]int32{0: 0}
 21457  	*(*[17]int64_t)(unsafe.Pointer(bp + 72 /* corr_QC */)) = [17]int64_t{0: int64(0)}
 21458  
 21459  	/* Order must be even */
 21460  
 21461  	/* Loop over samples */
 21462  	for n = 0; n < length; n++ {
 21463  		tmp1_QS = ((int32(*(*int16)(unsafe.Pointer(input + uintptr(n)*2)))) << (14))
 21464  		/* Loop over allpass sections */
 21465  		for i = 0; i < order; i = i + (2) {
 21466  			/* Output of allpass section */
 21467  			tmp2_QS = ((*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr(i)*4))) + ((((*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr((i+1))*4)) - tmp1_QS) >> 16) * (int32(warping_Q16))) + ((((*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr((i+1))*4)) - tmp1_QS) & 0x0000FFFF) * (int32(warping_Q16))) >> 16)))
 21468  			*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr(i)*4)) = tmp1_QS
 21469  			*(*int64_t)(unsafe.Pointer(bp + 72 /* &corr_QC */ + uintptr(i)*8)) += (((int64_t(tmp1_QS)) * (int64_t(*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */))))) >> ((2 * 14) - 10))
 21470  			/* Output of allpass section */
 21471  			tmp1_QS = ((*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr((i+1))*4))) + ((((*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr((i+2))*4)) - tmp2_QS) >> 16) * (int32(warping_Q16))) + ((((*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr((i+2))*4)) - tmp2_QS) & 0x0000FFFF) * (int32(warping_Q16))) >> 16)))
 21472  			*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr((i+1))*4)) = tmp2_QS
 21473  			*(*int64_t)(unsafe.Pointer(bp + 72 /* &corr_QC */ + uintptr((i+1))*8)) += (((int64_t(tmp2_QS)) * (int64_t(*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */))))) >> ((2 * 14) - 10))
 21474  		}
 21475  		*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */ + uintptr(order)*4)) = tmp1_QS
 21476  		*(*int64_t)(unsafe.Pointer(bp + 72 /* &corr_QC */ + uintptr(order)*8)) += (((int64_t(tmp1_QS)) * (int64_t(*(*int32)(unsafe.Pointer(bp /* &state_QS[0] */))))) >> ((2 * 14) - 10))
 21477  	}
 21478  
 21479  	lsh = (SKP_Silk_CLZ64(tls, *(*int64_t)(unsafe.Pointer(bp + 72 /* &corr_QC[0] */))) - 35)
 21480  	lsh = func() int32 {
 21481  		if (-12 - 10) > (30 - 10) {
 21482  			return func() int32 {
 21483  				if (lsh) > (-12 - 10) {
 21484  					return (-12 - 10)
 21485  				}
 21486  				return func() int32 {
 21487  					if (lsh) < (30 - 10) {
 21488  						return (30 - 10)
 21489  					}
 21490  					return lsh
 21491  				}()
 21492  			}()
 21493  		}
 21494  		return func() int32 {
 21495  			if (lsh) > (30 - 10) {
 21496  				return (30 - 10)
 21497  			}
 21498  			return func() int32 {
 21499  				if (lsh) < (-12 - 10) {
 21500  					return (-12 - 10)
 21501  				}
 21502  				return lsh
 21503  			}()
 21504  		}()
 21505  	}()
 21506  	*(*int32)(unsafe.Pointer(scale)) = -(10 + lsh)
 21507  
 21508  	if lsh >= 0 {
 21509  		for i = 0; i < (order + 1); i++ {
 21510  			*(*int32)(unsafe.Pointer(corr + uintptr(i)*4)) = (int32((*(*int64_t)(unsafe.Pointer(bp + 72 /* &corr_QC[0] */ + uintptr(i)*8))) << (lsh)))
 21511  		}
 21512  	} else {
 21513  		for i = 0; i < (order + 1); i++ {
 21514  			*(*int32)(unsafe.Pointer(corr + uintptr(i)*4)) = (int32((*(*int64_t)(unsafe.Pointer(bp + 72 /* &corr_QC[0] */ + uintptr(i)*8))) >> (-lsh)))
 21515  		}
 21516  	}
 21517  	// If breaking (i.e. if the corr_QC accumulation overflows), decrease QC
 21518  }
 21519  
 21520  func init() {
 21521  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_BITS_Q6_ptrs)) + 0)) = uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_BITS_Q6_0))                                     // SKP_Silk_tables_LTP.c:89:5:
 21522  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_BITS_Q6_ptrs)) + 4)) = uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_BITS_Q6_1))                                     // SKP_Silk_tables_LTP.c:90:5:
 21523  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_BITS_Q6_ptrs)) + 8)) = uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_BITS_Q6_2))                                     // SKP_Silk_tables_LTP.c:91:5:
 21527  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_CDF_ptrs)) + 0)) = uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_CDF_0))                                             // SKP_Silk_tables_LTP.c:83:5:
 21528  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_CDF_ptrs)) + 4)) = uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_CDF_1))                                             // SKP_Silk_tables_LTP.c:84:5:
 21529  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_CDF_ptrs)) + 8)) = uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_CDF_2))                                             // SKP_Silk_tables_LTP.c:85:5:
 21533  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_vq_ptrs_Q14)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_vq_0_Q14)))                                          // SKP_Silk_tables_LTP.c:317:5:
 21534  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_vq_ptrs_Q14)) + 4)) = (uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_vq_1_Q14)))                                          // SKP_Silk_tables_LTP.c:318:5:
 21535  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_LTP_vq_ptrs_Q14)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_LTP_gain_vq_2_Q14)))                                          // SKP_Silk_tables_LTP.c:319:5:
 21539  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10)) + 4 /* .CBStages */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info))                           // SKP_Silk_tables_NLSF_CB0_10.c:884:9:
 21540  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10)) + 8 /* .NDeltaMin_Q15 */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_ndelta_min_Q15))             // SKP_Silk_tables_NLSF_CB0_10.c:885:9:
 21541  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10)) + 12 /* .CDF */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF))                                 // SKP_Silk_tables_NLSF_CB0_10.c:886:9:
 21542  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10)) + 16 /* .StartPtr */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr))                  // SKP_Silk_tables_NLSF_CB0_10.c:887:9:
 21543  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10)) + 20 /* .MiddleIx */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_middle_idx))                 // SKP_Silk_tables_NLSF_CB0_10.c:888:9:
 21549  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 4 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_Q15)))             // SKP_Silk_tables_NLSF_CB0_10.c:873:16:
 21550  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 8 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5)))           // SKP_Silk_tables_NLSF_CB0_10.c:873:60:
 21551  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 16 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_Q15)) + 640*2)    // SKP_Silk_tables_NLSF_CB0_10.c:874:16:
 21552  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 20 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5)) + 64*2)   // SKP_Silk_tables_NLSF_CB0_10.c:874:60:
 21553  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 28 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_Q15)) + 800*2)    // SKP_Silk_tables_NLSF_CB0_10.c:875:16:
 21554  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 32 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5)) + 80*2)   // SKP_Silk_tables_NLSF_CB0_10.c:875:60:
 21555  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 40 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_Q15)) + 880*2)    // SKP_Silk_tables_NLSF_CB0_10.c:876:16:
 21556  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 44 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5)) + 88*2)   // SKP_Silk_tables_NLSF_CB0_10.c:876:60:
 21557  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 52 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_Q15)) + 960*2)    // SKP_Silk_tables_NLSF_CB0_10.c:877:16:
 21558  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 56 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5)) + 96*2)   // SKP_Silk_tables_NLSF_CB0_10.c:877:60:
 21559  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 64 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_Q15)) + 1040*2)   // SKP_Silk_tables_NLSF_CB0_10.c:878:16:
 21560  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_10_Stage_info)) + 68 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_rates_Q5)) + 104*2)  // SKP_Silk_tables_NLSF_CB0_10.c:878:60:
 21561  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16)) + 4 /* .CBStages */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info))                           // SKP_Silk_tables_NLSF_CB0_16.c:1314:9:
 21562  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16)) + 8 /* .NDeltaMin_Q15 */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_ndelta_min_Q15))             // SKP_Silk_tables_NLSF_CB0_16.c:1315:9:
 21563  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16)) + 12 /* .CDF */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF))                                 // SKP_Silk_tables_NLSF_CB0_16.c:1316:9:
 21564  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16)) + 16 /* .StartPtr */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr))                  // SKP_Silk_tables_NLSF_CB0_16.c:1317:9:
 21565  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16)) + 20 /* .MiddleIx */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_middle_idx))                 // SKP_Silk_tables_NLSF_CB0_16.c:1318:9:
 21571  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 4 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)))             // SKP_Silk_tables_NLSF_CB0_16.c:1299:16:
 21572  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 8 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)))           // SKP_Silk_tables_NLSF_CB0_16.c:1299:60:
 21573  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 16 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 2048*2)   // SKP_Silk_tables_NLSF_CB0_16.c:1300:16:
 21574  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 20 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 128*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1300:60:
 21575  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 28 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 2304*2)   // SKP_Silk_tables_NLSF_CB0_16.c:1301:16:
 21576  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 32 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 144*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1301:60:
 21577  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 40 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 2432*2)   // SKP_Silk_tables_NLSF_CB0_16.c:1302:16:
 21578  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 44 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 152*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1302:60:
 21579  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 52 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 2560*2)   // SKP_Silk_tables_NLSF_CB0_16.c:1303:16:
 21580  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 56 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 160*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1303:60:
 21581  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 64 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 2688*2)   // SKP_Silk_tables_NLSF_CB0_16.c:1304:16:
 21582  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 68 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 168*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1304:60:
 21583  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 76 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 2816*2)   // SKP_Silk_tables_NLSF_CB0_16.c:1305:16:
 21584  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 80 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 176*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1305:60:
 21585  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 88 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 2944*2)   // SKP_Silk_tables_NLSF_CB0_16.c:1306:16:
 21586  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 92 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 184*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1306:60:
 21587  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 100 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 3072*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1307:16:
 21588  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 104 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 192*2) // SKP_Silk_tables_NLSF_CB0_16.c:1307:60:
 21589  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 112 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_Q15)) + 3200*2)  // SKP_Silk_tables_NLSF_CB0_16.c:1308:16:
 21590  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB0_16_Stage_info)) + 116 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_rates_Q5)) + 200*2) // SKP_Silk_tables_NLSF_CB0_16.c:1308:60:
 21591  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 4 /* .CBStages */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info))                           // SKP_Silk_tables_NLSF_CB1_10.c:572:9:
 21592  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 8 /* .NDeltaMin_Q15 */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_ndelta_min_Q15))             // SKP_Silk_tables_NLSF_CB1_10.c:573:9:
 21593  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 12 /* .CDF */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF))                                 // SKP_Silk_tables_NLSF_CB1_10.c:574:9:
 21594  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 16 /* .StartPtr */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr))                  // SKP_Silk_tables_NLSF_CB1_10.c:575:9:
 21595  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 20 /* .MiddleIx */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_middle_idx))                 // SKP_Silk_tables_NLSF_CB1_10.c:576:9:
 21596  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 4 /* .CBStages */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info))                           // SKP_Silk_tables_NLSF_CB1_10.c:572:9:
 21597  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 8 /* .NDeltaMin_Q15 */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_ndelta_min_Q15))             // SKP_Silk_tables_NLSF_CB1_10.c:573:9:
 21598  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 12 /* .CDF */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF))                                 // SKP_Silk_tables_NLSF_CB1_10.c:574:9:
 21599  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 16 /* .StartPtr */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr))                  // SKP_Silk_tables_NLSF_CB1_10.c:575:9:
 21600  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10)) + 20 /* .MiddleIx */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_middle_idx))                 // SKP_Silk_tables_NLSF_CB1_10.c:576:9:
 21601  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 4 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_Q15)))             // SKP_Silk_tables_NLSF_CB1_10.c:561:16:
 21602  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 8 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_rates_Q5)))           // SKP_Silk_tables_NLSF_CB1_10.c:561:60:
 21603  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 16 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_Q15)) + 320*2)    // SKP_Silk_tables_NLSF_CB1_10.c:562:16:
 21604  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 20 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_rates_Q5)) + 32*2)   // SKP_Silk_tables_NLSF_CB1_10.c:562:60:
 21605  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 28 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_Q15)) + 400*2)    // SKP_Silk_tables_NLSF_CB1_10.c:563:16:
 21606  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 32 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_rates_Q5)) + 40*2)   // SKP_Silk_tables_NLSF_CB1_10.c:563:60:
 21607  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 40 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_Q15)) + 480*2)    // SKP_Silk_tables_NLSF_CB1_10.c:564:16:
 21608  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 44 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_rates_Q5)) + 48*2)   // SKP_Silk_tables_NLSF_CB1_10.c:564:60:
 21609  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 52 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_Q15)) + 560*2)    // SKP_Silk_tables_NLSF_CB1_10.c:565:16:
 21610  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 56 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_rates_Q5)) + 56*2)   // SKP_Silk_tables_NLSF_CB1_10.c:565:60:
 21611  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 64 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_Q15)) + 640*2)    // SKP_Silk_tables_NLSF_CB1_10.c:566:16:
 21612  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_10_Stage_info)) + 68 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_rates_Q5)) + 64*2)   // SKP_Silk_tables_NLSF_CB1_10.c:566:60:
 21613  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 4 /* .CBStages */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info))                           // SKP_Silk_tables_NLSF_CB1_16.c:698:9:
 21614  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 8 /* .NDeltaMin_Q15 */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_ndelta_min_Q15))             // SKP_Silk_tables_NLSF_CB1_16.c:699:9:
 21615  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 12 /* .CDF */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF))                                 // SKP_Silk_tables_NLSF_CB1_16.c:700:9:
 21616  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 16 /* .StartPtr */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr))                  // SKP_Silk_tables_NLSF_CB1_16.c:701:9:
 21617  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 20 /* .MiddleIx */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_middle_idx))                 // SKP_Silk_tables_NLSF_CB1_16.c:702:9:
 21618  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 4 /* .CBStages */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info))                           // SKP_Silk_tables_NLSF_CB1_16.c:698:9:
 21619  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 8 /* .NDeltaMin_Q15 */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_ndelta_min_Q15))             // SKP_Silk_tables_NLSF_CB1_16.c:699:9:
 21620  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 12 /* .CDF */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF))                                 // SKP_Silk_tables_NLSF_CB1_16.c:700:9:
 21621  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 16 /* .StartPtr */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr))                  // SKP_Silk_tables_NLSF_CB1_16.c:701:9:
 21622  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16)) + 20 /* .MiddleIx */)) = uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_middle_idx))                 // SKP_Silk_tables_NLSF_CB1_16.c:702:9:
 21623  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 4 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)))             // SKP_Silk_tables_NLSF_CB1_16.c:683:16:
 21624  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 8 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)))           // SKP_Silk_tables_NLSF_CB1_16.c:683:60:
 21625  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 16 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 512*2)    // SKP_Silk_tables_NLSF_CB1_16.c:684:16:
 21626  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 20 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 32*2)   // SKP_Silk_tables_NLSF_CB1_16.c:684:60:
 21627  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 28 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 640*2)    // SKP_Silk_tables_NLSF_CB1_16.c:685:16:
 21628  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 32 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 40*2)   // SKP_Silk_tables_NLSF_CB1_16.c:685:60:
 21629  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 40 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 768*2)    // SKP_Silk_tables_NLSF_CB1_16.c:686:16:
 21630  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 44 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 48*2)   // SKP_Silk_tables_NLSF_CB1_16.c:686:60:
 21631  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 52 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 896*2)    // SKP_Silk_tables_NLSF_CB1_16.c:687:16:
 21632  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 56 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 56*2)   // SKP_Silk_tables_NLSF_CB1_16.c:687:60:
 21633  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 64 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 1024*2)   // SKP_Silk_tables_NLSF_CB1_16.c:688:16:
 21634  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 68 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 64*2)   // SKP_Silk_tables_NLSF_CB1_16.c:688:60:
 21635  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 76 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 1152*2)   // SKP_Silk_tables_NLSF_CB1_16.c:689:16:
 21636  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 80 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 72*2)   // SKP_Silk_tables_NLSF_CB1_16.c:689:60:
 21637  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 88 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 1280*2)   // SKP_Silk_tables_NLSF_CB1_16.c:690:16:
 21638  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 92 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 80*2)   // SKP_Silk_tables_NLSF_CB1_16.c:690:60:
 21639  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 100 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 1408*2)  // SKP_Silk_tables_NLSF_CB1_16.c:691:16:
 21640  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 104 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 88*2)  // SKP_Silk_tables_NLSF_CB1_16.c:691:60:
 21641  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 112 /* .CB_NLSF_Q15 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_Q15)) + 1536*2)  // SKP_Silk_tables_NLSF_CB1_16.c:692:16:
 21642  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_CB1_16_Stage_info)) + 116 /* .Rates_Q5 */)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_rates_Q5)) + 96*2)  // SKP_Silk_tables_NLSF_CB1_16.c:692:60:
 21643  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)))                        // SKP_Silk_tables_NLSF_CB0_10.c:170:6:
 21644  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 4)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 65*2)                 // SKP_Silk_tables_NLSF_CB0_10.c:171:6:
 21645  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 82*2)                 // SKP_Silk_tables_NLSF_CB0_10.c:172:6:
 21646  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 12)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 91*2)                // SKP_Silk_tables_NLSF_CB0_10.c:173:6:
 21647  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 100*2)               // SKP_Silk_tables_NLSF_CB0_10.c:174:6:
 21648  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 20)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 109*2)               // SKP_Silk_tables_NLSF_CB0_10.c:175:6:
 21649  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)))                        // SKP_Silk_tables_NLSF_CB0_10.c:170:6:
 21650  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 4)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 65*2)                 // SKP_Silk_tables_NLSF_CB0_10.c:171:6:
 21651  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 82*2)                 // SKP_Silk_tables_NLSF_CB0_10.c:172:6:
 21652  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 12)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 91*2)                // SKP_Silk_tables_NLSF_CB0_10.c:173:6:
 21653  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 100*2)               // SKP_Silk_tables_NLSF_CB0_10.c:174:6:
 21654  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF_start_ptr)) + 20)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_10_CDF)) + 109*2)               // SKP_Silk_tables_NLSF_CB0_10.c:175:6:
 21655  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)))                        // SKP_Silk_tables_NLSF_CB0_16.c:270:6:
 21656  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 4)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 129*2)                // SKP_Silk_tables_NLSF_CB0_16.c:271:6:
 21657  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 146*2)                // SKP_Silk_tables_NLSF_CB0_16.c:272:6:
 21658  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 12)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 155*2)               // SKP_Silk_tables_NLSF_CB0_16.c:273:6:
 21659  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 164*2)               // SKP_Silk_tables_NLSF_CB0_16.c:274:6:
 21660  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 20)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 173*2)               // SKP_Silk_tables_NLSF_CB0_16.c:275:6:
 21661  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 24)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 182*2)               // SKP_Silk_tables_NLSF_CB0_16.c:276:6:
 21662  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 28)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 191*2)               // SKP_Silk_tables_NLSF_CB0_16.c:277:6:
 21663  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 32)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 200*2)               // SKP_Silk_tables_NLSF_CB0_16.c:278:6:
 21664  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 36)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 209*2)               // SKP_Silk_tables_NLSF_CB0_16.c:279:6:
 21665  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)))                        // SKP_Silk_tables_NLSF_CB0_16.c:270:6:
 21666  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 4)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 129*2)                // SKP_Silk_tables_NLSF_CB0_16.c:271:6:
 21667  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 146*2)                // SKP_Silk_tables_NLSF_CB0_16.c:272:6:
 21668  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 12)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 155*2)               // SKP_Silk_tables_NLSF_CB0_16.c:273:6:
 21669  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 164*2)               // SKP_Silk_tables_NLSF_CB0_16.c:274:6:
 21670  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 20)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 173*2)               // SKP_Silk_tables_NLSF_CB0_16.c:275:6:
 21671  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 24)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 182*2)               // SKP_Silk_tables_NLSF_CB0_16.c:276:6:
 21672  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 28)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 191*2)               // SKP_Silk_tables_NLSF_CB0_16.c:277:6:
 21673  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 32)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 200*2)               // SKP_Silk_tables_NLSF_CB0_16.c:278:6:
 21674  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF_start_ptr)) + 36)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB0_16_CDF)) + 209*2)               // SKP_Silk_tables_NLSF_CB0_16.c:279:6:
 21675  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)))                        // SKP_Silk_tables_NLSF_CB1_10.c:122:6:
 21676  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 4)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 33*2)                 // SKP_Silk_tables_NLSF_CB1_10.c:123:6:
 21677  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 42*2)                 // SKP_Silk_tables_NLSF_CB1_10.c:124:6:
 21678  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 12)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 51*2)                // SKP_Silk_tables_NLSF_CB1_10.c:125:6:
 21679  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 60*2)                // SKP_Silk_tables_NLSF_CB1_10.c:126:6:
 21680  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 20)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 69*2)                // SKP_Silk_tables_NLSF_CB1_10.c:127:6:
 21681  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)))                        // SKP_Silk_tables_NLSF_CB1_10.c:122:6:
 21682  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 4)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 33*2)                 // SKP_Silk_tables_NLSF_CB1_10.c:123:6:
 21683  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 42*2)                 // SKP_Silk_tables_NLSF_CB1_10.c:124:6:
 21684  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 12)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 51*2)                // SKP_Silk_tables_NLSF_CB1_10.c:125:6:
 21685  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 60*2)                // SKP_Silk_tables_NLSF_CB1_10.c:126:6:
 21686  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF_start_ptr)) + 20)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_10_CDF)) + 69*2)                // SKP_Silk_tables_NLSF_CB1_10.c:127:6:
 21687  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)))                        // SKP_Silk_tables_NLSF_CB1_16.c:158:6:
 21688  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 4)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 33*2)                 // SKP_Silk_tables_NLSF_CB1_16.c:159:6:
 21689  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 42*2)                 // SKP_Silk_tables_NLSF_CB1_16.c:160:6:
 21690  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 12)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 51*2)                // SKP_Silk_tables_NLSF_CB1_16.c:161:6:
 21691  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 60*2)                // SKP_Silk_tables_NLSF_CB1_16.c:162:6:
 21692  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 20)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 69*2)                // SKP_Silk_tables_NLSF_CB1_16.c:163:6:
 21693  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 24)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 78*2)                // SKP_Silk_tables_NLSF_CB1_16.c:164:6:
 21694  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 28)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 87*2)                // SKP_Silk_tables_NLSF_CB1_16.c:165:6:
 21695  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 32)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 96*2)                // SKP_Silk_tables_NLSF_CB1_16.c:166:6:
 21696  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 36)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 105*2)               // SKP_Silk_tables_NLSF_CB1_16.c:167:6:
 21697  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 0)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)))                        // SKP_Silk_tables_NLSF_CB1_16.c:158:6:
 21698  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 4)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 33*2)                 // SKP_Silk_tables_NLSF_CB1_16.c:159:6:
 21699  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 8)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 42*2)                 // SKP_Silk_tables_NLSF_CB1_16.c:160:6:
 21700  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 12)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 51*2)                // SKP_Silk_tables_NLSF_CB1_16.c:161:6:
 21701  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 16)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 60*2)                // SKP_Silk_tables_NLSF_CB1_16.c:162:6:
 21702  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 20)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 69*2)                // SKP_Silk_tables_NLSF_CB1_16.c:163:6:
 21703  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 24)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 78*2)                // SKP_Silk_tables_NLSF_CB1_16.c:164:6:
 21704  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 28)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 87*2)                // SKP_Silk_tables_NLSF_CB1_16.c:165:6:
 21705  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 32)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 96*2)                // SKP_Silk_tables_NLSF_CB1_16.c:166:6:
 21706  	*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF_start_ptr)) + 36)) = (uintptr(unsafe.Pointer(&SKP_Silk_NLSF_MSVQ_CB1_16_CDF)) + 105*2)               // SKP_Silk_tables_NLSF_CB1_16.c:167:6:
 21707  }
 21708  
 21709  var ts1 = "1.0.9\x00"                                       // string literal pool: the NUL-terminated SILK SDK version string "1.0.9"
 21710  var ts = (*reflect.StringHeader)(unsafe.Pointer(&ts1)).Data // base address of ts1; the translated code references string literals as offsets from this pointer