Browse Source

init移植原有仓库代码重构

kwl 3 weeks ago
commit
df5061f7b5
53 changed files with 12622 additions and 0 deletions
  1. 78 0
      .gitignore
  2. 813 0
      native/include/fdk-aac/FDK_audio.h
  3. 1082 0
      native/include/fdk-aac/aacdecoder_lib.h
  4. 1709 0
      native/include/fdk-aac/aacenc_lib.h
  5. 584 0
      native/include/fdk-aac/genericStds.h
  6. 411 0
      native/include/fdk-aac/machine_type.h
  7. 202 0
      native/include/fdk-aac/syslib_channelMapDescr.h
  8. 68 0
      pom.xml
  9. 32 0
      src/main/java/com/jttserver/Server.java
  10. 717 0
      src/main/java/com/jttserver/codec/FlvPacketizer.java
  11. 152 0
      src/main/java/com/jttserver/codec/Jtt1078MessageDecoder.java
  12. 250 0
      src/main/java/com/jttserver/codec/audio/AudioDecoder.java
  13. 46 0
      src/main/java/com/jttserver/codec/audio/AudioUtils.java
  14. 69 0
      src/main/java/com/jttserver/codec/nativeaac/AacEncoderNative.java
  15. 95 0
      src/main/java/com/jttserver/config/ConfigManager.java
  16. 161 0
      src/main/java/com/jttserver/device/DeviceManager.java
  17. 171 0
      src/main/java/com/jttserver/protocol/Jtt1078NaluPacket.java
  18. 67 0
      src/main/java/com/jttserver/protocol/Jtt1078PacketParams.java
  19. 301 0
      src/main/java/com/jttserver/protocol/Jtt1078PacketParser.java
  20. 129 0
      src/main/java/com/jttserver/protocol/JttConstants.java
  21. 268 0
      src/main/java/com/jttserver/relay/FlvStreamRelay.java
  22. 63 0
      src/main/java/com/jttserver/relay/StreamRelay.java
  23. 111 0
      src/main/java/com/jttserver/relay/workerthreads/BroadcastWorker.java
  24. 147 0
      src/main/java/com/jttserver/relay/workerthreads/VideoPublishWorker.java
  25. 37 0
      src/main/java/com/jttserver/service/publisher/PublishServer.java
  26. 289 0
      src/main/java/com/jttserver/service/publisher/WebsockServer.java
  27. 373 0
      src/main/java/com/jttserver/service/receiver/JttVideoRecvServer.java
  28. 14 0
      src/main/java/com/jttserver/service/receiver/RecvSever.java
  29. 130 0
      src/main/java/com/jttserver/utils/CommonUtils.java
  30. 56 0
      src/main/java/com/jttserver/utils/SimCardUtils.java
  31. 19 0
      src/main/resources/app.properties
  32. 35 0
      src/main/resources/logback.xml
  33. 274 0
      src/main/resources/web/devices.html
  34. 0 0
      src/main/resources/web/jessibuca/decoder.js
  35. BIN
      src/main/resources/web/jessibuca/decoder.wasm
  36. 190 0
      src/main/resources/web/jessibuca/demo.html
  37. 683 0
      src/main/resources/web/jessibuca/jessibuca.d.ts
  38. 0 0
      src/main/resources/web/jessibuca/jessibuca.js
  39. 0 0
      src/main/resources/web/mpegts.js
  40. 0 0
      src/main/resources/web/mpegts.js.map
  41. 172 0
      src/main/resources/web/offline_player.html
  42. 338 0
      src/main/resources/web/player.html
  43. 232 0
      src/main/resources/web/playerWhthoutAudio.html
  44. 150 0
      src/native/CMakeLists.txt
  45. 98 0
      src/native/aac_jni.c
  46. 681 0
      src/test/java/com/jttserver/codec/FlvPacketizerTest.java
  47. 205 0
      src/test/java/com/jttserver/codec/Jtt1078MessageDecoderTest.java
  48. 36 0
      src/test/java/com/jttserver/codec/nativeaac/AacSmokeTest.java
  49. 64 0
      src/test/java/com/jttserver/config/ConfigManagerTest.java
  50. 206 0
      src/test/java/com/jttserver/device/DeviceManagerTest.java
  51. 228 0
      src/test/java/com/jttserver/protocol/Jtt1078NaluPacketTest.java
  52. 247 0
      src/test/java/com/jttserver/protocol/Jtt1078PacketParserTest.java
  53. 139 0
      src/test/java/com/jttserver/relay/FlvStreamRelayTest.java

+ 78 - 0
.gitignore

@@ -0,0 +1,78 @@
+# Java/Maven
+*.class
+*.log
+hs_err_pid*
+replay_pid*.log
+
+# Maven build
+target/
+!.mvn/wrapper/maven-wrapper.jar
+
+dependency-reduced-pom.xml
+
+# IDE settings
+.idea/
+*.iml
+*.ipr
+*.iws
+.vscode/
+# Eclipse
+.classpath
+.project
+.settings/
+.metadata
+bin/
+
+# NetBeans
+nbproject/private/
+build/
+nbbuild/
+dist/
+nbdist/
+nbactions.xml
+nb-gradle/
+
+# OS junk
+.DS_Store
+Thumbs.db
+
+# Libraries and vendor bundles (optional)
+lib/
+.libs/
+
+# Env files
+.env
+.env.local
+.env.development.local
+.env.test.local
+.env.production.local
+src/main/resources/app-local.properties
+
+# Native/CMake/Visual Studio build outputs
+build_jni/
+# any nested CMake build files
+**/CMakeFiles/
+**/CMakeCache.txt
+**/cmake_install.cmake
+**/Makefile
+# common VS/C++ outputs
+**/Debug/
+**/Release/
+**/x64/
+**/x86/
+*.obj
+*.o
+*.pdb
+*.lib
+*.exp
+*.dll
+*.so
+*.dylib
+
+# JNI generated headers (from javac -h)
+src/native/com_jttserver_codec_nativeaac_*.h
+
+# Audio test artifacts
+test.aac
+test_audio_*.aac
+*.aac

+ 813 - 0
native/include/fdk-aac/FDK_audio.h

@@ -0,0 +1,813 @@
+/* -----------------------------------------------------------------------------
+Software License for The Fraunhofer FDK AAC Codec Library for Android
+
+© Copyright  1995 - 2018 Fraunhofer-Gesellschaft zur Förderung der angewandten
+Forschung e.V. All rights reserved.
+
+ 1.    INTRODUCTION
+The Fraunhofer FDK AAC Codec Library for Android ("FDK AAC Codec") is software
+that implements the MPEG Advanced Audio Coding ("AAC") encoding and decoding
+scheme for digital audio. This FDK AAC Codec software is intended to be used on
+a wide variety of Android devices.
+
+AAC's HE-AAC and HE-AAC v2 versions are regarded as today's most efficient
+general perceptual audio codecs. AAC-ELD is considered the best-performing
+full-bandwidth communications codec by independent studies and is widely
+deployed. AAC has been standardized by ISO and IEC as part of the MPEG
+specifications.
+
+Patent licenses for necessary patent claims for the FDK AAC Codec (including
+those of Fraunhofer) may be obtained through Via Licensing
+(www.vialicensing.com) or through the respective patent owners individually for
+the purpose of encoding or decoding bit streams in products that are compliant
+with the ISO/IEC MPEG audio standards. Please note that most manufacturers of
+Android devices already license these patent claims through Via Licensing or
+directly from the patent owners, and therefore FDK AAC Codec software may
+already be covered under those patent licenses when it is used for those
+licensed purposes only.
+
+Commercially-licensed AAC software libraries, including floating-point versions
+with enhanced sound quality, are also available from Fraunhofer. Users are
+encouraged to check the Fraunhofer website for additional applications
+information and documentation.
+
+2.    COPYRIGHT LICENSE
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted without payment of copyright license fees provided that you
+satisfy the following conditions:
+
+You must retain the complete text of this software license in redistributions of
+the FDK AAC Codec or your modifications thereto in source code form.
+
+You must retain the complete text of this software license in the documentation
+and/or other materials provided with redistributions of the FDK AAC Codec or
+your modifications thereto in binary form. You must make available free of
+charge copies of the complete source code of the FDK AAC Codec and your
+modifications thereto to recipients of copies in binary form.
+
+The name of Fraunhofer may not be used to endorse or promote products derived
+from this library without prior written permission.
+
+You may not charge copyright license fees for anyone to use, copy or distribute
+the FDK AAC Codec software or your modifications thereto.
+
+Your modified versions of the FDK AAC Codec must carry prominent notices stating
+that you changed the software and the date of any change. For modified versions
+of the FDK AAC Codec, the term "Fraunhofer FDK AAC Codec Library for Android"
+must be replaced by the term "Third-Party Modified Version of the Fraunhofer FDK
+AAC Codec Library for Android."
+
+3.    NO PATENT LICENSE
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PATENT CLAIMS, including without
+limitation the patents of Fraunhofer, ARE GRANTED BY THIS SOFTWARE LICENSE.
+Fraunhofer provides no warranty of patent non-infringement with respect to this
+software.
+
+You may use this FDK AAC Codec software or modifications thereto only for
+purposes that are authorized by appropriate patent licenses.
+
+4.    DISCLAIMER
+
+This FDK AAC Codec software is provided by Fraunhofer on behalf of the copyright
+holders and contributors "AS IS" and WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES,
+including but not limited to the implied warranties of merchantability and
+fitness for a particular purpose. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE for any direct, indirect, incidental, special, exemplary,
+or consequential damages, including but not limited to procurement of substitute
+goods or services; loss of use, data, or profits, or business interruption,
+however caused and on any theory of liability, whether in contract, strict
+liability, or tort (including negligence), arising in any way out of the use of
+this software, even if advised of the possibility of such damage.
+
+5.    CONTACT INFORMATION
+
+Fraunhofer Institute for Integrated Circuits IIS
+Attention: Audio and Multimedia Departments - FDK AAC LL
+Am Wolfsmantel 33
+91058 Erlangen, Germany
+
+www.iis.fraunhofer.de/amm
+amm-info@iis.fraunhofer.de
+----------------------------------------------------------------------------- */
+
+/************************* System integration library **************************
+
+   Author(s):   Manuel Jander
+
+   Description:
+
+*******************************************************************************/
+
+/** \file   FDK_audio.h
+ *  \brief  Global audio struct and constant definitions.
+ */
+
+#ifndef FDK_AUDIO_H
+#define FDK_AUDIO_H
+
+#include "machine_type.h"
+#include "genericStds.h"
+#include "syslib_channelMapDescr.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * File format identifiers.
+ */
+typedef enum {
+  FF_UNKNOWN = -1, /**< Unknown format.        */
+  FF_RAW = 0,      /**< No container, bit stream data conveyed "as is". */
+
+  FF_MP4_3GPP = 3, /**< 3GPP file format.      */
+  FF_MP4_MP4F = 4, /**< MPEG-4 File format.     */
+
+  FF_RAWPACKETS = 5 /**< Proprietary raw packet file. */
+
+} FILE_FORMAT;
+
+/**
+ * Transport type identifiers.
+ */
+typedef enum {
+  TT_UNKNOWN = -1, /**< Unknown format.            */
+  TT_MP4_RAW = 0,  /**< "as is" access units (packet based since there is
+                      obviously no sync layer) */
+  TT_MP4_ADIF = 1, /**< ADIF bitstream format.     */
+  TT_MP4_ADTS = 2, /**< ADTS bitstream format.     */
+
+  TT_MP4_LATM_MCP1 = 6, /**< Audio Mux Elements with muxConfigPresent = 1 */
+  TT_MP4_LATM_MCP0 = 7, /**< Audio Mux Elements with muxConfigPresent = 0, out
+                           of band StreamMuxConfig */
+
+  TT_MP4_LOAS = 10, /**< Audio Sync Stream.         */
+
+  TT_DRM = 12 /**< Digital Radio Mondial (DRM30/DRM+) bitstream format. */
+
+} TRANSPORT_TYPE;
+
+#define TT_IS_PACKET(x)                                                   \
+  (((x) == TT_MP4_RAW) || ((x) == TT_DRM) || ((x) == TT_MP4_LATM_MCP0) || \
+   ((x) == TT_MP4_LATM_MCP1))
+
+/**
+ * Audio Object Type definitions.
+ */
+typedef enum {
+  AOT_NONE = -1,
+  AOT_NULL_OBJECT = 0,
+  AOT_AAC_MAIN = 1, /**< Main profile                              */
+  AOT_AAC_LC = 2,   /**< Low Complexity object                     */
+  AOT_AAC_SSR = 3,
+  AOT_AAC_LTP = 4,
+  AOT_SBR = 5,
+  AOT_AAC_SCAL = 6,
+  AOT_TWIN_VQ = 7,
+  AOT_CELP = 8,
+  AOT_HVXC = 9,
+  AOT_RSVD_10 = 10,          /**< (reserved)                                */
+  AOT_RSVD_11 = 11,          /**< (reserved)                                */
+  AOT_TTSI = 12,             /**< TTSI Object                               */
+  AOT_MAIN_SYNTH = 13,       /**< Main Synthetic object                     */
+  AOT_WAV_TAB_SYNTH = 14,    /**< Wavetable Synthesis object                */
+  AOT_GEN_MIDI = 15,         /**< General MIDI object                       */
+  AOT_ALG_SYNTH_AUD_FX = 16, /**< Algorithmic Synthesis and Audio FX object */
+  AOT_ER_AAC_LC = 17,        /**< Error Resilient(ER) AAC Low Complexity    */
+  AOT_RSVD_18 = 18,          /**< (reserved)                                */
+  AOT_ER_AAC_LTP = 19,       /**< Error Resilient(ER) AAC LTP object        */
+  AOT_ER_AAC_SCAL = 20,      /**< Error Resilient(ER) AAC Scalable object   */
+  AOT_ER_TWIN_VQ = 21,       /**< Error Resilient(ER) TwinVQ object         */
+  AOT_ER_BSAC = 22,          /**< Error Resilient(ER) BSAC object           */
+  AOT_ER_AAC_LD = 23,        /**< Error Resilient(ER) AAC LowDelay object   */
+  AOT_ER_CELP = 24,          /**< Error Resilient(ER) CELP object           */
+  AOT_ER_HVXC = 25,          /**< Error Resilient(ER) HVXC object           */
+  AOT_ER_HILN = 26,          /**< Error Resilient(ER) HILN object           */
+  AOT_ER_PARA = 27,          /**< Error Resilient(ER) Parametric object     */
+  AOT_RSVD_28 = 28,          /**< might become SSC                          */
+  AOT_PS = 29,               /**< PS, Parametric Stereo (includes SBR)      */
+  AOT_MPEGS = 30,            /**< MPEG Surround                             */
+
+  AOT_ESCAPE = 31, /**< Signal AOT uses more than 5 bits          */
+
+  AOT_MP3ONMP4_L1 = 32, /**< MPEG-Layer1 in mp4                        */
+  AOT_MP3ONMP4_L2 = 33, /**< MPEG-Layer2 in mp4                        */
+  AOT_MP3ONMP4_L3 = 34, /**< MPEG-Layer3 in mp4                        */
+  AOT_RSVD_35 = 35,     /**< might become DST                          */
+  AOT_RSVD_36 = 36,     /**< might become ALS                          */
+  AOT_AAC_SLS = 37,     /**< AAC + SLS                                 */
+  AOT_SLS = 38,         /**< SLS                                       */
+  AOT_ER_AAC_ELD = 39,  /**< AAC Enhanced Low Delay                    */
+
+  AOT_USAC = 42,     /**< USAC                                      */
+  AOT_SAOC = 43,     /**< SAOC                                      */
+  AOT_LD_MPEGS = 44, /**< Low Delay MPEG Surround                   */
+
+  /* Pseudo AOTs */
+  AOT_MP2_AAC_LC = 129, /**< Virtual AOT MP2 Low Complexity profile */
+  AOT_MP2_SBR = 132, /**< Virtual AOT MP2 Low Complexity Profile with SBR    */
+
+  AOT_DRM_AAC = 143, /**< Virtual AOT for DRM (ER-AAC-SCAL without SBR) */
+  AOT_DRM_SBR = 144, /**< Virtual AOT for DRM (ER-AAC-SCAL with SBR) */
+  AOT_DRM_MPEG_PS =
+      145, /**< Virtual AOT for DRM (ER-AAC-SCAL with SBR and MPEG-PS) */
+  AOT_DRM_SURROUND =
+      146, /**< Virtual AOT for DRM Surround (ER-AAC-SCAL (+SBR) +MPS) */
+  AOT_DRM_USAC = 147 /**< Virtual AOT for DRM with USAC */
+
+} AUDIO_OBJECT_TYPE;
+
+#define CAN_DO_PS(aot)                                           \
+  ((aot) == AOT_AAC_LC || (aot) == AOT_SBR || (aot) == AOT_PS || \
+   (aot) == AOT_ER_BSAC || (aot) == AOT_DRM_AAC)
+
+#define IS_USAC(aot) ((aot) == AOT_USAC)
+
+#define IS_LOWDELAY(aot) ((aot) == AOT_ER_AAC_LD || (aot) == AOT_ER_AAC_ELD)
+
+/** Channel Mode ( 1-7 equals MPEG channel configurations, others are
+ * arbitrary). */
+typedef enum {
+  MODE_INVALID = -1,
+  MODE_UNKNOWN = 0,
+  MODE_1 = 1,         /**< C */
+  MODE_2 = 2,         /**< L+R */
+  MODE_1_2 = 3,       /**< C, L+R */
+  MODE_1_2_1 = 4,     /**< C, L+R, Rear */
+  MODE_1_2_2 = 5,     /**< C, L+R, LS+RS */
+  MODE_1_2_2_1 = 6,   /**< C, L+R, LS+RS, LFE */
+  MODE_1_2_2_2_1 = 7, /**< C, LC+RC, L+R, LS+RS, LFE */
+
+  MODE_6_1 = 11,           /**< C, L+R, LS+RS, Crear, LFE */
+  MODE_7_1_BACK = 12,      /**< C, L+R, LS+RS, Lrear+Rrear, LFE */
+  MODE_7_1_TOP_FRONT = 14, /**< C, L+R, LS+RS, LFE, Ltop+Rtop */
+
+  MODE_7_1_REAR_SURROUND = 33, /**< C, L+R, LS+RS, Lrear+Rrear, LFE */
+  MODE_7_1_FRONT_CENTER = 34,  /**< C, LC+RC, L+R, LS+RS, LFE */
+
+  MODE_212 = 128 /**< 212 configuration, used in ELDv2 */
+
+} CHANNEL_MODE;
+
+/**
+ * Speaker description tags.
+ * Do not change the enumeration values unless it keeps the following
+ * segmentation:
+ * - Bit 0-3: Horizontal postion (0: none, 1: front, 2: side, 3: back, 4: lfe)
+ * - Bit 4-7: Vertical position (0: normal, 1: top, 2: bottom)
+ */
+typedef enum {
+  ACT_NONE = 0x00,
+  ACT_FRONT = 0x01, /*!< Front speaker position (at normal height) */
+  ACT_SIDE = 0x02,  /*!< Side speaker position (at normal height) */
+  ACT_BACK = 0x03,  /*!< Back speaker position (at normal height) */
+  ACT_LFE = 0x04,   /*!< Low frequency effect speaker postion (front) */
+
+  ACT_TOP =
+      0x10, /*!< Top speaker area (for combination with speaker positions) */
+  ACT_FRONT_TOP = 0x11, /*!< Top front speaker = (ACT_FRONT|ACT_TOP) */
+  ACT_SIDE_TOP = 0x12,  /*!< Top side speaker  = (ACT_SIDE |ACT_TOP) */
+  ACT_BACK_TOP = 0x13,  /*!< Top back speaker  = (ACT_BACK |ACT_TOP) */
+
+  ACT_BOTTOM =
+      0x20, /*!< Bottom speaker area (for combination with speaker positions) */
+  ACT_FRONT_BOTTOM = 0x21, /*!< Bottom front speaker = (ACT_FRONT|ACT_BOTTOM) */
+  ACT_SIDE_BOTTOM = 0x22,  /*!< Bottom side speaker  = (ACT_SIDE |ACT_BOTTOM) */
+  ACT_BACK_BOTTOM = 0x23   /*!< Bottom back speaker  = (ACT_BACK |ACT_BOTTOM) */
+
+} AUDIO_CHANNEL_TYPE;
+
+typedef enum {
+  SIG_UNKNOWN = -1,
+  SIG_IMPLICIT = 0,
+  SIG_EXPLICIT_BW_COMPATIBLE = 1,
+  SIG_EXPLICIT_HIERARCHICAL = 2
+
+} SBR_PS_SIGNALING;
+
+/**
+ * Audio Codec flags.
+ */
+#define AC_ER_VCB11                                                           \
+  0x000001 /*!< aacSectionDataResilienceFlag     flag (from ASC): 1 means use \
+              virtual codebooks  */
+#define AC_ER_RVLC                                                             \
+  0x000002 /*!< aacSpectralDataResilienceFlag     flag (from ASC): 1 means use \
+              huffman codeword reordering */
+#define AC_ER_HCR                                                             \
+  0x000004 /*!< aacSectionDataResilienceFlag     flag (from ASC): 1 means use \
+              virtual codebooks  */
+#define AC_SCALABLE 0x000008    /*!< AAC Scalable*/
+#define AC_ELD 0x000010         /*!< AAC-ELD */
+#define AC_LD 0x000020          /*!< AAC-LD */
+#define AC_ER 0x000040          /*!< ER syntax */
+#define AC_BSAC 0x000080        /*!< BSAC */
+#define AC_USAC 0x000100        /*!< USAC */
+#define AC_RSV603DA 0x000200    /*!< RSVD60 3D audio */
+#define AC_HDAAC 0x000400       /*!< HD-AAC */
+#define AC_RSVD50 0x004000      /*!< Rsvd50 */
+#define AC_SBR_PRESENT 0x008000 /*!< SBR present flag (from ASC) */
+#define AC_SBRCRC \
+  0x010000 /*!< SBR CRC present flag. Only relevant for AAC-ELD for now. */
+#define AC_PS_PRESENT 0x020000 /*!< PS present flag (from ASC or implicit)  */
+#define AC_MPS_PRESENT                                                     \
+  0x040000                    /*!< MPS present flag (from ASC or implicit) \
+                               */
+#define AC_DRM 0x080000       /*!< DRM bit stream syntax */
+#define AC_INDEP 0x100000     /*!< Independency flag */
+#define AC_MPEGD_RES 0x200000 /*!< MPEG-D residual individual channel data. */
+#define AC_SAOC_PRESENT 0x400000   /*!< SAOC Present Flag */
+#define AC_DAB 0x800000            /*!< DAB bit stream syntax */
+#define AC_ELD_DOWNSCALE 0x1000000 /*!< ELD Downscaled playout */
+#define AC_LD_MPS 0x2000000        /*!< Low Delay MPS. */
+#define AC_DRC_PRESENT                                   \
+  0x4000000 /*!< Dynamic Range Control (DRC) data found. \
+             */
+#define AC_USAC_SCFGI3 \
+  0x8000000 /*!< USAC flag: If stereoConfigIndex is 3 the flag is set. */
+/**
+ * Audio Codec flags (reconfiguration).
+ */
+#define AC_CM_DET_CFG_CHANGE                                                 \
+  0x000001 /*!< Config mode signalizes the callback to work in config change \
+              detection mode */
+#define AC_CM_ALLOC_MEM                                               \
+  0x000002 /*!< Config mode signalizes the callback to work in memory \
+              allocation mode */
+
+/**
+ * Audio Codec flags (element specific).
+ */
+#define AC_EL_USAC_TW 0x000001    /*!< USAC time warped filter bank is active */
+#define AC_EL_USAC_NOISE 0x000002 /*!< USAC noise filling is active */
+#define AC_EL_USAC_ITES 0x000004  /*!< USAC SBR inter-TES tool is active */
+#define AC_EL_USAC_PVC \
+  0x000008 /*!< USAC SBR predictive vector coding tool is active */
+#define AC_EL_USAC_MPS212 0x000010 /*!< USAC MPS212 tool is active */
+#define AC_EL_USAC_LFE 0x000020    /*!< USAC element is LFE */
+#define AC_EL_USAC_CP_POSSIBLE                                                 \
+  0x000040 /*!< USAC may use Complex Stereo Prediction in this channel element \
+            */
+#define AC_EL_ENHANCED_NOISE 0x000080   /*!< Enhanced noise filling*/
+#define AC_EL_IGF_AFTER_TNS 0x000100    /*!< IGF after TNS */
+#define AC_EL_IGF_INDEP_TILING 0x000200 /*!< IGF independent tiling */
+#define AC_EL_IGF_USE_ENF 0x000400      /*!< IGF use enhanced noise filling */
+#define AC_EL_FULLBANDLPD 0x000800      /*!< enable fullband LPD tools */
+#define AC_EL_LPDSTEREOIDX 0x001000     /*!< LPD-stereo-tool stereo index */
+#define AC_EL_LFE 0x002000              /*!< The element is of type LFE. */
+
+/* CODER_CONFIG::flags */
+#define CC_MPEG_ID 0x00100000
+#define CC_IS_BASELAYER 0x00200000
+#define CC_PROTECTION 0x00400000
+#define CC_SBR 0x00800000
+#define CC_SBRCRC 0x00010000
+#define CC_SAC 0x00020000
+#define CC_RVLC 0x01000000
+#define CC_VCB11 0x02000000
+#define CC_HCR 0x04000000
+#define CC_PSEUDO_SURROUND 0x08000000
+#define CC_USAC_NOISE 0x10000000
+#define CC_USAC_TW 0x20000000
+#define CC_USAC_HBE 0x40000000
+
+/** Generic audio coder configuration structure. */
+typedef struct {
+  AUDIO_OBJECT_TYPE aot;     /**< Audio Object Type (AOT).           */
+  AUDIO_OBJECT_TYPE extAOT;  /**< Extension Audio Object Type (SBR). */
+  CHANNEL_MODE channelMode;  /**< Channel mode.                      */
+  UCHAR channelConfigZero;   /**< Use channel config zero + pce although a
+                                standard channel config could be signaled. */
+  INT samplingRate;          /**< Sampling rate.                     */
+  INT extSamplingRate;       /**< Extended samplerate (SBR).         */
+  INT downscaleSamplingRate; /**< Downscale sampling rate (ELD downscaled mode)
+                              */
+  INT bitRate;               /**< Average bitrate.                   */
+  int samplesPerFrame; /**< Number of PCM samples per codec frame and audio
+                          channel. */
+  int noChannels;      /**< Number of audio channels.          */
+  int bitsFrame;
+  int nSubFrames; /**< Amount of encoder subframes. 1 means no subframing. */
+  int BSACnumOfSubFrame; /**< The number of the sub-frames which are grouped and
+                            transmitted in a super-frame (BSAC). */
+  int BSAClayerLength; /**< The average length of the large-step layers in bytes
+                          (BSAC).                            */
+  UINT flags;          /**< flags */
+  UCHAR matrixMixdownA; /**< Matrix mixdown index to put into PCE. Default value
+                           0 means no mixdown coefficient, valid values are 1-4
+                           which correspond to matrix_mixdown_idx 0-3. */
+  UCHAR headerPeriod;   /**< Frame period for sending in band configuration
+                           buffers in the transport layer. */
+
+  UCHAR stereoConfigIndex;       /**< USAC MPS stereo mode */
+  UCHAR sbrMode;                 /**< USAC SBR mode */
+  SBR_PS_SIGNALING sbrSignaling; /**< 0: implicit signaling, 1: backwards
+                                    compatible explicit signaling, 2:
+                                    hierarcical explicit signaling */
+
+  UCHAR rawConfig[64]; /**< raw codec specific config as bit stream */
+  int rawConfigBits;   /**< Size of rawConfig in bits */
+
+  UCHAR sbrPresent;
+  UCHAR psPresent;
+} CODER_CONFIG;
+
+#define USAC_ID_BIT 16 /** USAC element IDs start at USAC_ID_BIT */
+
+/** MP4 Element IDs. */
+typedef enum {
+  /* mp4 element IDs */
+  ID_NONE = -1, /**< Invalid Element helper ID.             */
+  ID_SCE = 0,   /**< Single Channel Element.                */
+  ID_CPE = 1,   /**< Channel Pair Element.                  */
+  ID_CCE = 2,   /**< Coupling Channel Element.              */
+  ID_LFE = 3,   /**< LFE Channel Element.                   */
+  ID_DSE = 4,   /**< Currently one Data Stream Element for ancillary data is
+                   supported. */
+  ID_PCE = 5,   /**< Program Config Element.                */
+  ID_FIL = 6,   /**< Fill Element.                          */
+  ID_END = 7,   /**< Arnie (End Element = Terminator).      */
+  ID_EXT = 8,   /**< Extension Payload (ER only).           */
+  ID_SCAL = 9,  /**< AAC scalable element (ER only).        */
+  /* USAC element IDs */
+  ID_USAC_SCE = 0 + USAC_ID_BIT, /**< Single Channel Element.                */
+  ID_USAC_CPE = 1 + USAC_ID_BIT, /**< Channel Pair Element.                  */
+  ID_USAC_LFE = 2 + USAC_ID_BIT, /**< LFE Channel Element.                   */
+  ID_USAC_EXT = 3 + USAC_ID_BIT, /**< Extension Element.                     */
+  ID_USAC_END = 4 + USAC_ID_BIT, /**< Arnie (End Element = Terminator).      */
+  ID_LAST
+} MP4_ELEMENT_ID;
+
+/* usacConfigExtType q.v. ISO/IEC DIS 23008-3 Table 52  and  ISO/IEC FDIS
+ * 23003-3:2011(E) Table 74*/
+typedef enum {
+  /* USAC and RSVD60 3DA */
+  ID_CONFIG_EXT_FILL = 0,
+  /* RSVD60 3DA */
+  ID_CONFIG_EXT_DOWNMIX = 1,
+  ID_CONFIG_EXT_LOUDNESS_INFO = 2,
+  ID_CONFIG_EXT_AUDIOSCENE_INFO = 3,
+  ID_CONFIG_EXT_HOA_MATRIX = 4,
+  ID_CONFIG_EXT_SIG_GROUP_INFO = 6
+  /* 5-127 => reserved for ISO use */
+  /* > 128 => reserved for use outside of ISO scope */
+} CONFIG_EXT_ID;
+
+#define IS_CHANNEL_ELEMENT(elementId)                                         \
+  ((elementId) == ID_SCE || (elementId) == ID_CPE || (elementId) == ID_LFE || \
+   (elementId) == ID_USAC_SCE || (elementId) == ID_USAC_CPE ||                \
+   (elementId) == ID_USAC_LFE)
+
+#define IS_MP4_CHANNEL_ELEMENT(elementId) \
+  ((elementId) == ID_SCE || (elementId) == ID_CPE || (elementId) == ID_LFE)
+
+#define EXT_ID_BITS 4 /**< Size in bits of extension payload type tags. */
+
+/** Extension payload types. */
+typedef enum {
+  EXT_FIL = 0x00,
+  EXT_FILL_DATA = 0x01,
+  EXT_DATA_ELEMENT = 0x02,
+  EXT_DATA_LENGTH = 0x03,
+  EXT_UNI_DRC = 0x04,
+  EXT_LDSAC_DATA = 0x09,
+  EXT_SAOC_DATA = 0x0a,
+  EXT_DYNAMIC_RANGE = 0x0b,
+  EXT_SAC_DATA = 0x0c,
+  EXT_SBR_DATA = 0x0d,
+  EXT_SBR_DATA_CRC = 0x0e
+} EXT_PAYLOAD_TYPE;
+
+#define IS_USAC_CHANNEL_ELEMENT(elementId)                     \
+  ((elementId) == ID_USAC_SCE || (elementId) == ID_USAC_CPE || \
+   (elementId) == ID_USAC_LFE)
+
+/** MPEG-D USAC & RSVD60 3D audio Extension Element Types. */
+typedef enum {
+  /* usac */
+  ID_EXT_ELE_FILL = 0x00,
+  ID_EXT_ELE_MPEGS = 0x01,
+  ID_EXT_ELE_SAOC = 0x02,
+  ID_EXT_ELE_AUDIOPREROLL = 0x03,
+  ID_EXT_ELE_UNI_DRC = 0x04,
+  /* rsv603da */
+  ID_EXT_ELE_OBJ_METADATA = 0x05,
+  ID_EXT_ELE_SAOC_3D = 0x06,
+  ID_EXT_ELE_HOA = 0x07,
+  ID_EXT_ELE_FMT_CNVRTR = 0x08,
+  ID_EXT_ELE_MCT = 0x09,
+  ID_EXT_ELE_ENHANCED_OBJ_METADATA = 0x0d,
+  /* reserved for use outside of ISO scope */
+  ID_EXT_ELE_VR_METADATA = 0x81,
+  ID_EXT_ELE_UNKNOWN = 0xFF
+} USAC_EXT_ELEMENT_TYPE;
+
+/**
+ * Proprietary raw packet file configuration data type identifier.
+ */
+typedef enum {
+  TC_NOTHING = 0,  /* No configuration available -> in-band configuration.   */
+  TC_RAW_ADTS = 2, /* Transfer type is ADTS. */
+  TC_RAW_LATM_MCP1 = 6, /* Transfer type is LATM with SMC present.    */
+  TC_RAW_SDC = 21       /* Configuration data field is Drm SDC.             */
+
+} TP_CONFIG_TYPE;
+
+/*
+ * ##############################################################################################
+ * Library identification and error handling
+ * ##############################################################################################
+ */
+/* \cond */
+
+typedef enum {
+  FDK_NONE = 0,
+  FDK_TOOLS = 1,
+  FDK_SYSLIB = 2,
+  FDK_AACDEC = 3,
+  FDK_AACENC = 4,
+  FDK_SBRDEC = 5,
+  FDK_SBRENC = 6,
+  FDK_TPDEC = 7,
+  FDK_TPENC = 8,
+  FDK_MPSDEC = 9,
+  FDK_MPEGFILEREAD = 10,
+  FDK_MPEGFILEWRITE = 11,
+  FDK_PCMDMX = 31,
+  FDK_MPSENC = 34,
+  FDK_TDLIMIT = 35,
+  FDK_UNIDRCDEC = 38,
+
+  FDK_MODULE_LAST
+
+} FDK_MODULE_ID;
+
+/* AAC capability flags */
+#define CAPF_AAC_LC 0x00000001 /**< Support flag for AAC Low Complexity. */
+#define CAPF_ER_AAC_LD                                                        \
+  0x00000002 /**< Support flag for AAC Low Delay with Error Resilience tools. \
+              */
+#define CAPF_ER_AAC_SCAL 0x00000004 /**< Support flag for AAC Scalable. */
+#define CAPF_ER_AAC_LC                                                      \
+  0x00000008 /**< Support flag for AAC Low Complexity with Error Resilience \
+                tools. */
+#define CAPF_AAC_480 \
+  0x00000010 /**< Support flag for AAC with 480 framelength.  */
+#define CAPF_AAC_512 \
+  0x00000020 /**< Support flag for AAC with 512 framelength.  */
+#define CAPF_AAC_960 \
+  0x00000040 /**< Support flag for AAC with 960 framelength.  */
+#define CAPF_AAC_1024 \
+  0x00000080 /**< Support flag for AAC with 1024 framelength. */
+#define CAPF_AAC_HCR \
+  0x00000100 /**< Support flag for AAC with Huffman Codeword Reordering.    */
+#define CAPF_AAC_VCB11 \
+  0x00000200 /**< Support flag for AAC Virtual Codebook 11.    */
+#define CAPF_AAC_RVLC \
+  0x00000400 /**< Support flag for AAC Reversible Variable Length Coding.   */
+#define CAPF_AAC_MPEG4 0x00000800 /**< Support flag for MPEG file format. */
+#define CAPF_AAC_DRC \
+  0x00001000 /**< Support flag for AAC Dynamic Range Control. */
+#define CAPF_AAC_CONCEALMENT \
+  0x00002000 /**< Support flag for AAC concealment.           */
+#define CAPF_AAC_DRM_BSFORMAT \
+  0x00004000 /**< Support flag for AAC DRM bistream format. */
+#define CAPF_ER_AAC_ELD                                              \
+  0x00008000 /**< Support flag for AAC Enhanced Low Delay with Error \
+                Resilience tools.  */
+#define CAPF_ER_AAC_BSAC \
+  0x00010000 /**< Support flag for AAC BSAC.                           */
+#define CAPF_AAC_ELD_DOWNSCALE \
+  0x00040000 /**< Support flag for AAC-ELD Downscaling           */
+#define CAPF_AAC_USAC_LP \
+  0x00100000 /**< Support flag for USAC low power mode. */
+#define CAPF_AAC_USAC \
+  0x00200000 /**< Support flag for Unified Speech and Audio Coding (USAC). */
+#define CAPF_ER_AAC_ELDV2 \
+  0x00800000 /**< Support flag for AAC Enhanced Low Delay with MPS 212.  */
+#define CAPF_AAC_UNIDRC \
+  0x01000000 /**< Support flag for MPEG-D Dynamic Range Control (uniDrc). */
+
+/* Transport capability flags */
+#define CAPF_ADTS \
+  0x00000001 /**< Support flag for ADTS transport format.        */
+#define CAPF_ADIF \
+  0x00000002 /**< Support flag for ADIF transport format.        */
+#define CAPF_LATM \
+  0x00000004 /**< Support flag for LATM transport format.        */
+#define CAPF_LOAS \
+  0x00000008 /**< Support flag for LOAS transport format.        */
+#define CAPF_RAWPACKETS \
+  0x00000010 /**< Support flag for RAW PACKETS transport format. */
+#define CAPF_DRM \
+  0x00000020 /**< Support flag for DRM/DRM+ transport format.    */
+#define CAPF_RSVD50 \
+  0x00000040 /**< Support flag for RSVD50 transport format       */
+
+/* SBR capability flags */
+#define CAPF_SBR_LP \
+  0x00000001 /**< Support flag for SBR Low Power mode.           */
+#define CAPF_SBR_HQ \
+  0x00000002 /**< Support flag for SBR High Quality mode.        */
+#define CAPF_SBR_DRM_BS \
+  0x00000004 /**< Support flag for                               */
+#define CAPF_SBR_CONCEALMENT \
+  0x00000008 /**< Support flag for SBR concealment.              */
+#define CAPF_SBR_DRC \
+  0x00000010 /**< Support flag for SBR Dynamic Range Control.    */
+#define CAPF_SBR_PS_MPEG \
+  0x00000020 /**< Support flag for MPEG Parametric Stereo.       */
+#define CAPF_SBR_PS_DRM \
+  0x00000040 /**< Support flag for DRM Parametric Stereo.        */
+#define CAPF_SBR_ELD_DOWNSCALE \
+  0x00000080 /**< Support flag for ELD reduced delay mode        */
+#define CAPF_SBR_HBEHQ \
+  0x00000100 /**< Support flag for HQ HBE                        */
+
+/* PCM utils capability flags */
+#define CAPF_DMX_BLIND \
+  0x00000001 /**< Support flag for blind downmixing.             */
+#define CAPF_DMX_PCE                                                      \
+  0x00000002 /**< Support flag for guided downmix with data from MPEG-2/4 \
+                Program Config Elements (PCE). */
+#define CAPF_DMX_ARIB                                                         \
+  0x00000004 /**< Support flag for PCE guided downmix with slightly different \
+                equations and levels to fulfill ARIB standard. */
+#define CAPF_DMX_DVB                                                           \
+  0x00000008 /**< Support flag for guided downmix with data from DVB ancillary \
+                data fields. */
+#define CAPF_DMX_CH_EXP                                                       \
+  0x00000010 /**< Support flag for simple upmixing by duplicating channels or \
+                adding zero channels. */
+#define CAPF_DMX_6_CH                                                   \
+  0x00000020 /**< Support flag for 5.1 channel configuration (input and \
+                output). */
+#define CAPF_DMX_8_CH                                                          \
+  0x00000040 /**< Support flag for 6 and 7.1 channel configurations (input and \
+                output). */
+#define CAPF_DMX_24_CH                                                   \
+  0x00000080 /**< Support flag for 22.2 channel configuration (input and \
+                output). */
+#define CAPF_LIMITER                                      \
+  0x00002000 /**< Support flag for signal level limiting. \
+              */
+
+/* MPEG Surround capability flags */
+#define CAPF_MPS_STD \
+  0x00000001 /**< Support flag for MPEG Surround.           */
+#define CAPF_MPS_LD                                         \
+  0x00000002 /**< Support flag for Low Delay MPEG Surround. \
+              */
+#define CAPF_MPS_USAC \
+  0x00000004 /**< Support flag for USAC MPEG Surround.      */
+#define CAPF_MPS_HQ                                                     \
+  0x00000010 /**< Support flag indicating if high quality processing is \
+                supported */
+#define CAPF_MPS_LP                                                        \
+  0x00000020 /**< Support flag indicating if partially complex (low power) \
+                processing is supported */
+#define CAPF_MPS_BLIND \
+  0x00000040 /**< Support flag indicating if blind processing is supported */
+#define CAPF_MPS_BINAURAL \
+  0x00000080 /**< Support flag indicating if binaural output is possible */
+#define CAPF_MPS_2CH_OUT \
+  0x00000100 /**< Support flag indicating if 2ch output is possible      */
+#define CAPF_MPS_6CH_OUT \
+  0x00000200 /**< Support flag indicating if 6ch output is possible      */
+#define CAPF_MPS_8CH_OUT \
+  0x00000400 /**< Support flag indicating if 8ch output is possible      */
+#define CAPF_MPS_1CH_IN \
+  0x00001000 /**< Support flag indicating if 1ch dmx input is possible   */
+#define CAPF_MPS_2CH_IN \
+  0x00002000 /**< Support flag indicating if 2ch dmx input is possible   */
+#define CAPF_MPS_6CH_IN \
+  0x00004000 /**< Support flag indicating if 5ch dmx input is possible   */
+
+/* \endcond */
+
+/*
+ * ##############################################################################################
+ * Library versioning
+ * ##############################################################################################
+ */
+
+/**
+ * Convert each member of version numbers to one single numeric version
+ * representation.
+ * Each level is masked to 8 bits; the three levels are packed into the top
+ * three bytes of the result (the least significant byte remains zero).
+ * \param lev0  1st level of version number.
+ * \param lev1  2nd level of version number.
+ * \param lev2  3rd level of version number.
+ */
+#define LIB_VERSION(lev0, lev1, lev2)                      \
+  ((lev0 << 24 & 0xff000000) | (lev1 << 16 & 0x00ff0000) | \
+   (lev2 << 8 & 0x0000ff00))
+
+/**
+ *  Build text string of version.
+ *  Unpacks a LIB_VERSION()-encoded number from (info)->version and formats
+ *  it as "lev0.lev1.lev2" into (info)->versionStr.
+ */
+#define LIB_VERSION_STRING(info)                                               \
+  FDKsprintf((info)->versionStr, "%d.%d.%d", (((info)->version >> 24) & 0xff), \
+             (((info)->version >> 16) & 0xff),                                 \
+             (((info)->version >> 8) & 0xff))
+
+/**
+ *  Library information.
+ */
+typedef struct LIB_INFO {
+  const char* title;       /*!< Library title string. */
+  const char* build_date;  /*!< Build date string. */
+  const char* build_time;  /*!< Build time string. */
+  FDK_MODULE_ID module_id; /*!< Module identifier; FDK_NONE marks an unused
+                                table entry (see FDKinitLibInfo()). */
+  INT version;             /*!< Version number, packed as by LIB_VERSION(). */
+  UINT flags;              /*!< Capability flags (CAPF_* bit mask). */
+  char versionStr[32];     /*!< Version as text, see LIB_VERSION_STRING(). */
+} LIB_INFO;
+
+#ifdef __cplusplus
+#define FDK_AUDIO_INLINE inline
+#else
+#define FDK_AUDIO_INLINE
+#endif
+
+/** Initialize library info.
+ *  Marks every entry of the given table as unused by setting its module_id
+ *  to FDK_NONE. The table is expected to hold FDK_MODULE_LAST entries. */
+static FDK_AUDIO_INLINE void FDKinitLibInfo(LIB_INFO* info) {
+  int i;
+
+  for (i = 0; i < FDK_MODULE_LAST; i++) {
+    info[i].module_id = FDK_NONE;
+  }
+}
+
+/** Acquire supported features of library.
+ *  Scans the info table for the given module and returns its capability
+ *  flags, or 0 if the module is not registered in the table. */
+static FDK_AUDIO_INLINE UINT
+FDKlibInfo_getCapabilities(const LIB_INFO* info, FDK_MODULE_ID module_id) {
+  int i;
+
+  for (i = 0; i < FDK_MODULE_LAST; i++) {
+    if (info[i].module_id == module_id) {
+      return info[i].flags;
+    }
+  }
+  return 0;
+}
+
+/** Search for next free tab.
+ *  Returns the index of the first free table entry (module_id == FDK_NONE),
+ *  or -1 if the module is already registered or the table is full. */
+static FDK_AUDIO_INLINE INT FDKlibInfo_lookup(const LIB_INFO* info,
+                                              FDK_MODULE_ID module_id) {
+  int i = -1;
+
+  for (i = 0; i < FDK_MODULE_LAST; i++) {
+    if (info[i].module_id == module_id) return -1; /* already registered */
+    if (info[i].module_id == FDK_NONE) break;      /* first free slot */
+  }
+  if (i == FDK_MODULE_LAST) return -1; /* table is full */
+
+  return i;
+}
+
+/*
+ * ##############################################################################################
+ * Buffer description
+ * ##############################################################################################
+ */
+
+/**
+ *  I/O buffer descriptor.
+ */
+typedef struct FDK_bufDescr {
+  void** ppBase;  /*!< Pointer to an array containing buffer base addresses.
+                       Set to NULL for buffer requirement info. */
+  UINT* pBufSize; /*!< Pointer to an array containing the number of elements
+                     that can be placed in the specific buffer. */
+  UINT* pEleSize; /*!< Pointer to an array containing the element size for each
+                     buffer in bytes. That is mostly the number returned by the
+                     sizeof() operator for the data type used for the specific
+                     buffer. */
+  UINT*
+      pBufType; /*!< Pointer to an array of bit fields containing a description
+                     for each buffer. See the FDK_BUF_TYPE_* defines below for
+                     more details. */
+  UINT numBufs; /*!< Total number of buffers. */
+
+} FDK_bufDescr;
+
+/**
+ * Buffer type description field.
+ */
+#define FDK_BUF_TYPE_MASK_IO ((UINT)0x03 << 30)
+#define FDK_BUF_TYPE_MASK_DESCR ((UINT)0x3F << 16)
+#define FDK_BUF_TYPE_MASK_ID ((UINT)0xFF)
+
+#define FDK_BUF_TYPE_INPUT ((UINT)0x1 << 30)
+#define FDK_BUF_TYPE_OUTPUT ((UINT)0x2 << 30)
+
+#define FDK_BUF_TYPE_PCM_DATA ((UINT)0x1 << 16)
+#define FDK_BUF_TYPE_ANC_DATA ((UINT)0x2 << 16)
+#define FDK_BUF_TYPE_BS_DATA ((UINT)0x4 << 16)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FDK_AUDIO_H */

+ 1082 - 0
native/include/fdk-aac/aacdecoder_lib.h

@@ -0,0 +1,1082 @@
+/* -----------------------------------------------------------------------------
+Software License for The Fraunhofer FDK AAC Codec Library for Android
+
+© Copyright  1995 - 2019 Fraunhofer-Gesellschaft zur Förderung der angewandten
+Forschung e.V. All rights reserved.
+
+ 1.    INTRODUCTION
+The Fraunhofer FDK AAC Codec Library for Android ("FDK AAC Codec") is software
+that implements the MPEG Advanced Audio Coding ("AAC") encoding and decoding
+scheme for digital audio. This FDK AAC Codec software is intended to be used on
+a wide variety of Android devices.
+
+AAC's HE-AAC and HE-AAC v2 versions are regarded as today's most efficient
+general perceptual audio codecs. AAC-ELD is considered the best-performing
+full-bandwidth communications codec by independent studies and is widely
+deployed. AAC has been standardized by ISO and IEC as part of the MPEG
+specifications.
+
+Patent licenses for necessary patent claims for the FDK AAC Codec (including
+those of Fraunhofer) may be obtained through Via Licensing
+(www.vialicensing.com) or through the respective patent owners individually for
+the purpose of encoding or decoding bit streams in products that are compliant
+with the ISO/IEC MPEG audio standards. Please note that most manufacturers of
+Android devices already license these patent claims through Via Licensing or
+directly from the patent owners, and therefore FDK AAC Codec software may
+already be covered under those patent licenses when it is used for those
+licensed purposes only.
+
+Commercially-licensed AAC software libraries, including floating-point versions
+with enhanced sound quality, are also available from Fraunhofer. Users are
+encouraged to check the Fraunhofer website for additional applications
+information and documentation.
+
+2.    COPYRIGHT LICENSE
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted without payment of copyright license fees provided that you
+satisfy the following conditions:
+
+You must retain the complete text of this software license in redistributions of
+the FDK AAC Codec or your modifications thereto in source code form.
+
+You must retain the complete text of this software license in the documentation
+and/or other materials provided with redistributions of the FDK AAC Codec or
+your modifications thereto in binary form. You must make available free of
+charge copies of the complete source code of the FDK AAC Codec and your
+modifications thereto to recipients of copies in binary form.
+
+The name of Fraunhofer may not be used to endorse or promote products derived
+from this library without prior written permission.
+
+You may not charge copyright license fees for anyone to use, copy or distribute
+the FDK AAC Codec software or your modifications thereto.
+
+Your modified versions of the FDK AAC Codec must carry prominent notices stating
+that you changed the software and the date of any change. For modified versions
+of the FDK AAC Codec, the term "Fraunhofer FDK AAC Codec Library for Android"
+must be replaced by the term "Third-Party Modified Version of the Fraunhofer FDK
+AAC Codec Library for Android."
+
+3.    NO PATENT LICENSE
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PATENT CLAIMS, including without
+limitation the patents of Fraunhofer, ARE GRANTED BY THIS SOFTWARE LICENSE.
+Fraunhofer provides no warranty of patent non-infringement with respect to this
+software.
+
+You may use this FDK AAC Codec software or modifications thereto only for
+purposes that are authorized by appropriate patent licenses.
+
+4.    DISCLAIMER
+
+This FDK AAC Codec software is provided by Fraunhofer on behalf of the copyright
+holders and contributors "AS IS" and WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES,
+including but not limited to the implied warranties of merchantability and
+fitness for a particular purpose. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE for any direct, indirect, incidental, special, exemplary,
+or consequential damages, including but not limited to procurement of substitute
+goods or services; loss of use, data, or profits, or business interruption,
+however caused and on any theory of liability, whether in contract, strict
+liability, or tort (including negligence), arising in any way out of the use of
+this software, even if advised of the possibility of such damage.
+
+5.    CONTACT INFORMATION
+
+Fraunhofer Institute for Integrated Circuits IIS
+Attention: Audio and Multimedia Departments - FDK AAC LL
+Am Wolfsmantel 33
+91058 Erlangen, Germany
+
+www.iis.fraunhofer.de/amm
+amm-info@iis.fraunhofer.de
+----------------------------------------------------------------------------- */
+
+/**************************** AAC decoder library ******************************
+
+   Author(s):   Manuel Jander
+
+   Description:
+
+*******************************************************************************/
+
+#ifndef AACDECODER_LIB_H
+#define AACDECODER_LIB_H
+
+/**
+ * \file   aacdecoder_lib.h
+ * \brief  FDK AAC decoder library interface header file.
+ *
+
+\page INTRO Introduction
+
+
+\section SCOPE Scope
+
+This document describes the high-level application interface and usage of the
+ISO/MPEG-2/4 AAC Decoder library developed by the Fraunhofer Institute for
+Integrated Circuits (IIS). Depending on the library configuration, decoding of
+AAC-LC (Low-Complexity), HE-AAC (High-Efficiency AAC v1 and v2), AAC-LD
+(Low-Delay) and AAC-ELD (Enhanced Low-Delay) is implemented.
+
+All references to SBR (Spectral Band Replication) are only applicable to HE-AAC
+and AAC-ELD configurations of the FDK library. All references to PS (Parametric
+Stereo) are only applicable to HE-AAC v2 decoder configuration of the library.
+
+\section DecoderBasics Decoder Basics
+
+This document can only give a rough overview about the ISO/MPEG-2, ISO/MPEG-4
+AAC audio and MPEG-D USAC coding standards. To understand all details referenced
+in this document, you are encouraged to read the following documents.
+
+- ISO/IEC 13818-7 (MPEG-2 AAC) Standard, defines the syntax of MPEG-2 AAC audio
+bitstreams.
+- ISO/IEC 14496-3 (MPEG-4 AAC, subpart 1 and 4) Standard, defines the syntax of
+MPEG-4 AAC audio bitstreams.
+- ISO/IEC 23003-3 (MPEG-D USAC), defines MPEG-D USAC unified speech and audio
+codec.
+- Lutzky, Schuller, Gayer, Kr&auml;mer, Wabnik, "A guideline to audio codec
+delay", 116th AES Convention, May 8, 2004
+
+In short, MPEG Advanced Audio Coding is based on a time-to-frequency mapping of
+the signal. The signal is partitioned into overlapping time portions and
+transformed into frequency domain. The spectral components are then quantized
+and coded using a highly efficient coding scheme.\n Encoded MPEG-2 and MPEG-4
+AAC audio bitstreams are composed of frames. Contrary to MPEG-1/2 Layer-3 (mp3),
+the length of individual frames is not restricted to a fixed number of bytes,
+but can take any length between 1 and 768 bytes.
+
+In addition to the above mentioned frequency domain coding mode, MPEG-D USAC
+also employs a time domain Algebraic Code-Excited Linear Prediction (ACELP)
+speech coder core. This operating mode is selected by the encoder in order to
+achieve the optimum audio quality for different content type. Several
+enhancements allow achieving higher quality at lower bit rates compared to
+MPEG-4 HE-AAC.
+
+
+\page LIBUSE Library Usage
+
+
+\section InterfaceDescription API Description
+
+All API header files are located in the folder /include of the release package.
+The contents of each file is described in detail in this document. All header
+files are provided for usage in specific C/C++ programs. The main AAC decoder
+library API functions are located in aacdecoder_lib.h header file.
+
+
+\section Calling_Sequence Calling Sequence
+
+The following sequence is necessary for proper decoding of ISO/MPEG-2/4 AAC,
+HE-AAC v2, or MPEG-D USAC bitstreams. In the following description, input stream
+read and output write function details are left out, since they may be
+implemented in a variety of configurations depending on the user's specific
+requirements.
+
+
+-# Call aacDecoder_Open() to open and retrieve a handle to a new AAC decoder
+instance. \code aacDecoderInfo = aacDecoder_Open(transportType, nrOfLayers);
+\endcode
+-# If out-of-band config data (Audio Specific Config (ASC) or Stream Mux Config
+(SMC)) is available, call aacDecoder_ConfigRaw() to pass this data to the
+decoder before beginning the decoding process. If this data is not available in
+advance, the decoder will configure itself while decoding, during the
+aacDecoder_DecodeFrame() function call.
+-# Begin decoding loop.
+\code
+do {
+\endcode
+-# Read data from bitstream file or stream buffer in to the driver program
+working memory (a client-supplied input buffer "inBuffer" in framework). This
+buffer will be used to load AAC bitstream data to the decoder.  Only when all
+data in this buffer has been processed will the decoder signal an empty buffer.
+-# Call aacDecoder_Fill() to fill the decoder's internal bitstream input buffer
+with the client-supplied bitstream input buffer. Note, if the data loaded in to
+the internal buffer is not sufficient to decode a frame,
+aacDecoder_DecodeFrame() will return ::AAC_DEC_NOT_ENOUGH_BITS until a
+sufficient amount of data is loaded in to the internal buffer. For streaming
+formats (ADTS, LOAS), it is acceptable to load more than one frame to the
+decoder. However, for packet-based formats, only one frame may be loaded to the
+decoder per aacDecoder_DecodeFrame() call. For least amount of communication
+delay, fill and decode should be performed on a frame by frame basis. \code
+    ErrorStatus = aacDecoder_Fill(aacDecoderInfo, inBuffer, bytesRead,
+bytesValid); \endcode
+-# Call aacDecoder_DecodeFrame(). This function decodes one frame and writes
+decoded PCM audio data to a client-supplied buffer. It is the client's
+responsibility to allocate a buffer which is large enough to hold the decoded
+output data. \code ErrorStatus = aacDecoder_DecodeFrame(aacDecoderInfo,
+TimeData, OUT_BUF_SIZE, flags); \endcode If the bitstream configuration (number
+of channels, sample rate, frame size) is not known a priori, you may call
+aacDecoder_GetStreamInfo() to retrieve a structure that contains this
+information. You may use this data to initialize an audio output device. \code
+    p_si = aacDecoder_GetStreamInfo(aacDecoderInfo);
+\endcode
+-# Repeat steps 5 to 7 until no data is available to decode any more, or in case
+of error. \code } while (bytesRead[0] > 0 || doFlush || doBsFlush ||
+forceContinue); \endcode
+-# Call aacDecoder_Close() to de-allocate all AAC decoder and transport layer
+structures. \code aacDecoder_Close(aacDecoderInfo); \endcode
+
+\image latex decode.png "Decode calling sequence" width=11cm
+
+\image latex change_source.png "Change data source sequence" width=5cm
+
+\image latex conceal.png "Error concealment sequence" width=14cm
+
+\subsection Error_Concealment_Sequence Error Concealment Sequence
+
+There are different strategies to handle bit stream errors. Depending on the
+system properties the product designer might choose to take different actions in
+case a bit error occurs. In many cases the decoder might be able to do
+reasonable error concealment without the need of any additional actions from the
+system. But in some cases it is not even possible to know how many decoded PCM
+output samples are required to fill the gap due to the data error, then the
+software surrounding the decoder must deal with the situation. The most simple
+way would be to just stop audio playback and resume once enough bit stream data
+and/or buffered output samples are available. More sophisticated designs might
+also be able to deal with sender/receiver clock drifts or data drop outs by
+using a closed loop control of FIFO fullness levels. The chosen strategy depends
+on the final product requirements.
+
+The error concealment sequence diagram illustrates the general execution paths
+for error handling.
+
+The macro IS_OUTPUT_VALID(err) can be used to identify if the audio output
+buffer contains valid audio either from error free bit stream data or successful
+error concealment. In case the result is false, the decoder output buffer does
+not contain meaningful audio samples and should not be passed to any output as
+it is. Most likely in case that a continuous audio output PCM stream is
+required, the output buffer must be filled with audio data from the calling
+framework. This might be e.g. an appropriate number of samples all zero.
+
+If error code ::AAC_DEC_TRANSPORT_SYNC_ERROR is returned by the decoder, under
+some particular conditions it is possible to estimate lost frames due to the bit
+stream error. In that case the bit stream is required to have a constant
+bitrate, and compatible transport type. Audio samples for the lost frames can be
+obtained by calling aacDecoder_DecodeFrame() with flag ::AACDEC_CONCEAL set
+n-times where n is the count of lost frames. Please note that the decoder has to
+have encountered valid configuration data at least once to be able to generate
+concealed data, because at the minimum the sampling rate, frame size and amount
+of audio channels needs to be known.
+
+If it is not possible to get an estimation of lost frames then a constant
+fullness of the audio output buffer can be achieved by implementing different
+FIFO control techniques e.g. just stop taking of samples from the buffer to
+avoid underflow or stop filling new data to the buffer to avoid overflow. But
+these techniques are beyond the scope of this document.
+
+For a detailed description of a specific error code please refer also to
+::AAC_DECODER_ERROR.
+
+\section BufferSystem Buffer System
+
+There are three main buffers in an AAC decoder application. One external input
+buffer to hold bitstream data from file I/O or elsewhere, one decoder-internal
+input buffer, and one to hold the decoded output PCM sample data. In resource
+limited applications, the output buffer may be reused as an external input
+buffer prior to the subsequent aacDecoder_Fill() function call.
+
+To feed the data to the decoder-internal input buffer, use the
+function aacDecoder_Fill(). This function returns important information
+regarding the number of bytes in the external input buffer that have not yet
+been copied into the internal input buffer (variable bytesValid). Once the
+external buffer has been fully copied, it can be completely re-filled again. In
+case you wish to refill the buffer while there are unprocessed bytes (bytesValid
+is unequal 0), you should preserve the unconsumed data. However, we recommend to
+refill the buffer only when bytesValid returns 0.
+
+The bytesValid parameter is an input and output parameter to the FDK decoder. As
+an input, it signals how many valid bytes are available in the external buffer.
+After consumption of the external buffer using aacDecoder_Fill() function, the
+bytesValid parameter indicates if any of the bytes in the external buffer were
+not consumed.
+
+\image latex dec_buffer.png "Life cycle of the external input buffer" width=9cm
+
+\page OutputFormat Decoder audio output
+
+\section OutputFormatObtaining Obtaining channel mapping information
+
+The decoded audio output format is indicated by a set of variables of the
+CStreamInfo structure. While the struct members sampleRate, frameSize and
+numChannels might be self explanatory, pChannelType and pChannelIndices require
+some further explanation.
+
+These two arrays indicate the configuration of channel data within the output
+buffer. Both arrays have CStreamInfo::numChannels number of cells. Each cell of
+pChannelType indicates the channel type, which is described in the enum
+::AUDIO_CHANNEL_TYPE (defined in FDK_audio.h). The cells of pChannelIndices
+indicate the sub index among the channels starting with 0 among channels of the
+same audio channel type.
+
+The indexing scheme is structured as defined in MPEG-2/4 Standards. Indices
+start from the front direction (a center channel if available, will always be
+index 0) and increment, starting with the left side, pairwise (e.g. L, R) and
+from front to back (Front L, Front R, Surround L, Surround R). For detailed
+explanation, please refer to ISO/IEC 13818-7:2005(E), chapter 8.5.3.2.
+
+In case a Program Config is included in the audio configuration, the channel
+mapping described within it will be adopted.
+
+The examples below explain these aspects in detail.
+
+\section OutputFormatChange Changing the audio output format
+
+For MPEG-4 audio the channel order can be changed at runtime through the
+parameter
+::AAC_PCM_OUTPUT_CHANNEL_MAPPING. See the description of those
+parameters and the decoder library function aacDecoder_SetParam() for more
+detail.
+
+\section OutputFormatExample Channel mapping examples
+
+The following examples illustrate the location of individual audio samples in
+the audio buffer that is passed to aacDecoder_DecodeFrame() and the expected
+data in the CStreamInfo structure which can be obtained by calling
+aacDecoder_GetStreamInfo().
+
+\subsection ExamplesStereo Stereo
+
+In case of ::AAC_PCM_OUTPUT_CHANNEL_MAPPING set to 1,
+a AAC-LC bit stream which has channelConfiguration = 2 in its audio specific
+config would lead to the following values in CStreamInfo:
+
+CStreamInfo::numChannels = 2
+
+CStreamInfo::pChannelType = { ::ACT_FRONT, ::ACT_FRONT }
+
+CStreamInfo::pChannelIndices = { 0, 1 }
+
+The output buffer will be formatted as follows:
+
+\verbatim
+  <left sample 0>  <left sample 1>  <left sample 2>  ... <left sample N>
+  <right sample 0> <right sample 1> <right sample 2> ... <right sample N>
+\endverbatim
+
+Where N equals to CStreamInfo::frameSize .
+
+\subsection ExamplesSurround Surround 5.1
+
+In case of ::AAC_PCM_OUTPUT_CHANNEL_MAPPING set to 1,
+a AAC-LC bit stream which has channelConfiguration = 6 in its audio specific
+config, would lead to the following values in CStreamInfo:
+
+CStreamInfo::numChannels = 6
+
+CStreamInfo::pChannelType = { ::ACT_FRONT, ::ACT_FRONT, ::ACT_FRONT, ::ACT_LFE,
+::ACT_BACK, ::ACT_BACK }
+
+CStreamInfo::pChannelIndices = { 1, 2, 0, 0, 0, 1 }
+
+Since ::AAC_PCM_OUTPUT_CHANNEL_MAPPING is 1, WAV file channel ordering will be
+used. For a 5.1 channel scheme, thus the channels would be: front left, front
+right, center, LFE, surround left, surround right. Thus the third channel is the
+center channel, receiving the index 0. The other front channels are front left,
+front right being placed as first and second channels with indices 1 and 2
+correspondingly. There is only one LFE, placed as the fourth channel and index
+0. Finally both surround channels get the type definition ACT_BACK, and the
+indices 0 and 1.
+
+The output buffer will be formatted as follows:
+
+\verbatim
+<front left sample 0> <front right sample 0>
+<center sample 0> <LFE sample 0>
+<surround left sample 0> <surround right sample 0>
+
+<front left sample 1> <front right sample 1>
+<center sample 1> <LFE sample 1>
+<surround left sample 1> <surround right sample 1>
+
+...
+
+<front left sample N> <front right sample N>
+<center sample N> <LFE sample N>
+<surround left sample N> <surround right sample N>
+\endverbatim
+
+Where N equals to CStreamInfo::frameSize .
+
+\subsection ExamplesArib ARIB coding mode 2/1
+
+In case of ::AAC_PCM_OUTPUT_CHANNEL_MAPPING set to 1,
+in case of a ARIB bit stream using coding mode 2/1 as described in ARIB STD-B32
+Part 2 Version 2.1-E1, page 61, would lead to the following values in
+CStreamInfo:
+
+CStreamInfo::numChannels = 3
+
+CStreamInfo::pChannelType = { ::ACT_FRONT, ::ACT_FRONT, ::ACT_BACK }
+
+CStreamInfo::pChannelIndices = { 0, 1, 0 }
+
+The audio channels will be placed as follows in the audio output buffer:
+
+\verbatim
+<front left sample 0> <front right sample 0>  <mid surround sample 0>
+
+<front left sample 1> <front right sample 1> <mid surround sample 1>
+
+...
+
+<front left sample N> <front right sample N> <mid surround sample N>
+
+Where N equals to CStreamInfo::frameSize .
+
+\endverbatim
+
+*/
+
+#include "machine_type.h"
+#include "FDK_audio.h"
+
+#define AACDECODER_LIB_VL0 3
+#define AACDECODER_LIB_VL1 2
+#define AACDECODER_LIB_VL2 0
+
+#include "genericStds.h"
+/**
+ * \brief  AAC decoder error codes.
+ */
+typedef enum {
+  AAC_DEC_OK =
+      0x0000, /*!< No error occurred. Output buffer is valid and error free. */
+  AAC_DEC_OUT_OF_MEMORY =
+      0x0002, /*!< Heap returned NULL pointer. Output buffer is invalid. */
+  AAC_DEC_UNKNOWN =
+      0x0005, /*!< Error condition is of unknown reason, or from another
+                 module. Output buffer is invalid. */
+
+  /* Synchronization errors. Output buffer is invalid. */
+  aac_dec_sync_error_start = 0x1000,
+  AAC_DEC_TRANSPORT_SYNC_ERROR = 0x1001, /*!< The transport decoder had
+                                            synchronization problems. Do not
+                                            exit decoding. Just feed new
+                                              bitstream data. */
+  AAC_DEC_NOT_ENOUGH_BITS = 0x1002, /*!< The input buffer ran out of bits. */
+  aac_dec_sync_error_end = 0x1FFF,
+
+  /* Initialization errors. Output buffer is invalid. */
+  aac_dec_init_error_start = 0x2000,
+  AAC_DEC_INVALID_HANDLE =
+      0x2001, /*!< The handle passed to the function call was invalid (NULL). */
+  AAC_DEC_UNSUPPORTED_AOT =
+      0x2002, /*!< The AOT found in the configuration is not supported. */
+  AAC_DEC_UNSUPPORTED_FORMAT =
+      0x2003, /*!< The bitstream format is not supported.  */
+  AAC_DEC_UNSUPPORTED_ER_FORMAT =
+      0x2004, /*!< The error resilience tool format is not supported. */
+  AAC_DEC_UNSUPPORTED_EPCONFIG =
+      0x2005, /*!< The error protection format is not supported. */
+  AAC_DEC_UNSUPPORTED_MULTILAYER =
+      0x2006, /*!< More than one layer for AAC scalable is not supported. */
+  AAC_DEC_UNSUPPORTED_CHANNELCONFIG =
+      0x2007, /*!< The channel configuration (either number or arrangement) is
+                 not supported. */
+  AAC_DEC_UNSUPPORTED_SAMPLINGRATE = 0x2008, /*!< The sample rate specified in
+                                                the configuration is not
+                                                supported. */
+  AAC_DEC_INVALID_SBR_CONFIG =
+      0x2009, /*!< The SBR configuration is not supported. */
+  AAC_DEC_SET_PARAM_FAIL = 0x200A,  /*!< The parameter could not be set. Either
+                                       the value was out of range or the
+                                       parameter does not exist. */
+  AAC_DEC_NEED_TO_RESTART = 0x200B, /*!< The decoder needs to be restarted,
+                                       since the required configuration change
+                                       cannot be performed. */
+  AAC_DEC_OUTPUT_BUFFER_TOO_SMALL =
+      0x200C, /*!< The provided output buffer is too small. */
+  aac_dec_init_error_end = 0x2FFF,
+
+  /* Decode errors. Output buffer is valid but concealed. */
+  aac_dec_decode_error_start = 0x4000,
+  AAC_DEC_TRANSPORT_ERROR =
+      0x4001, /*!< The transport decoder encountered an unexpected error. */
+  AAC_DEC_PARSE_ERROR = 0x4002, /*!< Error while parsing the bitstream. Most
+                                   probably it is corrupted, or the system
+                                   crashed. */
+  AAC_DEC_UNSUPPORTED_EXTENSION_PAYLOAD =
+      0x4003, /*!< Error while parsing the extension payload of the bitstream.
+                 The extension payload type found is not supported. */
+  AAC_DEC_DECODE_FRAME_ERROR = 0x4004, /*!< The parsed bitstream value is out of
+                                          range. Most probably the bitstream is
+                                          corrupt, or the system crashed. */
+  AAC_DEC_CRC_ERROR = 0x4005,          /*!< The embedded CRC did not match. */
+  AAC_DEC_INVALID_CODE_BOOK = 0x4006,  /*!< An invalid codebook was signaled.
+                                          Most probably the bitstream is corrupt,
+                                          or the system crashed. */
+  AAC_DEC_UNSUPPORTED_PREDICTION =
+      0x4007, /*!< Predictor found, but not supported in the AAC Low Complexity
+                 profile. Most probably the bitstream is corrupt, or has a wrong
+                 format. */
+  AAC_DEC_UNSUPPORTED_CCE = 0x4008, /*!< A CCE element was found which is not
+                                       supported. Most probably the bitstream is
+                                       corrupt, or has a wrong format. */
+  AAC_DEC_UNSUPPORTED_LFE = 0x4009, /*!< A LFE element was found which is not
+                                       supported. Most probably the bitstream is
+                                       corrupt, or has a wrong format. */
+  AAC_DEC_UNSUPPORTED_GAIN_CONTROL_DATA =
+      0x400A, /*!< Gain control data found but not supported. Most probably the
+                 bitstream is corrupt, or has a wrong format. */
+  AAC_DEC_UNSUPPORTED_SBA =
+      0x400B, /*!< SBA found, but currently not supported in the BSAC profile.
+               */
+  AAC_DEC_TNS_READ_ERROR = 0x400C, /*!< Error while reading TNS data. Most
+                                      probably the bitstream is corrupt or the
+                                      system crashed. */
+  AAC_DEC_RVLC_ERROR =
+      0x400D, /*!< Error while decoding error resilient data. */
+  aac_dec_decode_error_end = 0x4FFF,
+  /* Ancillary data errors. Output buffer is valid. */
+  aac_dec_anc_data_error_start = 0x8000,
+  AAC_DEC_ANC_DATA_ERROR =
+      0x8001, /*!< Non severe error concerning the ancillary data handling. */
+  AAC_DEC_TOO_SMALL_ANC_BUFFER = 0x8002,  /*!< The registered ancillary data
+                                             buffer is too small to receive the
+                                             parsed data. */
+  AAC_DEC_TOO_MANY_ANC_ELEMENTS = 0x8003, /*!< More than the allowed number of
+                                             ancillary data elements should be
+                                             written to buffer. */
+  aac_dec_anc_data_error_end = 0x8FFF
+
+} AAC_DECODER_ERROR;
+
+/** Macro to identify initialization errors (aac_dec_init_error_start ..
+ * aac_dec_init_error_end = 0x2FFF). Output buffer is invalid. */
+#define IS_INIT_ERROR(err)                                                    \
+  ((((err) >= aac_dec_init_error_start) && ((err) <= aac_dec_init_error_end)) \
+       ? 1                                                                    \
+       : 0)
+/** Macro to identify decode errors (range 0x4000 .. 0x4FFF). Output buffer is
+ * valid but concealed. */
+#define IS_DECODE_ERROR(err)                 \
+  ((((err) >= aac_dec_decode_error_start) && \
+    ((err) <= aac_dec_decode_error_end))     \
+       ? 1                                   \
+       : 0)
+/**
+ * Macro to identify if the audio output buffer contains valid samples after
+ * calling aacDecoder_DecodeFrame(): either no error occurred, or a decode
+ * error (see ::IS_DECODE_ERROR) for which the output is valid but concealed.
+ */
+#define IS_OUTPUT_VALID(err) (((err) == AAC_DEC_OK) || IS_DECODE_ERROR(err))
+
+/*! \enum  AAC_MD_PROFILE
+ *  \brief The available metadata profiles which are mostly related to downmixing. The values define the arguments
+ *         for the use with parameter ::AAC_METADATA_PROFILE.
+ */
+typedef enum {
+  AAC_MD_PROFILE_MPEG_STANDARD =
+      0, /*!< The standard profile creates a mixdown signal based on the
+            advanced downmix metadata (from a DSE). The equations and default
+            values are defined in ISO/IEC 14496:3 Amendment 4. Any other
+            (legacy) downmix metadata will be ignored. No other parameter will
+            be modified.         */
+  AAC_MD_PROFILE_MPEG_LEGACY =
+      1, /*!< This profile behaves identically to the standard profile if advanced
+              downmix metadata (from a DSE) is available. If not, the
+            matrix_mixdown information embedded in the program configuration
+            element (PCE) will be applied. If neither is the case, the module
+            creates a mixdown using the default coefficients as defined in
+            ISO/IEC 14496:3 AMD 4. The profile can be used to support legacy
+            digital TV (e.g. DVB) streams.           */
+  AAC_MD_PROFILE_MPEG_LEGACY_PRIO =
+      2, /*!< Similar to the ::AAC_MD_PROFILE_MPEG_LEGACY profile but if both
+            the advanced (ISO/IEC 14496:3 AMD 4) and the legacy (PCE) MPEG
+            downmix metadata are available the latter will be applied.
+          */
+  AAC_MD_PROFILE_ARIB_JAPAN =
+      3 /*!< Downmix creation as described in ABNT NBR 15602-2. But if advanced
+             downmix metadata (ISO/IEC 14496:3 AMD 4) is available it will be
+             preferred because of the higher resolutions. In addition the
+           metadata expiry time will be set to the value defined in the ARIB
+           standard (see ::AAC_METADATA_EXPIRY_TIME).
+         */
+} AAC_MD_PROFILE;
+
+/*! \enum  AAC_DRC_DEFAULT_PRESENTATION_MODE_OPTIONS
+ *  \brief Options for handling of DRC parameters, if the presentation mode is not indicated in the bitstream
+ */
+typedef enum {
+  AAC_DRC_PARAMETER_HANDLING_DISABLED = -1, /*!< DRC parameter handling
+                                               disabled, all parameters are
+                                               applied as requested. */
+  AAC_DRC_PARAMETER_HANDLING_ENABLED =
+      0, /*!< Apply changes to requested DRC parameters to prevent clipping. */
+  AAC_DRC_PRESENTATION_MODE_1_DEFAULT =
+      1, /*!< Use DRC presentation mode 1 as default (e.g. for Nordig) */
+  AAC_DRC_PRESENTATION_MODE_2_DEFAULT =
+      2 /*!< Use DRC presentation mode 2 as default (e.g. for DTG DBook) */
+} AAC_DRC_DEFAULT_PRESENTATION_MODE_OPTIONS;
+
+/**
+ * \brief AAC decoder setting parameters
+ */
+typedef enum {
+  AAC_PCM_DUAL_CHANNEL_OUTPUT_MODE =
+      0x0002, /*!< Defines how the decoder processes two channel signals: \n
+                   0: Leave both signals as they are (default). \n
+                   1: Create a dual mono output signal from channel 1. \n
+                   2: Create a dual mono output signal from channel 2. \n
+                   3: Create a dual mono output signal by mixing both channels
+                 (L' = R' = 0.5*Ch1 + 0.5*Ch2). */
+  AAC_PCM_OUTPUT_CHANNEL_MAPPING =
+      0x0003, /*!< Output buffer channel ordering. 0: MPEG PCE style order, 1:
+                 WAV file channel order (default). */
+  AAC_PCM_LIMITER_ENABLE =
+      0x0004,                           /*!< Enable signal level limiting. \n
+                                             -1: Auto-config. Enable limiter for all
+                                           non-lowdelay configurations by default. \n
+                                              0: Disable limiter in general. \n
+                                              1: Enable limiter always.
+                                             It is recommended to call the decoder
+                                           with a AACDEC_CLRHIST flag to reset all
+                                           states when the limiter switch is changed
+                                           explicitly. */
+  AAC_PCM_LIMITER_ATTACK_TIME = 0x0005, /*!< Signal level limiting attack time
+                                           in ms. Default configuration is 15
+                                           ms. Adjustable range from 1 ms to 15
+                                           ms. */
+  AAC_PCM_LIMITER_RELEAS_TIME = 0x0006, /*!< Signal level limiting release time
+                                           in ms. Default configuration is 50
+                                           ms. Adjustable time must be larger
+                                           than 0 ms. */
+  AAC_PCM_MIN_OUTPUT_CHANNELS =
+      0x0011, /*!< Minimum number of PCM output channels. If higher than the
+                 number of encoded audio channels, a simple channel extension is
+                 applied (see note 3 for exceptions). \n -1, 0: Disable channel
+                 extension feature. The decoder output contains the same number
+                 of channels as the encoded bitstream. \n 1:    This value is
+                 currently needed only together with the mix-down feature. See
+                          ::AAC_PCM_MAX_OUTPUT_CHANNELS and note 2 below. \n
+                    2:    Encoded mono signals will be duplicated to achieve a
+                 2/0/0.0 channel output configuration. \n 6:    The decoder
+                 tries to reorder encoded signals with less than six channels to
+                 achieve a 3/0/2.1 channel output signal. Missing channels will
+                 be filled with a zero signal. If reordering is not possible the
+                 empty channels will simply be appended. Only available if
+                 instance is configured to support multichannel output. \n 8:
+                 The decoder tries to reorder encoded signals with less than
+                 eight channels to achieve a 3/0/4.1 channel output signal.
+                 Missing channels will be filled with a zero signal. If
+                 reordering is not possible the empty channels will simply be
+                          appended. Only available if instance is configured to
+                 support multichannel output.\n NOTE: \n
+                     1. The channel signaling (CStreamInfo::pChannelType and
+                 CStreamInfo::pChannelIndices) will not be modified. Added empty
+                 channels will be signaled with channel type
+                        AUDIO_CHANNEL_TYPE::ACT_NONE. \n
+                     2. If the parameter value is greater than that of
+                 ::AAC_PCM_MAX_OUTPUT_CHANNELS both will be set to the same
+                 value. \n
+                     3. This parameter will be ignored if the number of encoded
+                 audio channels is greater than 8. */
+  AAC_PCM_MAX_OUTPUT_CHANNELS =
+      0x0012, /*!< Maximum number of PCM output channels. If lower than the
+                 number of encoded audio channels, downmixing is applied
+                 accordingly (see note 3 for exceptions). If dedicated metadata
+                 is available in the stream it will be used to achieve better
+                 mixing results. \n -1, 0: Disable downmixing feature. The
+                 decoder output contains the same number of channels as the
+                 encoded bitstream. \n 1:    All encoded audio configurations
+                 with more than one channel will be mixed down to one mono
+                 output signal. \n 2:    The decoder performs a stereo mix-down
+                 if the number of encoded audio channels is greater than two. \n 6:
+                 If the number of encoded audio channels is greater than six the
+                 decoder performs a mix-down to meet the target output
+                 configuration of 3/0/2.1 channels. Only available if instance
+                 is configured to support multichannel output. \n 8:    This
+                 value is currently needed only together with the channel
+                 extension feature. See ::AAC_PCM_MIN_OUTPUT_CHANNELS and note 2
+                 below. Only available if instance is configured to support
+                 multichannel output. \n NOTE: \n
+                     1. Down-mixing of any seven or eight channel configuration
+                 not defined in ISO/IEC 14496-3 PDAM 4 is not supported by this
+                 software version. \n
+                     2. If the parameter value is greater than zero but smaller
+                 than ::AAC_PCM_MIN_OUTPUT_CHANNELS both will be set to same
+                 value. \n
+                     3. This parameter will be ignored if the number of encoded
+                 audio channels is greater than 8. */
+  AAC_METADATA_PROFILE =
+      0x0020, /*!< See ::AAC_MD_PROFILE for all available values. */
+  AAC_METADATA_EXPIRY_TIME = 0x0021, /*!< Defines the time in ms after which all
+                                        the bitstream associated meta-data (DRC,
+                                        downmix coefficients, ...) will be reset
+                                        to default if no update has been
+                                        received. Negative values disable the
+                                        feature. */
+
+  AAC_CONCEAL_METHOD = 0x0100, /*!< Error concealment: Processing method. \n
+                                    0: Spectral muting. \n
+                                    1: Noise substitution (see ::CONCEAL_NOISE).
+                                  \n 2: Energy interpolation (adds additional
+                                  signal delay of one frame, see
+                                  ::CONCEAL_INTER. only some AOTs are
+                                  supported). \n */
+  AAC_DRC_BOOST_FACTOR =
+      0x0200, /*!< MPEG-4 / MPEG-D Dynamic Range Control (DRC): Scaling factor
+                 for boosting gain values. Defines how the boosting DRC factors
+                 (conveyed in the bitstream) will be applied to the decoded
+                 signal. The valid values range from 0 (don't apply boost
+                 factors) to 127 (fully apply boost factors). Default value is 0
+                 for MPEG-4 DRC and 127 for MPEG-D DRC. */
+  AAC_DRC_ATTENUATION_FACTOR = 0x0201, /*!< MPEG-4 / MPEG-D DRC: Scaling factor
+                                          for attenuating gain values. Same as
+                                            ::AAC_DRC_BOOST_FACTOR but for
+                                          attenuating DRC factors. */
+  AAC_DRC_REFERENCE_LEVEL =
+      0x0202, /*!< MPEG-4 / MPEG-D DRC: Target reference level / decoder target
+                 loudness.\n Defines the level below full-scale (quantized in
+                 steps of 0.25dB) to which the output audio signal will be
+                 normalized to by the DRC module.\n The parameter controls
+                 loudness normalization for both MPEG-4 DRC and MPEG-D DRC. The
+                 valid values range from 40 (-10 dBFS) to 127 (-31.75 dBFS).\n
+                   Example values:\n
+                   124 (-31 dBFS) for audio/video receivers (AVR) or other
+                 devices allowing audio playback with high dynamic range,\n 96
+                 (-24 dBFS) for TV sets or equivalent devices (default),\n 64
+                 (-16 dBFS) for mobile devices where the dynamic range of audio
+                 playback is restricted.\n Any value smaller than 0 switches off
+                 loudness normalization and MPEG-4 DRC. */
+  AAC_DRC_HEAVY_COMPRESSION =
+      0x0203, /*!< MPEG-4 DRC: En-/Disable DVB specific heavy compression (aka
+                 RF mode). If set to 1, the decoder will apply the compression
+                 values from the DVB specific ancillary data field. At the same
+                 time the MPEG-4 Dynamic Range Control tool will be disabled. By
+                   default, heavy compression is disabled. */
+  AAC_DRC_DEFAULT_PRESENTATION_MODE =
+      0x0204, /*!< MPEG-4 DRC: Default presentation mode (DRC parameter
+                 handling). \n Defines the handling of the DRC parameters boost
+                 factor, attenuation factor and heavy compression, if no
+                 presentation mode is indicated in the bitstream.\n For options,
+                 see ::AAC_DRC_DEFAULT_PRESENTATION_MODE_OPTIONS.\n Default:
+                 ::AAC_DRC_PARAMETER_HANDLING_DISABLED */
+  AAC_DRC_ENC_TARGET_LEVEL =
+      0x0205, /*!< MPEG-4 DRC: Encoder target level for light (i.e. not heavy)
+                 compression.\n If known, this declares the target reference
+                 level that was assumed at the encoder for calculation of
+                 limiting gains. The valid values range from 0 (full-scale) to
+                 127 (31.75 dB below full-scale). This parameter is used only
+                 with ::AAC_DRC_PARAMETER_HANDLING_ENABLED and ignored
+                 otherwise.\n Default: 127 (worst-case assumption).\n */
+  AAC_UNIDRC_SET_EFFECT = 0x0206, /*!< MPEG-D DRC: Request a DRC effect type for
+                                     selection of a DRC set.\n Supported indices
+                                     are:\n -1: DRC off. Completely disables
+                                     MPEG-D DRC.\n 0: None (default). Disables
+                                     MPEG-D DRC, but automatically enables DRC
+                                     if necessary to prevent clipping.\n 1: Late
+                                     night\n 2: Noisy environment\n 3: Limited
+                                     playback range\n 4: Low playback level\n 5:
+                                     Dialog enhancement\n 6: General
+                                     compression. Used for generally enabling
+                                     MPEG-D DRC without particular request.\n */
+  AAC_UNIDRC_ALBUM_MODE =
+      0x0207, /*!<  MPEG-D DRC: Enable album mode. 0: Disabled (default), 1:
+                 Enabled.\n Disabled album mode leads to application of gain
+                 sequences for fading in and out, if provided in the
+                 bitstream.\n Enabled album mode makes use of dedicated album
+                 loudness information, if provided in the bitstream.\n */
+  AAC_QMF_LOWPOWER =
+      0x0300, /*!< Quadrature Mirror Filter (QMF) Bank processing mode. \n
+                   -1: Use internal default. \n
+                    0: Use complex QMF data mode. \n
+                    1: Use real (low power) QMF data mode. \n */
+  AAC_TPDEC_CLEAR_BUFFER =
+      0x0603 /*!< Clear internal bit stream buffer of transport layers. The
+                decoder will start decoding at new data passed after this event
+                and any previous data is discarded. */
+
+} AACDEC_PARAM;
+
+/**
+ * \brief This structure gives information about the currently decoded audio
+ * data. All fields are read-only.
+ */
+typedef struct {
+  /* These five members are the only really relevant ones for the user. */
+  INT sampleRate; /*!< The sample rate in Hz of the decoded PCM audio signal. */
+  INT frameSize;  /*!< The frame size of the decoded PCM audio signal. \n
+                       Typically this is: \n
+                       1024 or 960 for AAC-LC \n
+                       2048 or 1920 for HE-AAC (v2) \n
+                       512 or 480 for AAC-LD and AAC-ELD \n
+                       768, 1024, 2048 or 4096 for USAC  */
+  INT numChannels; /*!< The number of output audio channels before the rendering
+                      module, i.e. the original channel configuration. */
+  AUDIO_CHANNEL_TYPE
+  *pChannelType; /*!< Audio channel type of each output audio channel. */
+  UCHAR *pChannelIndices; /*!< Audio channel index for each output audio
+                             channel. See ISO/IEC 13818-7:2005(E), 8.5.3.2
+                             Explicit channel mapping using a
+                             program_config_element() */
+  /* Decoder internal members. */
+  INT aacSampleRate; /*!< Sampling rate in Hz without SBR (from configuration
+                        info) divided by a (ELD) downscale factor if present. */
+  INT profile; /*!< MPEG-2 profile (from file header) (-1: not applicable (e. g.
+                  MPEG-4)).               */
+  AUDIO_OBJECT_TYPE
+  aot; /*!< Audio Object Type (from ASC): is set to the appropriate value
+          for MPEG-2 bitstreams (e. g. 2 for AAC-LC). */
+  INT channelConfig; /*!< Channel configuration (0: PCE defined, 1: mono, 2:
+                        stereo, ...                       */
+  INT bitRate;       /*!< Instantaneous bit rate.                   */
+  INT aacSamplesPerFrame;   /*!< Samples per frame for the AAC core (from ASC)
+                               divided by a (ELD) downscale factor if present. \n
+                                 Typically this is (with a downscale factor of 1):
+                               \n   1024 or 960 for AAC-LC \n   512 or 480 for
+                               AAC-LD   and AAC-ELD         */
+  INT aacNumChannels;       /*!< The number of audio channels after AAC core
+                               processing (before PS or MPS processing).       CAUTION: These
+                               are not the final number of output channels! */
+  AUDIO_OBJECT_TYPE extAot; /*!< Extension Audio Object Type (from ASC)   */
+  INT extSamplingRate; /*!< Extension sampling rate in Hz (from ASC) divided by
+                          a (ELD) downscale factor if present. */
+
+  UINT outputDelay; /*!< The number of samples the output is additionally
+                       delayed by the decoder. */
+  UINT flags; /*!< Copy of internal flags. Only to be written by the decoder,
+                 and only to be read externally. */
+
+  SCHAR epConfig; /*!< epConfig level (from ASC): only level 0 supported, -1
+                     means no ER (e. g. AOT=2, MPEG-2 AAC, etc.)  */
+  /* Statistics */
+  INT numLostAccessUnits; /*!< This integer will reflect the estimated amount of
+                             lost access units in case aacDecoder_DecodeFrame()
+                               returns AAC_DEC_TRANSPORT_SYNC_ERROR. It will be
+                             < 0 if the estimation failed. */
+
+  INT64 numTotalBytes; /*!< This is the number of total bytes that have passed
+                          through the decoder. */
+  INT64
+  numBadBytes; /*!< This is the number of total bytes that were considered
+                  with errors from numTotalBytes. */
+  INT64
+  numTotalAccessUnits;     /*!< This is the number of total access units that
+                              have passed through the decoder. */
+  INT64 numBadAccessUnits; /*!< This is the number of total access units that
+                              were considered with errors from numTotalAccessUnits. */
+
+  /* Metadata */
+  SCHAR drcProgRefLev; /*!< DRC program reference level. Defines the reference
+                          level below full-scale. It is quantized in steps of
+                          0.25dB. The valid values range from 0 (0 dBFS) to 127
+                          (-31.75 dBFS). It is used to reflect the average
+                          loudness of the audio in LKFS according to ITU-R BS
+                          1770. If no level has been found in the bitstream the
+                          value is -1. */
+  SCHAR
+  drcPresMode;        /*!< DRC presentation mode. According to ETSI TS 101 154,
+                         this field indicates whether   light (MPEG-4 Dynamic Range
+                         Control tool) or heavy compression (DVB heavy
+                         compression)   dynamic range control shall take priority
+                         on the outputs.   For details, see ETSI TS 101 154, table
+                         C.33. Possible values are: \n   -1: No corresponding
+                         metadata found in the bitstream \n   0: DRC presentation
+                         mode not indicated \n   1: DRC presentation mode 1 \n   2:
+                         DRC presentation mode 2 \n   3: Reserved */
+  INT outputLoudness; /*!< Audio output loudness in steps of -0.25 dB. Range: 0
+                         (0 dBFS) to 231 (-57.75 dBFS).\n  A value of -1
+                         indicates that no loudness metadata is present.\n  If
+                         loudness normalization is active, the value corresponds
+                         to the target loudness value set with
+                         ::AAC_DRC_REFERENCE_LEVEL.\n  If loudness normalization
+                         is not active, the output loudness value corresponds to
+                         the loudness metadata given in the bitstream.\n
+                           Loudness metadata can originate from MPEG-4 DRC or
+                         MPEG-D DRC. */
+
+} CStreamInfo;
+
+typedef struct AAC_DECODER_INSTANCE
+    *HANDLE_AACDECODER; /*!< Pointer to an AAC decoder instance. */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \brief Initialize ancillary data buffer.
+ *
+ * \param self    AAC decoder handle.
+ * \param buffer  Pointer to (external) ancillary data buffer.
+ * \param size    Size of the buffer pointed to by buffer.
+ * \return        Error code.
+ */
+LINKSPEC_H AAC_DECODER_ERROR aacDecoder_AncDataInit(HANDLE_AACDECODER self,
+                                                    UCHAR *buffer, int size);
+
+/**
+ * \brief Get one ancillary data element.
+ *
+ * \param self   AAC decoder handle.
+ * \param index  Index of the ancillary data element to get.
+ * \param ptr    Pointer to a buffer receiving a pointer to the requested
+ * ancillary data element.
+ * \param size   Pointer to a buffer receiving the length of the requested
+ * ancillary data element.
+ * \return       Error code.
+ */
+LINKSPEC_H AAC_DECODER_ERROR aacDecoder_AncDataGet(HANDLE_AACDECODER self,
+                                                   int index, UCHAR **ptr,
+                                                   int *size);
+
+/**
+ * \brief Set one single decoder parameter.
+ *
+ * \param self   AAC decoder handle.
+ * \param param  Parameter to be set.
+ * \param value  Parameter value.
+ * \return       Error code.
+ */
+LINKSPEC_H AAC_DECODER_ERROR aacDecoder_SetParam(const HANDLE_AACDECODER self,
+                                                 const AACDEC_PARAM param,
+                                                 const INT value);
+
+/**
+ * \brief              Get free bytes inside decoder internal buffer.
+ * \param self         Handle of AAC decoder instance.
+ * \param pFreeBytes   Pointer to variable receiving amount of free bytes inside
+ * decoder internal buffer.
+ * \return             Error code.
+ */
+LINKSPEC_H AAC_DECODER_ERROR
+aacDecoder_GetFreeBytes(const HANDLE_AACDECODER self, UINT *pFreeBytes);
+
+/**
+ * \brief               Open an AAC decoder instance.
+ * \param transportFmt  The transport type to be used.
+ * \param nrOfLayers    Number of transport layers.
+ * \return              AAC decoder handle.
+ */
+LINKSPEC_H HANDLE_AACDECODER aacDecoder_Open(TRANSPORT_TYPE transportFmt,
+                                             UINT nrOfLayers);
+
+/**
+ * \brief Explicitly configure the decoder by passing a raw AudioSpecificConfig
+ * (ASC) or a StreamMuxConfig (SMC), contained in a binary buffer. This is
+ * required for MPEG-4 and Raw Packets file format bitstreams as well as for
+ * LATM bitstreams with no in-band SMC. If the transport format is LATM with or
+ * without LOAS, configuration is assumed to be an SMC, for all other file
+ * formats an ASC.
+ *
+ * \param self    AAC decoder handle.
+ * \param conf    Pointer to an unsigned char buffer containing the binary
+ * configuration buffer (either ASC or SMC).
+ * \param length  Length of the configuration buffer in bytes.
+ * \return        Error code.
+ */
+LINKSPEC_H AAC_DECODER_ERROR aacDecoder_ConfigRaw(HANDLE_AACDECODER self,
+                                                  UCHAR *conf[],
+                                                  const UINT length[]);
+
+/**
+ * \brief Submit raw ISO base media file format boxes to decoder for parsing
+ * (only some box types are recognized).
+ *
+ * \param self    AAC decoder handle.
+ * \param buffer  Pointer to an unsigned char buffer containing the binary box
+ * data (including size and type, can be a sequence of multiple boxes).
+ * \param length  Length of the data in bytes.
+ * \return        Error code.
+ */
+LINKSPEC_H AAC_DECODER_ERROR aacDecoder_RawISOBMFFData(HANDLE_AACDECODER self,
+                                                       UCHAR *buffer,
+                                                       UINT length);
+
+/**
+ * \brief Fill AAC decoder's internal input buffer with bitstream data from the
+ * external input buffer. The function only copies such data as long as the
+ * decoder-internal input buffer is not full. So it grabs whatever it can from
+ * pBuffer and returns information (bytesValid) so that at a subsequent call of
+ * %aacDecoder_Fill(), the right position in pBuffer can be determined to grab
+ * the next data.
+ *
+ * \param self        AAC decoder handle.
+ * \param pBuffer     Pointer to external input buffer.
+ * \param bufferSize  Size of external input buffer. This argument is required
+ * because decoder-internally we need the information to calculate the offset to
+ * pBuffer, where the next available data is, which is then
+ * fed into the decoder-internal buffer (as much as
+ * possible). Our example framework implementation fills the
+ * buffer at pBuffer again, once it contains no available valid bytes anymore
+ * (meaning bytesValid equals 0).
+ * \param bytesValid  Number of bitstream bytes in the external bitstream buffer
+ * that have not yet been copied into the decoder's internal bitstream buffer by
+ * calling this function. The value is updated according to
+ * the amount of newly copied bytes.
+ * \return            Error code.
+ */
+LINKSPEC_H AAC_DECODER_ERROR aacDecoder_Fill(HANDLE_AACDECODER self,
+                                             UCHAR *pBuffer[],
+                                             const UINT bufferSize[],
+                                             UINT *bytesValid);
+
+/** Flag for aacDecoder_DecodeFrame(): Trigger the built-in error concealment
+ * module to generate a substitute signal for one lost frame. New input data
+ * will not be considered.
+ */
+#define AACDEC_CONCEAL 1
+/** Flag for aacDecoder_DecodeFrame(): Flush all filterbanks to get all delayed
+ * audio without having new input data. Thus new input data will not be
+ * considered.
+ */
+#define AACDEC_FLUSH 2
+/** Flag for aacDecoder_DecodeFrame(): Signal an input bit stream data
+ * discontinuity. Resync any internals as necessary.
+ */
+#define AACDEC_INTR 4
+/** Flag for aacDecoder_DecodeFrame(): Clear all signal delay lines and history
+ * buffers. CAUTION: This can cause discontinuities in the output signal.
+ */
+#define AACDEC_CLRHIST 8
+
+/**
+ * \brief               Decode one audio frame
+ *
+ * \param self          AAC decoder handle.
+ * \param pTimeData     Pointer to external output buffer where the decoded PCM
+ * samples will be stored into.
+ * \param timeDataSize  Size of external output buffer in PCM samples.
+ * \param flags         Bit field with flags for the decoder: \n
+ *                      (flags & AACDEC_CONCEAL) == 1: Do concealment. \n
+ *                      (flags & AACDEC_FLUSH) == 2: Discard input data. Flush
+ * filter banks (output delayed audio). \n (flags & AACDEC_INTR) == 4: Input
+ * data is discontinuous. Resynchronize any internals as
+ * necessary. \n (flags & AACDEC_CLRHIST) == 8: Clear all signal delay lines and
+ * history buffers.
+ * \return              Error code.
+ */
+LINKSPEC_H AAC_DECODER_ERROR aacDecoder_DecodeFrame(HANDLE_AACDECODER self,
+                                                    INT_PCM *pTimeData,
+                                                    const INT timeDataSize,
+                                                    const UINT flags);
+
+/**
+ * \brief       De-allocate all resources of an AAC decoder instance.
+ *
+ * \param self  AAC decoder handle.
+ * \return      void.
+ */
+LINKSPEC_H void aacDecoder_Close(HANDLE_AACDECODER self);
+
+/**
+ * \brief       Get CStreamInfo handle from decoder.
+ *
+ * \param self  AAC decoder handle.
+ * \return      Reference to requested CStreamInfo.
+ */
+LINKSPEC_H CStreamInfo *aacDecoder_GetStreamInfo(HANDLE_AACDECODER self);
+
+/**
+ * \brief       Get decoder library info.
+ *
+ * \param info  Pointer to an allocated LIB_INFO structure.
+ * \return      0 on success.
+ */
+LINKSPEC_H INT aacDecoder_GetLibInfo(LIB_INFO *info);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AACDECODER_LIB_H */

+ 1709 - 0
native/include/fdk-aac/aacenc_lib.h

@@ -0,0 +1,1709 @@
+/* -----------------------------------------------------------------------------
+Software License for The Fraunhofer FDK AAC Codec Library for Android
+
+© Copyright  1995 - 2021 Fraunhofer-Gesellschaft zur Förderung der angewandten
+Forschung e.V. All rights reserved.
+
+ 1.    INTRODUCTION
+The Fraunhofer FDK AAC Codec Library for Android ("FDK AAC Codec") is software
+that implements the MPEG Advanced Audio Coding ("AAC") encoding and decoding
+scheme for digital audio. This FDK AAC Codec software is intended to be used on
+a wide variety of Android devices.
+
+AAC's HE-AAC and HE-AAC v2 versions are regarded as today's most efficient
+general perceptual audio codecs. AAC-ELD is considered the best-performing
+full-bandwidth communications codec by independent studies and is widely
+deployed. AAC has been standardized by ISO and IEC as part of the MPEG
+specifications.
+
+Patent licenses for necessary patent claims for the FDK AAC Codec (including
+those of Fraunhofer) may be obtained through Via Licensing
+(www.vialicensing.com) or through the respective patent owners individually for
+the purpose of encoding or decoding bit streams in products that are compliant
+with the ISO/IEC MPEG audio standards. Please note that most manufacturers of
+Android devices already license these patent claims through Via Licensing or
+directly from the patent owners, and therefore FDK AAC Codec software may
+already be covered under those patent licenses when it is used for those
+licensed purposes only.
+
+Commercially-licensed AAC software libraries, including floating-point versions
+with enhanced sound quality, are also available from Fraunhofer. Users are
+encouraged to check the Fraunhofer website for additional applications
+information and documentation.
+
+2.    COPYRIGHT LICENSE
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted without payment of copyright license fees provided that you
+satisfy the following conditions:
+
+You must retain the complete text of this software license in redistributions of
+the FDK AAC Codec or your modifications thereto in source code form.
+
+You must retain the complete text of this software license in the documentation
+and/or other materials provided with redistributions of the FDK AAC Codec or
+your modifications thereto in binary form. You must make available free of
+charge copies of the complete source code of the FDK AAC Codec and your
+modifications thereto to recipients of copies in binary form.
+
+The name of Fraunhofer may not be used to endorse or promote products derived
+from this library without prior written permission.
+
+You may not charge copyright license fees for anyone to use, copy or distribute
+the FDK AAC Codec software or your modifications thereto.
+
+Your modified versions of the FDK AAC Codec must carry prominent notices stating
+that you changed the software and the date of any change. For modified versions
+of the FDK AAC Codec, the term "Fraunhofer FDK AAC Codec Library for Android"
+must be replaced by the term "Third-Party Modified Version of the Fraunhofer FDK
+AAC Codec Library for Android."
+
+3.    NO PATENT LICENSE
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PATENT CLAIMS, including without
+limitation the patents of Fraunhofer, ARE GRANTED BY THIS SOFTWARE LICENSE.
+Fraunhofer provides no warranty of patent non-infringement with respect to this
+software.
+
+You may use this FDK AAC Codec software or modifications thereto only for
+purposes that are authorized by appropriate patent licenses.
+
+4.    DISCLAIMER
+
+This FDK AAC Codec software is provided by Fraunhofer on behalf of the copyright
+holders and contributors "AS IS" and WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES,
+including but not limited to the implied warranties of merchantability and
+fitness for a particular purpose. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE for any direct, indirect, incidental, special, exemplary,
+or consequential damages, including but not limited to procurement of substitute
+goods or services; loss of use, data, or profits, or business interruption,
+however caused and on any theory of liability, whether in contract, strict
+liability, or tort (including negligence), arising in any way out of the use of
+this software, even if advised of the possibility of such damage.
+
+5.    CONTACT INFORMATION
+
+Fraunhofer Institute for Integrated Circuits IIS
+Attention: Audio and Multimedia Departments - FDK AAC LL
+Am Wolfsmantel 33
+91058 Erlangen, Germany
+
+www.iis.fraunhofer.de/amm
+amm-info@iis.fraunhofer.de
+----------------------------------------------------------------------------- */
+
+/**************************** AAC encoder library ******************************
+
+   Author(s):   M. Lohwasser
+
+   Description:
+
+*******************************************************************************/
+
+/**
+ * \file   aacenc_lib.h
+ * \brief  FDK AAC Encoder library interface header file.
+ *
+\mainpage  Introduction
+
+\section Scope
+
+This document describes the high-level interface and usage of the ISO/MPEG-2/4
+AAC Encoder library developed by the Fraunhofer Institute for Integrated
+Circuits (IIS).
+
+The library implements encoding on the basis of the MPEG-2 and MPEG-4 AAC
+Low-Complexity standard, and depending on the library's configuration, MPEG-4
+High-Efficiency AAC v2 and/or AAC-ELD standard.
+
+All references to SBR (Spectral Band Replication) are only applicable to HE-AAC
+or AAC-ELD versions of the library. All references to PS (Parametric Stereo) are
+only applicable to HE-AAC v2 versions of the library.
+
+\section encBasics Encoder Basics
+
+This document can only give a rough overview about the ISO/MPEG-2 and ISO/MPEG-4
+AAC audio coding standard. To understand all the terms in this document, you are
+encouraged to read the following documents.
+
+- ISO/IEC 13818-7 (MPEG-2 AAC), which defines the syntax of MPEG-2 AAC audio
+bitstreams.
+- ISO/IEC 14496-3 (MPEG-4 AAC, subparts 1 and 4), which defines the syntax of
+MPEG-4 AAC audio bitstreams.
+- Lutzky, Schuller, Gayer, Kr&auml;mer, Wabnik, "A guideline to audio codec
+delay", 116th AES Convention, May 8, 2004
+
+MPEG Advanced Audio Coding is based on a time-to-frequency mapping of the
+signal. The signal is partitioned into overlapping portions and transformed into
+frequency domain. The spectral components are then quantized and coded. \n An
+MPEG-2 or MPEG-4 AAC audio bitstream is composed of frames. Contrary to MPEG-1/2
+Layer-3 (mp3), the length of individual frames is not restricted to a fixed
+number of bytes, but can take on any length between 1 and 768 bytes.
+
+
+\page LIBUSE Library Usage
+
+\section InterfaceDescription API Files
+
+All API header files are located in the folder /include of the release package.
+All header files are provided for usage in C/C++ programs. The AAC encoder
+library API functions are located in aacenc_lib.h.
+
+\section CallingSequence Calling Sequence
+
+For encoding of ISO/MPEG-2/4 AAC bitstreams the following sequence is mandatory.
+Input read and output write functions as well as the corresponding open and
+close functions are left out, since they may be implemented differently
+according to the user's specific requirements. The example implementation uses
+file-based input/output.
+
+-# Call aacEncOpen() to allocate encoder instance with required \ref encOpen
+"configuration". \code HANDLE_AACENCODER hAacEncoder = NULL; if ( (ErrorStatus =
+aacEncOpen(&hAacEncoder,0,0)) != AACENC_OK ) { \endcode
+-# Call aacEncoder_SetParam() for each parameter to be set. AOT, samplingrate,
+channelMode, bitrate and transport type are \ref encParams "mandatory". \code
+ErrorStatus = aacEncoder_SetParam(hAacEncoder, parameter, value);
+\endcode
+-# Call aacEncEncode() with NULL parameters to \ref encReconf "initialize"
+encoder instance with present parameter set. \code ErrorStatus =
+aacEncEncode(hAacEncoder, NULL, NULL, NULL, NULL); \endcode
+-# Call aacEncInfo() to retrieve a configuration data block to be transmitted
+out of band. This is required when using RFC3640 or RFC3016 like transport.
+\code
+AACENC_InfoStruct encInfo;
+aacEncInfo(hAacEncoder, &encInfo);
+\endcode
+-# Encode input audio data in loop.
+\code
+do
+{
+\endcode
+Feed \ref feedInBuf "input buffer" with new audio data and provide input/output
+\ref bufDes "arguments" to aacEncEncode(). \code ErrorStatus =
+aacEncEncode(hAacEncoder, &inBufDesc, &outBufDesc, &inargs, &outargs); \endcode
+Write \ref writeOutData "output data" to file or audio device.
+\code
+} while (ErrorStatus==AACENC_OK);
+\endcode
+-# Call aacEncClose() and destroy encoder instance.
+\code
+aacEncClose(&hAacEncoder);
+\endcode
+
+
+\section encOpen Encoder Instance Allocation
+
+The assignment of the aacEncOpen() function is very flexible and can be used in
+the following way.
+- If the amount of memory consumption is not an issue, the encoder instance can
+be allocated for the maximum number of possible audio channels (for example 6 or
+8) with the full functional range supported by the library. This is the default
+open procedure for the AAC encoder if memory consumption does not need to be
+minimized. \code aacEncOpen(&hAacEncoder,0,0) \endcode
+- If the required MPEG-4 AOTs do not call for the full functional range of the
+library, encoder modules can be allocated selectively. \verbatim
+------------------------------------------------------
+ AAC | SBR |  PS | MD |         FLAGS         | value
+-----+-----+-----+----+-----------------------+-------
+  X  |  -  |  -  |  - | (0x01)                |  0x01
+  X  |  X  |  -  |  - | (0x01|0x02)           |  0x03
+  X  |  X  |  X  |  - | (0x01|0x02|0x04)      |  0x07
+  X  |  -  |  -  |  X | (0x01          |0x10) |  0x11
+  X  |  X  |  -  |  X | (0x01|0x02     |0x10) |  0x13
+  X  |  X  |  X  |  X | (0x01|0x02|0x04|0x10) |  0x17
+------------------------------------------------------
+ - AAC: Allocate AAC Core Encoder module.
+ - SBR: Allocate Spectral Band Replication module.
+ - PS: Allocate Parametric Stereo module.
+ - MD: Allocate Meta Data module within AAC encoder.
+\endverbatim
+\code aacEncOpen(&hAacEncoder,value,0) \endcode
+- Specifying the maximum number of channels to be supported in the encoder
+instance can be done as follows.
+ - For example allocate an encoder instance which supports 2 channels for all
+supported AOTs. The library itself may be capable of encoding up to 6 or 8
+channels but in this example only 2 channel encoding is required and thus only
+buffers for 2 channels are allocated to save data memory. \code
+aacEncOpen(&hAacEncoder,0,2) \endcode
+ - Additionally the maximum number of supported channels in the SBR module can
+be denoted separately.\n In this example the encoder instance provides a maximum
+of 6 channels out of which up to 2 channels support SBR. This encoder instance
+can produce for example 5.1 channel AAC-LC streams or stereo HE-AAC (v2)
+streams. HE-AAC 5.1 multi channel is not possible since only 2 out of 6 channels
+support SBR, which saves data memory. \code aacEncOpen(&hAacEncoder,0,6|(2<<8))
+\endcode \n
+
+\section bufDes Input/Output Arguments
+
+\subsection allocIOBufs Provide Buffer Descriptors
+In the present encoder API, the input and output buffers are described with \ref
+AACENC_BufDesc "buffer descriptors". This mechanism allows a flexible handling
+of input and output buffers without impact to the actual encoding call. Optional
+buffers are necessary e.g. for ancillary data, meta data input or additional
+output buffers describing superframing data in DAB+ or DRM+.\n At least one
+input buffer for audio input data and one output buffer for bitstream data must
+be allocated. The input buffer size can be a user defined multiple of the number
+of input channels. PCM input data will be copied from the user defined PCM
+buffer to an internal input buffer and so input data can be less than one AAC
+audio frame. The output buffer size should be 6144 bits per channel excluding
+the LFE channel. If the output data does not fit into the provided buffer, an
+AACENC_ERROR will be returned by aacEncEncode(). \code static INT_PCM
+inputBuffer[8*2048]; static UCHAR            ancillaryBuffer[50]; static
+AACENC_MetaData  metaDataSetup; static UCHAR            outputBuffer[8192];
+\endcode
+
+All input and output buffer must be clustered in input and output buffer arrays.
+\code
+static void* inBuffer[]        = { inputBuffer, ancillaryBuffer, &metaDataSetup
+}; static INT   inBufferIds[]     = { IN_AUDIO_DATA, IN_ANCILLRY_DATA,
+IN_METADATA_SETUP }; static INT   inBufferSize[]    = { sizeof(inputBuffer),
+sizeof(ancillaryBuffer), sizeof(metaDataSetup) }; static INT   inBufferElSize[]
+= { sizeof(INT_PCM), sizeof(UCHAR), sizeof(AACENC_MetaData) };
+
+static void* outBuffer[]       = { outputBuffer };
+static INT   outBufferIds[]    = { OUT_BITSTREAM_DATA };
+static INT   outBufferSize[]   = { sizeof(outputBuffer) };
+static INT   outBufferElSize[] = { sizeof(UCHAR) };
+\endcode
+
+Allocate buffer descriptors
+\code
+AACENC_BufDesc inBufDesc;
+AACENC_BufDesc outBufDesc;
+\endcode
+
+Initialize input buffer descriptor
+\code
+inBufDesc.numBufs            = sizeof(inBuffer)/sizeof(void*);
+inBufDesc.bufs              = (void**)&inBuffer;
+inBufDesc.bufferIdentifiers = inBufferIds;
+inBufDesc.bufSizes          = inBufferSize;
+inBufDesc.bufElSizes        = inBufferElSize;
+\endcode
+
+Initialize output buffer descriptor
+\code
+outBufDesc.numBufs           = sizeof(outBuffer)/sizeof(void*);
+outBufDesc.bufs              = (void**)&outBuffer;
+outBufDesc.bufferIdentifiers = outBufferIds;
+outBufDesc.bufSizes          = outBufferSize;
+outBufDesc.bufElSizes        = outBufferElSize;
+\endcode
+
+\subsection argLists Provide Input/Output Argument Lists
+The input and output arguments of an aacEncEncode() call are described in
+argument structures. \code AACENC_InArgs     inargs; AACENC_OutArgs    outargs;
+\endcode
+
+\section feedInBuf Feed Input Buffer
+The input buffer should be handled as a modulo buffer. New audio data in the
+form of pulse-code- modulated samples (PCM) must be read from external and be
+fed to the input buffer depending on its fill level. The required sample bitrate
+(represented by the data type INT_PCM which is 16, 24 or 32 bits wide) is fixed
+and depends on library configuration (usually 16 bit). \code inargs.numInSamples
++= WAV_InputRead ( wavIn, &inputBuffer[inargs.numInSamples],
+                                       FDKmin(encInfo.inputChannels*encInfo.frameLength,
+                                              sizeof(inputBuffer) /
+                                              sizeof(INT_PCM)-inargs.numInSamples),
+                                       SAMPLE_BITS
+                                     );
+\endcode
+
+After the encoder's internal buffer is fed with incoming audio samples, and
+aacEncEncode() processed the new input data, update/move remaining samples in
+input buffer, simulating a modulo buffer: \code if (outargs.numInSamples>0) {
+    FDKmemmove( inputBuffer,
+                &inputBuffer[outargs.numInSamples],
+                sizeof(INT_PCM)*(inargs.numInSamples-outargs.numInSamples) );
+    inargs.numInSamples -= outargs.numInSamples;
+}
+\endcode
+
+\section writeOutData Output Bitstream Data
+If any AAC bitstream data is available, write it to output file or device as
+follows. \code if (outargs.numOutBytes>0) { FDKfwrite(outputBuffer,
+outargs.numOutBytes, 1, pOutFile);
+}
+\endcode
+
+\section cfgMetaData Meta Data Configuration
+
+If the present library is configured with Metadata support, it is possible to
+insert meta data side info into the generated audio bitstream while encoding.
+
+To work with meta data the encoder instance has to be \ref encOpen "allocated"
+with meta data support. The meta data mode must be configured with the
+::AACENC_METADATA_MODE parameter and aacEncoder_SetParam() function. \code
+aacEncoder_SetParam(hAacEncoder, AACENC_METADATA_MODE, 0-3); \endcode
+
+This configuration indicates how to embed meta data into the bitstream. Either no
+insertion, MPEG or ETSI style. The meta data itself must be specified within the
+meta data setup structure AACENC_MetaData.
+
+Changing one of the AACENC_MetaData setup parameters can be achieved from
+outside the library within ::IN_METADATA_SETUP input buffer. There is no need to
+supply meta data setup structure every frame. If there is no new meta setup data
+available, the encoder uses the previous setup or the default configuration in
+initial state.
+
+In general the audio compressor and limiter within the encoder library can be
+configured with the ::AACENC_METADATA_DRC_PROFILE parameter
+AACENC_MetaData::drc_profile and AACENC_MetaData::comp_profile.
+\n
+
+\section encReconf Encoder Reconfiguration
+
+The encoder library allows reconfiguration of the encoder instance with new
+settings continuously between encoding frames. Each parameter to be changed must
+be set with a single aacEncoder_SetParam() call. The internal status of each
+parameter can be retrieved with an aacEncoder_GetParam() call.\n There is no
+stand-alone reconfiguration function available. When parameters were modified
+from outside the library, an internal control mechanism triggers the necessary
+reconfiguration process which will be applied at the beginning of the following
+aacEncEncode() call. This state can be observed from external via the
+AACENC_INIT_STATUS and aacEncoder_GetParam() function. The reconfiguration
+process can also be applied immediately when all parameters of an aacEncEncode()
+call are NULL with a valid encoder handle.\n\n The internal reconfiguration
+process can be controlled from extern with the following access. \code
+aacEncoder_SetParam(hAacEncoder, AACENC_CONTROL_STATE, AACENC_CTRLFLAGS);
+\endcode
+
+
+\section encParams Encoder Parametrization
+
+All parameters listed in ::AACENC_PARAM can be modified within an encoder
+instance.
+
+\subsection encMandatory Mandatory Encoder Parameters
+The following parameters must be specified when the encoder instance is
+initialized. \code aacEncoder_SetParam(hAacEncoder, AACENC_AOT, value);
+aacEncoder_SetParam(hAacEncoder, AACENC_BITRATE, value);
+aacEncoder_SetParam(hAacEncoder, AACENC_SAMPLERATE, value);
+aacEncoder_SetParam(hAacEncoder, AACENC_CHANNELMODE, value);
+\endcode
+Beyond that there is an internal auto mode which preinitializes the ::AACENC_BITRATE
+parameter if the parameter was not set from extern. The bitrate depends on the
+number of effective channels and sampling rate and is determined as follows.
+\code
+AAC-LC (AOT_AAC_LC): 1.5 bits per sample
+HE-AAC (AOT_SBR): 0.625 bits per sample (dualrate sbr)
+HE-AAC (AOT_SBR): 1.125 bits per sample (downsampled sbr)
+HE-AAC v2 (AOT_PS): 0.5 bits per sample
+\endcode
+
+\subsection channelMode Channel Mode Configuration
+The input audio data is described with the ::AACENC_CHANNELMODE parameter in the
+aacEncoder_SetParam() call. It is not possible to use the encoder instance with
+a 'number of input channels' argument. Instead, the channelMode must be set as
+follows. \code aacEncoder_SetParam(hAacEncoder, AACENC_CHANNELMODE, value);
+\endcode The parameter is specified in ::CHANNEL_MODE and can be mapped from the
+number of input channels in the following way. \code CHANNEL_MODE chMode =
+MODE_INVALID;
+
+switch (nChannels) {
+  case 1:  chMode = MODE_1;          break;
+  case 2:  chMode = MODE_2;          break;
+  case 3:  chMode = MODE_1_2;        break;
+  case 4:  chMode = MODE_1_2_1;      break;
+  case 5:  chMode = MODE_1_2_2;      break;
+  case 6:  chMode = MODE_1_2_2_1;    break;
+  case 7:  chMode = MODE_6_1;        break;
+  case 8:  chMode = MODE_7_1_BACK;   break;
+  default:
+    chMode = MODE_INVALID;
+}
+return chMode;
+\endcode
+
+\subsection peakbitrate Peak Bitrate Configuration
+In AAC, the default bitreservoir configuration depends on the chosen bitrate per
+frame and the number of effective channels. The size can be determined as below.
+\f[
+bitreservoir = nEffChannels*6144 - (bitrate*framelength/samplerate)
+\f]
+Due to audio quality concerns it is not recommended to change the bitreservoir
+size to a lower value than the default setting! However, for minimizing the
+delay for streaming applications or for achieving a constant size of the
+bitstream packages in each frame, it may be necessary to limit the maximum bits
+per frame size. This can be done with the ::AACENC_PEAK_BITRATE parameter. \code
+aacEncoder_SetParam(hAacEncoder, AACENC_PEAK_BITRATE, value);
+\endcode
+
+To achieve acceptable audio quality with a reduced bitreservoir size setting at
+least 1000 bits per audio channel is recommended. For a multichannel audio file
+with 5.1 channels the bitreservoir reduced to 5000 bits results in acceptable
+audio quality.
+
+
+\subsection vbrmode Variable Bitrate Mode
+The variable bitrate (VBR) mode coding adapts the bit consumption to the
+psychoacoustic requirements of the signal. The encoder ignores the user-defined
+bit rate and selects a suitable pre-defined configuration based on the provided
+AOT. The VBR mode 1 is tuned for HE-AACv2, for VBR mode 2, HE-AACv1 should be
+used. VBR modes 3-5 should be used with Low-Complexity AAC. When encoding
+AAC-ELD, the best mode is selected automatically.
+
+The bitrates given in the table are averages over time and different encoder
+settings. They strongly depend on the type of audio signal. The VBR
+configurations can be adjusted with the ::AACENC_BITRATEMODE encoder parameter.
+\verbatim
+-----------------------------------------------
+ VBR_MODE | Approx. Bitrate in kbps for stereo
+          |     AAC-LC    |      AAC-ELD
+----------+---------------+--------------------
+    VBR_1 | 32 (HE-AACv2) |         48
+    VBR_2 | 72 (HE-AACv1) |         56
+    VBR_3 |      112      |         72
+    VBR_4 |      148      |        148
+    VBR_5 |      228      |        224
+--------------------------------------------
+\endverbatim
+Note that these figures are valid for stereo encoding only. VBR modes 2-5 will
+yield much lower bit rates when encoding single-channel input. For
+configurations which are making use of downmix modules the AAC core channels
+respectively downmix channels shall be considered.
+
+\subsection encQual Audio Quality Considerations
+The default encoder configuration is suggested to be used. Encoder tools such as
+TNS and PNS are activated by default and are internally controlled (see \ref
+BEHAVIOUR_TOOLS).
+
+There is an additional quality parameter called ::AACENC_AFTERBURNER. In the
+default configuration this quality switch is deactivated because it would cause
+a workload increase which might be significant. If workload is not an issue in
+the application we recommended to activate this feature. \code
+aacEncoder_SetParam(hAacEncoder, AACENC_AFTERBURNER, 0/1); \endcode
+
+\subsection encELD ELD Auto Configuration Mode
+For ELD configuration a so called auto configurator is available which
+configures SBR and the SBR ratio by itself. The configurator is used when the
+encoder parameter ::AACENC_SBR_MODE and ::AACENC_SBR_RATIO are not set
+explicitly.
+
+Based on sampling rate and chosen bitrate a reasonable SBR configuration will be
+used. \verbatim
+------------------------------------------------------------------
+ Sampling Rate |   Total Bitrate | No. of | SBR |       SBR Ratio
+     [kHz]     |      [bit/s]    |  Chan  |     |
+               |                 |        |     |
+---------------+-----------------+--------+-----+-----------------
+     ]min, 16[ |    min -    max |      1 | off |             ---
+---------------+-----------------+--------------+-----------------
+          [16] |    min -  27999 |      1 |  on | downsampled SBR
+               |  28000 -    max |      1 | off |             ---
+---------------+-----------------+--------------+-----------------
+     ]16 - 24] |    min -  39999 |      1 |  on | downsampled SBR
+               |  40000 -    max |      1 | off |             ---
+---------------+-----------------+--------------+-----------------
+     ]24 - 32] |    min -  27999 |      1 |  on |    dualrate SBR
+               |  28000 -  55999 |      1 |  on | downsampled SBR
+               |  56000 -    max |      1 | off |             ---
+---------------+-----------------+--------------+-----------------
+   ]32 - 44.1] |    min -  63999 |      1 |  on |    dualrate SBR
+               |  64000 -    max |      1 | off |             ---
+---------------+-----------------+--------------+-----------------
+   ]44.1 - 48] |    min -  63999 |      1 |  on |    dualrate SBR
+               |  64000 -  max   |      1 | off |             ---
+               |                 |        |     |
+---------------+-----------------+--------+-----+-----------------
+     ]min, 16[ |    min -    max |      2 | off |             ---
+---------------+-----------------+--------------+-----------------
+          [16] |    min -  31999 |      2 |  on | downsampled SBR
+               |  32000 -  63999 |      2 |  on | downsampled SBR
+               |  64000 -    max |      2 | off |             ---
+---------------+-----------------+--------------+-----------------
+     ]16 - 24] |    min -  47999 |      2 |  on | downsampled SBR
+               |  48000 -  79999 |      2 |  on | downsampled SBR
+               |  80000 -    max |      2 | off |             ---
+---------------+-----------------+--------------+-----------------
+     ]24 - 32] |    min -  31999 |      2 |  on |    dualrate SBR
+               |  32000 -  67999 |      2 |  on |    dualrate SBR
+               |  68000 -  95999 |      2 |  on | downsampled SBR
+               |  96000 -    max |      2 | off |             ---
+---------------+-----------------+--------------+-----------------
+   ]32 - 44.1] |    min -  43999 |      2 |  on |    dualrate SBR
+               |  44000 - 127999 |      2 |  on |    dualrate SBR
+               | 128000 -    max |      2 | off |             ---
+---------------+-----------------+--------------+-----------------
+   ]44.1 - 48] |    min -  43999 |      2 |  on |    dualrate SBR
+               |  44000 - 127999 |      2 |  on |    dualrate SBR
+               | 128000 -  max   |      2 | off |             ---
+               |                 |              |
+------------------------------------------------------------------
+\endverbatim
+
+\subsection encDsELD Reduced Delay (Downscaled) Mode
+The downscaled mode of AAC-ELD reduces the algorithmic delay of AAC-ELD by
+virtually increasing the sampling rate. When using the downscaled mode, the
+bitrate should be increased for keeping the same audio quality level. For common
+signals, the bitrate should be increased by 25% for a downscale factor of 2.
+
+Currently, downscaling factors 2 and 4 are supported.
+To enable the downscaled mode in the encoder, the framelength parameter
+AACENC_GRANULE_LENGTH must be set accordingly to 256 or 240 for a downscale
+factor of 2 or 128 or 120 for a downscale factor of 4. The default values of 512
+or 480 mean that no downscaling is applied. \code
+aacEncoder_SetParam(hAacEncoder, AACENC_GRANULE_LENGTH, 256);
+aacEncoder_SetParam(hAacEncoder, AACENC_GRANULE_LENGTH, 128);
+\endcode
+
+Downscaled bitstreams are fully backwards compatible. However, the legacy
+decoder needs to support high sample rate, e.g. 96kHz. The signaled sampling
+rate is multiplied by the downscale factor. Although not required, downscaling
+should be applied when decoding downscaled bitstreams. It reduces CPU workload
+and the output will have the same sampling rate as the input. In an ideal
+configuration both encoder and decoder should run with the same downscale
+factor.
+
+The following table shows approximate filter bank delays in ms for common
+sampling rates(sr) at framesize(fs), and downscale factor(dsf), based on this
+formula: \f[ 1000 * fs / (dsf * sr) \f]
+
+\verbatim
+--------------------------------------
+      | 512/2 | 512/4 | 480/2 | 480/4
+------+-------+-------+-------+-------
+22050 | 17.41 |  8.71 | 16.33 |  8.16
+32000 | 12.00 |  6.00 | 11.25 |  5.62
+44100 |  8.71 |  4.35 |  8.16 |  4.08
+48000 |  8.00 |  4.00 |  7.50 |  3.75
+--------------------------------------
+\endverbatim
+
+\section audiochCfg Audio Channel Configuration
+The MPEG standard refers often to the so-called Channel Configuration. This
+Channel Configuration is used for a fixed Channel Mapping. The configurations
+1-7 and 11,12,14 are predefined in MPEG standard and used for implicit
+signalling within the encoded bitstream. For user defined Configurations the
+Channel Configuration is set to 0 and the Channel Mapping must be explicitly
+described with an appropriate Program Config Element. The present Encoder
+implementation does not allow the user to configure this Channel Configuration
+from extern. The Encoder implementation supports fixed Channel Modes which are
+mapped to Channel Configuration as follow. \verbatim
+----------------------------------------------------------------------------------------
+ ChannelMode           | ChCfg | Height | front_El      | side_El  | back_El  |
+lfe_El
+-----------------------+-------+--------+---------------+----------+----------+---------
+MODE_1                 |     1 | NORM   | SCE           |          |          |
+MODE_2                 |     2 | NORM   | CPE           |          |          |
+MODE_1_2               |     3 | NORM   | SCE, CPE      |          |          |
+MODE_1_2_1             |     4 | NORM   | SCE, CPE      |          | SCE      |
+MODE_1_2_2             |     5 | NORM   | SCE, CPE      |          | CPE      |
+MODE_1_2_2_1           |     6 | NORM   | SCE, CPE      |          | CPE      |
+LFE MODE_1_2_2_2_1         |     7 | NORM   | SCE, CPE, CPE |          | CPE
+| LFE MODE_6_1               |    11 | NORM   | SCE, CPE      |          | CPE,
+SCE | LFE MODE_7_1_BACK          |    12 | NORM   | SCE, CPE      |          |
+CPE, CPE | LFE
+-----------------------+-------+--------+---------------+----------+----------+---------
+MODE_7_1_TOP_FRONT     |    14 | NORM   | SCE, CPE      |          | CPE      |
+LFE |       | TOP    | CPE           |          |          |
+-----------------------+-------+--------+---------------+----------+----------+---------
+MODE_7_1_REAR_SURROUND |     0 | NORM   | SCE, CPE      |          | CPE, CPE |
+LFE MODE_7_1_FRONT_CENTER  |     0 | NORM   | SCE, CPE, CPE |          | CPE
+| LFE
+----------------------------------------------------------------------------------------
+- NORM: Normal Height Layer.     - TOP: Top Height Layer.  - BTM: Bottom Height
+Layer.
+- SCE: Single Channel Element.   - CPE: Channel Pair.      - LFE: Low Frequency
+Element. \endverbatim
+
+The Table describes all fixed Channel Elements for each Channel Mode which are
+assigned to a speaker arrangement. The arrangement includes front, side, back
+and lfe Audio Channel Elements in the normal height layer, possibly followed by
+front, side, and back elements in the top and bottom layer (Channel
+Configuration 14). \n This mapping of Audio Channel Elements is defined in MPEG
+standard for Channel Config 1-7 and 11,12,14.\n In case of Channel Config 0 or
+writing matrix mixdown coefficients, the encoder enables the writing of Program
+Config Element itself as described in \ref encPCE. The configuration used in
+Program Config Element refers to the denoted Table.\n Beside the Channel Element
+assignment the Channel Modes are responsible for audio input data channel
+mapping. The Channel Mapping of the audio data depends on the selected
+::AACENC_CHANNELORDER which can be MPEG or WAV like order.\n Following table
+describes the complete channel mapping for both Channel Order configurations.
+\verbatim
+---------------------------------------------------------------------------------------
+ChannelMode            |  MPEG-Channelorder            |  WAV-Channelorder
+-----------------------+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---
+MODE_1                 | 0 |   |   |   |   |   |   |   | 0 |   |   |   |   |   |
+| MODE_2                 | 0 | 1 |   |   |   |   |   |   | 0 | 1 |   |   |   |
+|   | MODE_1_2               | 0 | 1 | 2 |   |   |   |   |   | 2 | 0 | 1 |   |
+|   |   | MODE_1_2_1             | 0 | 1 | 2 | 3 |   |   |   |   | 2 | 0 | 1 | 3
+|   |   |   | MODE_1_2_2             | 0 | 1 | 2 | 3 | 4 |   |   |   | 2 | 0 | 1
+| 3 | 4 |   |   | MODE_1_2_2_1           | 0 | 1 | 2 | 3 | 4 | 5 |   |   | 2 | 0
+| 1 | 4 | 5 | 3 |   | MODE_1_2_2_2_1         | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 2
+| 6 | 7 | 0 | 1 | 4 | 5 | 3 MODE_6_1               | 0 | 1 | 2 | 3 | 4 | 5 | 6 |
+| 2 | 0 | 1 | 4 | 5 | 6 | 3 | MODE_7_1_BACK          | 0 | 1 | 2 | 3 | 4 | 5 | 6
+| 7 | 2 | 0 | 1 | 6 | 7 | 4 | 5 | 3 MODE_7_1_TOP_FRONT     | 0 | 1 | 2 | 3 | 4 |
+5 | 6 | 7 | 2 | 0 | 1 | 4 | 5 | 3 | 6 | 7
+-----------------------+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---
+MODE_7_1_REAR_SURROUND | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 2 | 0 | 1 | 6 | 7 | 4 |
+5 | 3 MODE_7_1_FRONT_CENTER  | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 2 | 6 | 7 | 0 | 1
+| 4 | 5 | 3
+---------------------------------------------------------------------------------------
+\endverbatim
+
+The denoted mapping is important for correct audio channel assignment when using
+MPEG or WAV ordering. The incoming audio channels are distributed MPEG like
+starting at the front channels and ending at the back channels. The distribution
+is used as described in the Table concerning Channel Config and fixed channel elements.
+Please see the following example for clarification.
+
+\verbatim
+Example: MODE_1_2_2_1 - WAV-Channelorder 5.1
+------------------------------------------
+ Input Channel      | Coder Channel
+--------------------+---------------------
+ 2 (front center)   | 0 (SCE channel)
+ 0 (left center)    | 1 (1st of 1st CPE)
+ 1 (right center)   | 2 (2nd of 1st CPE)
+ 4 (left surround)  | 3 (1st of 2nd CPE)
+ 5 (right surround) | 4 (2nd of 2nd CPE)
+ 3 (LFE)            | 5 (LFE)
+------------------------------------------
+\endverbatim
+
+
+\section suppBitrates Supported Bitrates
+
+The FDK AAC Encoder provides a wide range of supported bitrates.
+The minimum and maximum allowed bitrate depends on the Audio Object Type. For
+AAC-LC the minimum bitrate is the bitrate that is required to write the most
+basic and minimal valid bitstream. It consists of the bitstream format header
+information and other static/mandatory information within the AAC payload. The
+maximum AAC framesize allowed by the MPEG-4 standard determines the maximum
+allowed bitrate for AAC-LC. For HE-AAC and HE-AAC v2 a library internal look-up
+table is used.
+
+A good working point in terms of audio quality, sampling rate and bitrate, is at
+1 to 1.5 bits/audio sample for AAC-LC, 0.625 bits/audio sample for dualrate
+HE-AAC, 1.125 bits/audio sample for downsampled HE-AAC and 0.5 bits/audio sample
+for HE-AAC v2. For example for one channel with a sampling frequency of 48 kHz,
+the range from 48 kbit/s to 72 kbit/s achieves reasonable audio quality for
+AAC-LC.
+
+For HE-AAC and HE-AAC v2 the lowest possible audio input sampling frequency is
+16 kHz because then the AAC-LC core encoder operates in dual rate mode at its
+lowest possible sampling frequency, which is 8 kHz. HE-AAC v2 requires stereo
+input audio data.
+
+Please note that in HE-AAC or HE-AAC v2 mode the encoder supports much higher
+bitrates than are appropriate for HE-AAC or HE-AAC v2. For example, at a bitrate
+of more than 64 kbit/s for a stereo audio signal at 44.1 kHz it usually makes
+sense to use AAC-LC, which will produce better audio quality at that bitrate
+than HE-AAC or HE-AAC v2.
+
+\section reommendedConfig Recommended Sampling Rate and Bitrate Combinations
+
+The following table provides an overview of recommended encoder configuration
+parameters which we determined by virtue of numerous listening tests.
+
+\subsection reommendedConfigLC AAC-LC, HE-AAC, HE-AACv2 in Dualrate SBR mode.
+\verbatim
+-----------------------------------------------------------------------------------
+Audio Object Type  |  Bit Rate Range  |            Supported  | Preferred  | No.
+of |         [bit/s]  |       Sampling Rates  |    Sampl.  |  Chan. |
+|                [kHz]  |      Rate  | |                  |
+|     [kHz]  |
+-------------------+------------------+-----------------------+------------+-------
+AAC LC + SBR + PS  |   8000 -  11999  |         22.05, 24.00  |     24.00  | 2
+AAC LC + SBR + PS  |  12000 -  17999  |                32.00  |     32.00  | 2
+AAC LC + SBR + PS  |  18000 -  39999  |  32.00, 44.10, 48.00  |     44.10  | 2
+AAC LC + SBR + PS  |  40000 -  64000  |  32.00, 44.10, 48.00  |     48.00  | 2
+-------------------+------------------+-----------------------+------------+-------
+AAC LC + SBR       |   8000 -  11999  |         22.05, 24.00  |     24.00  | 1
+AAC LC + SBR       |  12000 -  17999  |                32.00  |     32.00  | 1
+AAC LC + SBR       |  18000 -  39999  |  32.00, 44.10, 48.00  |     44.10  | 1
+AAC LC + SBR       |  40000 -  64000  |  32.00, 44.10, 48.00  |     48.00  | 1
+-------------------+------------------+-----------------------+------------+-------
+AAC LC + SBR       |  16000 -  27999  |  32.00, 44.10, 48.00  |     32.00  | 2
+AAC LC + SBR       |  28000 -  63999  |  32.00, 44.10, 48.00  |     44.10  | 2
+AAC LC + SBR       |  64000 - 128000  |  32.00, 44.10, 48.00  |     48.00  | 2
+-------------------+------------------+-----------------------+------------+-------
+AAC LC + SBR       |  64000 -  69999  |  32.00, 44.10, 48.00  |     32.00  |
+5, 5.1 AAC LC + SBR       |  70000 - 239999  |  32.00, 44.10, 48.00  |     44.10
+| 5, 5.1 AAC LC + SBR       | 240000 - 319999  |  32.00, 44.10, 48.00  |
+48.00  | 5, 5.1
+-------------------+------------------+-----------------------+------------+-------
+AAC LC             |   8000 -  15999  | 11.025, 12.00, 16.00  |     12.00  | 1
+AAC LC             |  16000 -  23999  |                16.00  |     16.00  | 1
+AAC LC             |  24000 -  31999  |  16.00, 22.05, 24.00  |     24.00  | 1
+AAC LC             |  32000 -  55999  |                32.00  |     32.00  | 1
+AAC LC             |  56000 - 160000  |  32.00, 44.10, 48.00  |     44.10  | 1
+AAC LC             | 160001 - 288000  |                48.00  |     48.00  | 1
+-------------------+------------------+-----------------------+------------+-------
+AAC LC             |  16000 -  23999  | 11.025, 12.00, 16.00  |     12.00  | 2
+AAC LC             |  24000 -  31999  |                16.00  |     16.00  | 2
+AAC LC             |  32000 -  39999  |  16.00, 22.05, 24.00  |     22.05  | 2
+AAC LC             |  40000 -  95999  |                32.00  |     32.00  | 2
+AAC LC             |  96000 - 111999  |  32.00, 44.10, 48.00  |     32.00  | 2
+AAC LC             | 112000 - 320001  |  32.00, 44.10, 48.00  |     44.10  | 2
+AAC LC             | 320002 - 576000  |                48.00  |     48.00  | 2
+-------------------+------------------+-----------------------+------------+-------
+AAC LC             | 160000 - 239999  |                32.00  |     32.00  |
+5, 5.1 AAC LC             | 240000 - 279999  |  32.00, 44.10, 48.00  |     32.00
+| 5, 5.1 AAC LC             | 280000 - 800000  |  32.00, 44.10, 48.00  |
+44.10  | 5, 5.1
+-----------------------------------------------------------------------------------
+\endverbatim \n
+
+\subsection reommendedConfigLD AAC-LD, AAC-ELD, AAC-ELD with SBR in Dualrate SBR
+mode. Unlike the HE-AAC configuration, SBR is not covered by the ELD audio object
+type and needs to be enabled explicitly. Use ::AACENC_SBR_MODE to configure SBR
+and its samplingrate ratio with ::AACENC_SBR_RATIO parameter. \verbatim
+-----------------------------------------------------------------------------------
+Audio Object Type  |  Bit Rate Range  |            Supported  | Preferred  | No.
+of |         [bit/s]  |       Sampling Rates  |    Sampl.  |  Chan. |
+|                [kHz]  |      Rate  | |                  |
+|     [kHz]  |
+-------------------+------------------+-----------------------+------------+-------
+ELD + SBR          |  18000 -  24999  |        32.00 - 44.10  |     32.00  | 1
+ELD + SBR          |  25000 -  31999  |        32.00 - 48.00  |     32.00  | 1
+ELD + SBR          |  32000 -  64000  |        32.00 - 48.00  |     48.00  | 1
+-------------------+------------------+-----------------------+------------+-------
+ELD + SBR          |  32000 -  51999  |        32.00 - 48.00  |     44.10  | 2
+ELD + SBR          |  52000 - 128000  |        32.00 - 48.00  |     48.00  | 2
+-------------------+------------------+-----------------------+------------+-------
+ELD + SBR          |  78000 - 160000  |        32.00 - 48.00  |     48.00  | 3
+-------------------+------------------+-----------------------+------------+-------
+ELD + SBR          | 104000 - 212000  |        32.00 - 48.00  |     48.00  | 4
+-------------------+------------------+-----------------------+------------+-------
+ELD + SBR          | 130000 - 246000  |        32.00 - 48.00  |     48.00  |
+5, 5.1
+-------------------+------------------+-----------------------+------------+-------
+LD, ELD            |  16000 -  19999  |        16.00 - 24.00  |     16.00  | 1
+LD, ELD            |  20000 -  39999  |        16.00 - 32.00  |     24.00  | 1
+LD, ELD            |  40000 -  49999  |        22.05 - 32.00  |     32.00  | 1
+LD, ELD            |  50000 -  61999  |        24.00 - 44.10  |     32.00  | 1
+LD, ELD            |  62000 -  84999  |        32.00 - 48.00  |     44.10  | 1
+LD, ELD            |  85000 - 192000  |        44.10 - 48.00  |     48.00  | 1
+-------------------+------------------+-----------------------+------------+-------
+LD, ELD            |  64000 -  75999  |        24.00 - 32.00  |     32.00  | 2
+LD, ELD            |  76000 -  97999  |        24.00 - 44.10  |     32.00  | 2
+LD, ELD            |  98000 - 135999  |        32.00 - 48.00  |     44.10  | 2
+LD, ELD            | 136000 - 384000  |        44.10 - 48.00  |     48.00  | 2
+-------------------+------------------+-----------------------+------------+-------
+LD, ELD            |  96000 - 113999  |        24.00 - 32.00  |     32.00  | 3
+LD, ELD            | 114000 - 146999  |        24.00 - 44.10  |     32.00  | 3
+LD, ELD            | 147000 - 203999  |        32.00 - 48.00  |     44.10  | 3
+LD, ELD            | 204000 - 576000  |        44.10 - 48.00  |     48.00  | 3
+-------------------+------------------+-----------------------+------------+-------
+LD, ELD            | 128000 - 151999  |        24.00 - 32.00  |     32.00  | 4
+LD, ELD            | 152000 - 195999  |        24.00 - 44.10  |     32.00  | 4
+LD, ELD            | 196000 - 271999  |        32.00 - 48.00  |     44.10  | 4
+LD, ELD            | 272000 - 768000  |        44.10 - 48.00  |     48.00  | 4
+-------------------+------------------+-----------------------+------------+-------
+LD, ELD            | 160000 - 189999  |        24.00 - 32.00  |     32.00  |
+5, 5.1 LD, ELD            | 190000 - 244999  |        24.00 - 44.10  |     32.00
+| 5, 5.1 LD, ELD            | 245000 - 339999  |        32.00 - 48.00  |
+44.10  | 5, 5.1 LD, ELD            | 340000 - 960000  |        44.10 - 48.00  |
+48.00  | 5, 5.1
+-----------------------------------------------------------------------------------
+\endverbatim \n
+
+\subsection reommendedConfigELD AAC-ELD with SBR in Downsampled SBR mode.
+\verbatim
+-----------------------------------------------------------------------------------
+Audio Object Type  |  Bit Rate Range  |            Supported  | Preferred  | No.
+of |         [bit/s]  |       Sampling Rates  |    Sampl.  |  Chan. |
+|                [kHz]  |      Rate  | |                  |
+|     [kHz]  |
+-------------------+------------------+-----------------------+------------+-------
+ELD + SBR          |  18000 - 24999   |        16.00 - 22.05  |     22.05  | 1
+(downsampled SBR)  |  25000 - 31999   |        16.00 - 24.00  |     24.00  | 1
+                   |  32000 - 47999   |        22.05 - 32.00  |     32.00  | 1
+                   |  48000 - 64000   |        22.05 - 48.00  |     32.00  | 1
+-------------------+------------------+-----------------------+------------+-------
+ELD + SBR          |  32000 - 51999   |        16.00 - 24.00  |     24.00  | 2
+(downsampled SBR)  |  52000 - 59999   |        22.05 - 24.00  |     24.00  | 2
+                   |  60000 - 95999   |        22.05 - 32.00  |     32.00  | 2
+                   |  96000 - 128000  |        22.05 - 48.00  |     32.00  | 2
+-------------------+------------------+-----------------------+------------+-------
+ELD + SBR          |  78000 -  99999  |        22.05 - 24.00  |     24.00  | 3
+(downsampled SBR)  | 100000 - 143999  |        22.05 - 32.00  |     32.00  | 3
+                   | 144000 - 159999  |        22.05 - 48.00  |     32.00  | 3
+                   | 160000 - 192000  |        32.00 - 48.00  |     32.00  | 3
+-------------------+------------------+-----------------------+------------+-------
+ELD + SBR          | 104000 - 149999  |        22.05 - 24.00  |     24.00  | 4
+(downsampled SBR)  | 150000 - 191999  |        22.05 - 32.00  |     32.00  | 4
+                   | 192000 - 211999  |        22.05 - 48.00  |     32.00  | 4
+                   | 212000 - 256000  |        32.00 - 48.00  |     32.00  | 4
+-------------------+------------------+-----------------------+------------+-------
+ELD + SBR          | 130000 - 171999  |        22.05 - 24.00  |     24.00  |
+5, 5.1 (downsampled SBR)  | 172000 - 239999  |        22.05 - 32.00  |     32.00
+| 5, 5.1 | 240000 - 320000  |        32.00 - 48.00  |     32.00  | 5, 5.1
+-----------------------------------------------------------------------------------
+\endverbatim \n
+
+\subsection reommendedConfigELDv2 AAC-ELD v2, AAC-ELD v2 with SBR.
+The ELD v2 212 configuration must be configured explicitly with
+::AACENC_CHANNELMODE parameter according to the MODE_212 value. SBR can be configured
+separately through ::AACENC_SBR_MODE and ::AACENC_SBR_RATIO parameter. Following
+configurations shall apply to both framelengths 480 and 512. For ELD v2
+configuration without SBR and framelength 480 the supported sampling rate is
+restricted to the range from 16 kHz up to 24 kHz. \verbatim
+-----------------------------------------------------------------------------------
+Audio Object Type  |  Bit Rate Range  |            Supported  | Preferred  | No.
+of |         [bit/s]  |       Sampling Rates  |    Sampl.  |  Chan. |
+|                [kHz]  |      Rate  | |                  |
+|     [kHz]  |
+-------------------+------------------+-----------------------+------------+-------
+ELD-212            |  16000 -  19999  |        16.00 - 24.00  |     16.00  | 2
+(without SBR)      |  20000 -  39999  |        16.00 - 32.00  |     24.00  | 2
+                   |  40000 -  49999  |        22.05 - 32.00  |     32.00  | 2
+                   |  50000 -  61999  |        24.00 - 44.10  |     32.00  | 2
+                   |  62000 -  84999  |        32.00 - 48.00  |     44.10  | 2
+                   |  85000 - 192000  |        44.10 - 48.00  |     48.00  | 2
+-------------------+------------------+-----------------------+------------+-------
+ELD-212 + SBR      |  18000 -  20999  |                32.00  |     32.00  | 2
+(dualrate SBR)     |  21000 -  25999  |        32.00 - 44.10  |     32.00  | 2
+                   |  26000 -  31999  |        32.00 - 48.00  |     44.10  | 2
+                   |  32000 -  64000  |        32.00 - 48.00  |     48.00  | 2
+-------------------+------------------+-----------------------+------------+-------
+ELD-212 + SBR      |  18000 -  19999  |        16.00 - 22.05  |     22.05  | 2
+(downsampled SBR)  |  20000 -  24999  |        16.00 - 24.00  |     22.05  | 2
+                   |  25000 -  31999  |        16.00 - 24.00  |     24.00  | 2
+                   |  32000 -  64000  |        24.00 - 24.00  |     24.00  | 2
+-------------------+------------------+-----------------------+------------+-------
+\endverbatim \n
+
+\page ENCODERBEHAVIOUR Encoder Behaviour
+
+\section BEHAVIOUR_BANDWIDTH Bandwidth
+
+The FDK AAC encoder usually does not use the full frequency range of the input
+signal, but restricts the bandwidth according to certain library-internal
+settings. They can be changed in the table "bandWidthTable" in the file
+bandwidth.cpp (if available).
+
+The encoder API provides the ::AACENC_BANDWIDTH parameter to adjust the
+bandwidth explicitly. \code aacEncoder_SetParam(hAacEncoder, AACENC_BANDWIDTH,
+value); \endcode
+
+However it is not recommended to change these settings, because they are based
+on numerous listening tests and careful tweaks to ensure the best overall
+encoding quality. Also, the maximum bandwidth that can be set manually by the
+user is 20kHz or fs/2, whichever value is smaller.
+
+Theoretically a signal sampled at, for example, 48 kHz can contain frequencies up to 24
+kHz, but to use this full range in an audio encoder usually does not make sense.
+Usually the encoder has a very limited amount of bits to spend (typically 128
+kbit/s for stereo 48 kHz content) and to allow full range bandwidth would waste
+a lot of these bits for frequencies the human ear is hardly able to perceive
+anyway, if at all. Hence it is wise to use the available bits for the really
+important frequency range and just skip the rest. At lower bitrates (e. g. <= 80
+kbit/s for stereo 48 kHz content) the encoder will choose an even smaller
+bandwidth, because an encoded signal with smaller bandwidth and hence less
+artifacts sounds better than a signal with higher bandwidth but then more coding
+artifacts across all frequencies. These artifacts would occur if small bitrates
+and high bandwidths are chosen because the available bits are just not enough to
+encode all frequencies well.
+
+Unfortunately some people evaluate encoding quality based on possible bandwidth
+as well, but it is a double-edged sword considering the trade-off described
+above.
+
+Another aspect is workload consumption. The higher the allowed bandwidth, the
+more frequency lines have to be processed, which in turn increases the workload.
+
+\section FRAMESIZES_AND_BIT_RESERVOIR Frame Sizes & Bit Reservoir
+
+For AAC there is a difference between constant bit rate and constant frame
+length due to the so-called bit reservoir technique, which allows the encoder to
+use less bits in an AAC frame for those audio signal sections which are easy to
+encode, and then spend them at a later point in time for more complex audio
+sections. The extent to which this "bit exchange" is done is limited to allow
+for reliable and relatively low delay real time streaming. Therefore, for
+AAC-ELD, the bitreservoir is limited. It varies between 500 and 4000 bits/frame,
+depending on the bitrate/channel.
+- For a bitrate of 12kbps/channel and below, the AAC-ELD bitreservoir is 500
+bits/frame.
+- For a bitrate of 70kbps/channel and above, the AAC-ELD bitreservoir is 4000
+bits/frame.
+- Between 12kbps/channel and 70kbps/channel, the AAC-ELD bitreservoir is increased
+linearly.
+- For AAC-LC, the bitrate is only limited by the maximum AAC frame length. It
+is, regardless of the available bit reservoir, defined as 6144 bits per channel.
+
+Over a longer period in time the bitrate will be constant in the AAC constant
+bitrate mode, e.g. for ISDN transmission. This means that in AAC each bitstream
+frame will in general have a different length in bytes but over time it
+will reach the target bitrate.
+
+
+One could also make an MPEG compliant
+AAC encoder which always produces constant length packages for each AAC frame,
+but the audio quality would be considerably worse since the bit reservoir
+technique would have to be switched off completely. A higher bit rate would have
+to be used to get the same audio quality as with an enabled bit reservoir.
+
+For mp3 by the way, the same bit reservoir technique exists, but there each bit
+stream frame has a constant length for a given bit rate (ignoring the
+padding byte). In mp3 there is a so-called "back pointer" which tells
+the decoder which bits belong to the current mp3 frame - and in general some or
+many bits have been transmitted in an earlier mp3 frame. Basically this leads to
+the same "bit exchange between mp3 frames" as in AAC but with virtually constant
+length frames.
+
+This variable frame length at "constant bit rate" is not something special
+in this Fraunhofer IIS AAC encoder. AAC has been designed in that way.
+
+\subsection BEHAVIOUR_ESTIM_AVG_FRAMESIZES Estimating Average Frame Sizes
+
+A HE-AAC v1 or v2 audio frame contains 2048 PCM samples per channel.
+
+The number of HE-AAC frames \f$N\_FRAMES\f$ per second at 44.1 kHz is:
+
+\f[
+N\_FRAMES = 44100 / 2048 = 21.5332
+\f]
+
+At a bit rate of 8 kbps the average number of bits per frame
+\f$N\_BITS\_PER\_FRAME\f$ is:
+
+\f[
+N\_BITS\_PER\_FRAME = 8000 / 21.5332 = 371.52
+\f]
+
+which is about 46.44 bytes per encoded frame.
+
+At a bit rate of 32 kbps, which is quite high for single channel HE-AAC v1, it
+is:
+
+\f[
+N\_BITS\_PER\_FRAME = 32000 / 21.5332 = 1486
+\f]
+
+which is about 185.76 bytes per encoded frame.
+
+These bits/frame figures are average figures where each AAC frame generally has
+a different size in bytes. To calculate the same for AAC-LC just use 1024
+instead of 2048 PCM samples per frame and channel. For AAC-LD/ELD it is either
+480 or 512 PCM samples per frame and channel.
+
+
+\section BEHAVIOUR_TOOLS Encoder Tools
+
+The AAC encoder supports TNS, PNS, MS, Intensity and activates these tools
+depending on the audio signal and the encoder configuration (i.e. bitrate or
+AOT). It is not required to configure these tools manually.
+
+PNS improves encoding quality only for certain bitrates. Therefore it makes
+sense to activate PNS only for these bitrates and save the processing power
+required for PNS (about 10 % of the encoder) when using other bitrates. This is
+done automatically inside the encoder library. PNS is disabled inside the
+encoder library if an MPEG-2 AOT is chosen since PNS is an MPEG-4 AAC feature.
+
+If SBR is activated, the encoder automatically deactivates PNS internally. If
+TNS is disabled but PNS is allowed, the encoder deactivates PNS calculation
+internally.
+
+*/
+
+#ifndef AACENC_LIB_H
+#define AACENC_LIB_H
+
+#include "machine_type.h"
+#include "FDK_audio.h"
+
+/* Encoder library version components VL0.VL1.VL2 (here 4.0.1) —
+   presumably major/minor/patch; verify against library release notes. */
+#define AACENCODER_LIB_VL0 4
+#define AACENCODER_LIB_VL1 0
+#define AACENCODER_LIB_VL2 1
+
+/**
+ *  AAC encoder error codes.
+ *
+ *  Codes are grouped by value range: 0x002x = invalid handle/memory/parameter
+ *  errors, 0x004x = initialization errors, 0x0060 = runtime encoding error,
+ *  0x0080 = end of input.
+ */
+typedef enum {
+  AACENC_OK = 0x0000, /*!< No error happened. All fine. */
+
+  AACENC_INVALID_HANDLE =
+      0x0020, /*!< Handle passed to function call was invalid. */
+  AACENC_MEMORY_ERROR = 0x0021,          /*!< Memory allocation failed. */
+  AACENC_UNSUPPORTED_PARAMETER = 0x0022, /*!< Parameter not available. */
+  AACENC_INVALID_CONFIG = 0x0023,        /*!< Configuration not provided. */
+
+  AACENC_INIT_ERROR = 0x0040,     /*!< General initialization error. */
+  AACENC_INIT_AAC_ERROR = 0x0041, /*!< AAC library initialization error. */
+  AACENC_INIT_SBR_ERROR = 0x0042, /*!< SBR library initialization error. */
+  AACENC_INIT_TP_ERROR = 0x0043, /*!< Transport library initialization error. */
+  AACENC_INIT_META_ERROR =
+      0x0044, /*!< Meta data library initialization error. */
+  AACENC_INIT_MPS_ERROR = 0x0045, /*!< MPS library initialization error. */
+
+  AACENC_ENCODE_ERROR = 0x0060, /*!< The encoding process was interrupted by an
+                                   unexpected error. */
+
+  AACENC_ENCODE_EOF = 0x0080 /*!< End of file reached. */
+
+} AACENC_ERROR;
+
+/**
+ *  AAC encoder buffer descriptor identifiers.
+ *  These identifiers are used within buffer descriptors
+ * AACENC_BufDesc::bufferIdentifiers to tag the role of each buffer element.
+ */
+typedef enum {
+  /* Input buffer identifier. */
+  IN_AUDIO_DATA = 0,    /*!< Audio input buffer, interleaved INT_PCM samples. */
+  IN_ANCILLRY_DATA = 1, /*!< Ancillary data to be embedded into bitstream.
+                             (The misspelled identifier is part of the public
+                             API and must be kept for compatibility.) */
+  IN_METADATA_SETUP = 2, /*!< Setup structure for embedding meta data. */
+
+  /* Output buffer identifier. */
+  OUT_BITSTREAM_DATA = 3, /*!< Buffer holds bitstream output data. */
+  OUT_AU_SIZES =
+      4 /*!< Buffer contains sizes of each access unit. This information
+             is necessary for superframing. */
+
+} AACENC_BufferIdentifier;
+
+/**
+ *  AAC encoder handle.
+ *  Opaque pointer to the library-internal encoder instance; struct AACENCODER
+ *  is only forward-declared here and never defined in this header.
+ */
+typedef struct AACENCODER *HANDLE_AACENCODER;
+
+/**
+ *  Provides some info about the encoder configuration.
+ *  Filled by the library; all fields are read-only from the caller's view.
+ */
+typedef struct {
+  UINT maxOutBufBytes; /*!< Maximum number of encoder bitstream bytes within one
+                          frame. Size depends on maximum number of supported
+                          channels in encoder instance. */
+
+  UINT maxAncBytes; /*!< Maximum number of ancillary data bytes which can be
+                       inserted into bitstream within one frame. */
+
+  UINT inBufFillLevel; /*!< Internal input buffer fill level in samples per
+                          channel. This parameter will automatically be cleared
+                          if samplingrate or channel(Mode/Order) changes. */
+
+  UINT inputChannels; /*!< Number of input channels expected in encoding
+                         process. */
+
+  UINT frameLength; /*!< Amount of input audio samples consumed each frame per
+                       channel, depending on audio object type configuration. */
+
+  UINT nDelay; /*!< Codec delay in PCM samples/channel. Depends on framelength
+                  and AOT. Does not include framing delay for filling up encoder
+                  PCM input buffer. */
+
+  UINT nDelayCore; /*!< Codec delay in PCM samples/channel, w/o delay caused by
+                      the decoder SBR module. This delay is needed to correctly
+                      write edit lists for gapless playback. The decoder may not
+                      know how much delay is introduced by SBR, since it may not
+                      know if SBR is active at all (implicit signaling),
+                      therefore the decoder must take into account any delay
+                      caused by the SBR module. */
+
+  UCHAR confBuf[64]; /*!< Configuration buffer in binary format as an
+                        AudioSpecificConfig or StreamMuxConfig according to the
+                        selected transport type. */
+
+  UINT confSize; /*!< Number of valid bytes in confBuf. */
+
+} AACENC_InfoStruct;
+
+/**
+ *  Describes the input and output buffers for an aacEncEncode() call.
+ *  bufs, bufferIdentifiers, bufSizes and bufElSizes are parallel arrays,
+ *  each holding numBufs entries.
+ */
+typedef struct {
+  INT numBufs;            /*!< Number of buffers. */
+  void **bufs;            /*!< Pointer to vector containing buffer addresses. */
+  INT *bufferIdentifiers; /*!< Identifier of each buffer element. See
+                             ::AACENC_BufferIdentifier. */
+  INT *bufSizes;          /*!< Size of each buffer in 8-bit bytes. */
+  INT *bufElSizes;        /*!< Size of each buffer element in bytes. */
+
+} AACENC_BufDesc;
+
+/**
+ *  Defines the input arguments for an aacEncEncode() call.
+ */
+typedef struct {
+  INT numInSamples; /*!< Number of valid input audio samples (multiple of input
+                       channels). */
+  INT numAncBytes;  /*!< Number of ancillary data bytes to be encoded. */
+
+} AACENC_InArgs;
+
+/**
+ *  Defines the output arguments for an aacEncEncode() call.
+ *  Filled by the encoder on return.
+ */
+typedef struct {
+  INT numOutBytes;  /*!< Number of valid bitstream bytes generated during
+                       aacEncEncode(). */
+  INT numInSamples; /*!< Number of input audio samples consumed by the encoder.
+                     */
+  INT numAncBytes;  /*!< Number of ancillary data bytes consumed by the encoder.
+                     */
+  INT bitResState;  /*!< State of the bit reservoir in bits. */
+
+} AACENC_OutArgs;
+
+/**
+ *  Meta Data Compression Profiles.
+ *  Used for both AACENC_MetaData::drc_profile and
+ *  AACENC_MetaData::comp_profile.
+ */
+typedef enum {
+  AACENC_METADATA_DRC_NONE = 0,          /*!< None. */
+  AACENC_METADATA_DRC_FILMSTANDARD = 1,  /*!< Film standard. */
+  AACENC_METADATA_DRC_FILMLIGHT = 2,     /*!< Film light. */
+  AACENC_METADATA_DRC_MUSICSTANDARD = 3, /*!< Music standard. */
+  AACENC_METADATA_DRC_MUSICLIGHT = 4,    /*!< Music light. */
+  AACENC_METADATA_DRC_SPEECH = 5,        /*!< Speech. */
+  AACENC_METADATA_DRC_NOT_PRESENT =
+      256 /*!< Disable writing gain factor (used for comp_profile only). */
+
+} AACENC_METADATA_DRC_PROFILE;
+
+/**
+ *  Meta Data setup structure.
+ *  Passed to the encoder via an IN_METADATA_SETUP buffer (see
+ *  ::AACENC_BufferIdentifier).
+ */
+typedef struct {
+  AACENC_METADATA_DRC_PROFILE
+  drc_profile; /*!< MPEG DRC compression profile. See
+                  ::AACENC_METADATA_DRC_PROFILE. */
+  AACENC_METADATA_DRC_PROFILE
+  comp_profile; /*!< ETSI heavy compression profile. See
+                   ::AACENC_METADATA_DRC_PROFILE. */
+
+  INT drc_TargetRefLevel;  /*!< Used to define expected level to:
+                                Scaled with 16 bit. x*2^16. */
+  INT comp_TargetRefLevel; /*!< Adjust limiter to avoid overload.
+                                Scaled with 16 bit. x*2^16. */
+
+  INT prog_ref_level_present; /*!< Flag, if prog_ref_level is present */
+  INT prog_ref_level;         /*!< Programme Reference Level = Dialogue Level:
+                                   -31.75dB .. 0 dB ; stepsize: 0.25dB
+                                   Scaled with 16 bit. x*2^16.*/
+
+  UCHAR PCE_mixdown_idx_present; /*!< Flag, if dmx-idx should be written in
+                                    programme config element */
+  UCHAR ETSI_DmxLvl_present;     /*!< Flag, if dmx-lvl should be written in
+                                    ETSI-ancData */
+
+  SCHAR centerMixLevel; /*!< Center downmix level (0...7, according to table) */
+  SCHAR surroundMixLevel; /*!< Surround downmix level (0...7, according to
+                             table) */
+
+  UCHAR
+  dolbySurroundMode; /*!< Indication for Dolby Surround Encoding Mode.
+                          - 0: Dolby Surround mode not indicated
+                          - 1: 2-ch audio part is not Dolby surround encoded
+                          - 2: 2-ch audio part is Dolby surround encoded */
+
+  UCHAR drcPresentationMode; /*!< Indication for DRC Presentation Mode.
+                                  - 0: Presentation mode not indicated
+                                  - 1: Presentation mode 1
+                                  - 2: Presentation mode 2 */
+
+  struct {
+    /* extended ancillary data */
+    UCHAR extAncDataEnable; /*< Indicates if MPEG4_ext_ancillary_data() exists.
+                                - 0: No MPEG4_ext_ancillary_data().
+                                - 1: Insert MPEG4_ext_ancillary_data(). */
+
+    UCHAR
+    extDownmixLevelEnable;   /*< Indicates if ext_downmixing_levels() exists.
+                                 - 0: No ext_downmixing_levels().
+                                 - 1: Insert ext_downmixing_levels(). */
+    UCHAR extDownmixLevel_A; /*< Downmix level index A (0...7, according to
+                                table) */
+    UCHAR extDownmixLevel_B; /*< Downmix level index B (0...7, according to
+                                table) */
+
+    UCHAR dmxGainEnable; /*< Indicates if ext_downmixing_global_gains() exists.
+                             - 0: No ext_downmixing_global_gains().
+                             - 1: Insert ext_downmixing_global_gains(). */
+    INT dmxGain5;        /*< Gain factor for downmix to 5 channels.
+                              -15.75dB .. -15.75dB; stepsize: 0.25dB
+                              Scaled with 16 bit. x*2^16.
+                              NOTE(review): identical range endpoints look like
+                              a typo (possibly -15.75dB .. +15.75dB); verify. */
+    INT dmxGain2;        /*< Gain factor for downmix to 2 channels.
+                              -15.75dB .. -15.75dB; stepsize: 0.25dB
+                              Scaled with 16 bit. x*2^16.
+                              NOTE(review): identical range endpoints look like
+                              a typo (possibly -15.75dB .. +15.75dB); verify. */
+
+    UCHAR lfeDmxEnable; /*< Indicates if ext_downmixing_lfe_level() exists.
+                            - 0: No ext_downmixing_lfe_level().
+                            - 1: Insert ext_downmixing_lfe_level(). */
+    UCHAR lfeDmxLevel;  /*< Downmix level index for LFE (0..15, according to
+                           table) */
+
+  } ExtMetaData;
+
+} AACENC_MetaData;
+
+/**
+ * AAC encoder control flags.
+ *
+ * In interaction with the ::AACENC_CONTROL_STATE parameter it is possible to
+ * get information about the internal initialization process. It is also
+ * possible to overwrite the internal state from extern when necessary.
+ */
+typedef enum {
+  AACENC_INIT_NONE = 0x0000, /*!< Do not trigger initialization. */
+  AACENC_INIT_CONFIG =
+      0x0001, /*!< Initialize all encoder modules configuration. */
+  AACENC_INIT_STATES = 0x0002, /*!< Reset all encoder modules history buffer. */
+  AACENC_INIT_TRANSPORT =
+      0x1000, /*!< Initialize transport lib with new parameters. */
+  AACENC_RESET_INBUFFER =
+      0x2000,              /*!< Reset fill level of internal input buffer. */
+  AACENC_INIT_ALL = 0xFFFF /*!< Initialize all (bitwise OR of all flags). */
+} AACENC_CTRLFLAGS;
+
+/**
+ * \brief  AAC encoder setting parameters.
+ *
+ * Use aacEncoder_SetParam() function to configure, or use aacEncoder_GetParam()
+ * function to read the internal status of the following parameters.
+ */
+typedef enum {
+  AACENC_AOT =
+      0x0100, /*!< Audio object type. See ::AUDIO_OBJECT_TYPE in FDK_audio.h.
+                   - 2: MPEG-4 AAC Low Complexity.
+                   - 5: MPEG-4 AAC Low Complexity with Spectral Band Replication
+                 (HE-AAC).
+                   - 29: MPEG-4 AAC Low Complexity with Spectral Band
+                 Replication and Parametric Stereo (HE-AAC v2). This
+                 configuration can be used only with stereo input audio data.
+                   - 23: MPEG-4 AAC Low-Delay.
+                   - 39: MPEG-4 AAC Enhanced Low-Delay. Since there is no
+                 ::AUDIO_OBJECT_TYPE for ELD in combination with SBR defined,
+                 enable SBR explicitly by ::AACENC_SBR_MODE parameter. The ELD
+                 v2 212 configuration can be configured by ::AACENC_CHANNELMODE
+                 parameter.
+                   - 129: MPEG-2 AAC Low Complexity.
+                   - 132: MPEG-2 AAC Low Complexity with Spectral Band
+                 Replication (HE-AAC).
+
+                   Please note that the virtual MPEG-2 AOT's basically disables
+                 non-existing Perceptual Noise Substitution tool in AAC encoder
+                 and controls the MPEG_ID flag in adts header. The virtual
+                 MPEG-2 AOT doesn't prohibit specific transport formats. */
+
+  AACENC_BITRATE = 0x0101, /*!< Total encoder bitrate. This parameter is
+                              mandatory and interacts with ::AACENC_BITRATEMODE.
+                                - CBR: Bitrate in bits/second.
+                                - VBR: Variable bitrate. Bitrate argument will
+                              be ignored. See \ref suppBitrates for details. */
+
+  AACENC_BITRATEMODE = 0x0102, /*!< Bitrate mode. Configuration can be different
+                                  kind of bitrate configurations:
+                                    - 0: Constant bitrate, use bitrate according
+                                  to ::AACENC_BITRATE. (default) Within none
+                                  LD/ELD ::AUDIO_OBJECT_TYPE, the CBR mode makes
+                                  use of full allowed bitreservoir. In contrast,
+                                  at Low-Delay ::AUDIO_OBJECT_TYPE the
+                                  bitreservoir is kept very small.
+                                    - 1: Variable bitrate mode, \ref vbrmode
+                                  "very low bitrate".
+                                    - 2: Variable bitrate mode, \ref vbrmode
+                                  "low bitrate".
+                                    - 3: Variable bitrate mode, \ref vbrmode
+                                  "medium bitrate".
+                                    - 4: Variable bitrate mode, \ref vbrmode
+                                  "high bitrate".
+                                    - 5: Variable bitrate mode, \ref vbrmode
+                                  "very high bitrate". */
+
+  AACENC_SAMPLERATE = 0x0103, /*!< Audio input data sampling rate. Encoder
+                                 supports following sampling rates: 8000, 11025,
+                                 12000, 16000, 22050, 24000, 32000, 44100,
+                                 48000, 64000, 88200, 96000 */
+
+  AACENC_SBR_MODE = 0x0104, /*!< Configure SBR independently of the chosen Audio
+                               Object Type ::AUDIO_OBJECT_TYPE. This parameter
+                               is for ELD audio object type only.
+                                 - -1: Use ELD SBR auto configurator (default).
+                                 - 0: Disable Spectral Band Replication.
+                                 - 1: Enable Spectral Band Replication. */
+
+  AACENC_GRANULE_LENGTH =
+      0x0105, /*!< Core encoder (AAC) audio frame length in samples:
+                   - 1024: Default configuration.
+                   - 512: Default length in LD/ELD configuration.
+                   - 480: Length in LD/ELD configuration.
+                   - 256: Length for ELD reduced delay mode (x2).
+                   - 240: Length for ELD reduced delay mode (x2).
+                   - 128: Length for ELD reduced delay mode (x4).
+                   - 120: Length for ELD reduced delay mode (x4). */
+
+  AACENC_CHANNELMODE = 0x0106, /*!< Set explicit channel mode. Channel mode must
+                                  match with number of input channels.
+                                    - 1-7, 11,12,14 and 33,34: MPEG channel
+                                  modes supported, see ::CHANNEL_MODE in
+                                  FDK_audio.h. */
+
+  AACENC_CHANNELORDER =
+      0x0107, /*!< Input audio data channel ordering scheme:
+                   - 0: MPEG channel ordering (e. g. 5.1: C, L, R, SL, SR, LFE).
+                 (default)
+                   - 1: WAVE file format channel ordering (e. g. 5.1: L, R, C,
+                 LFE, SL, SR). */
+
+  AACENC_SBR_RATIO =
+      0x0108, /*!<  Controls activation of downsampled SBR. With downsampled
+                 SBR, the delay will be shorter. On the other hand, for
+                 achieving the same quality level, downsampled SBR needs more
+                 bits than dual-rate SBR. With downsampled SBR, the AAC encoder
+                 will work at the same sampling rate as the SBR encoder (single
+                 rate). Downsampled SBR is supported for AAC-ELD and HE-AACv1.
+                    - 1: Downsampled SBR (default for ELD).
+                    - 2: Dual-rate SBR   (default for HE-AAC). */
+
+  AACENC_AFTERBURNER =
+      0x0200, /*!< This parameter controls the use of the afterburner feature.
+                   The afterburner is a type of analysis by synthesis algorithm
+                 which increases the audio quality but also the required
+                 processing power. It is recommended to always activate this if
+                 additional memory consumption and processing power consumption
+                   is not a problem. If increased MHz and memory consumption are
+                 an issue then the MHz and memory cost of this optional module
+                 need to be evaluated against the improvement in audio quality
+                 on a case by case basis.
+                   - 0: Disable afterburner (default).
+                   - 1: Enable afterburner. */
+
+  AACENC_BANDWIDTH = 0x0203, /*!< Core encoder audio bandwidth:
+                                  - 0: Determine audio bandwidth internally
+                                (default, see chapter \ref BEHAVIOUR_BANDWIDTH).
+                                  - 1 to fs/2: Audio bandwidth in Hertz. Limited
+                                to 20kHz max. Not usable if SBR is active. This
+                                setting is for experts only, better do not touch
+                                this value to avoid degraded audio quality. */
+
+  AACENC_PEAK_BITRATE =
+      0x0207, /*!< Peak bitrate configuration parameter to adjust maximum bits
+                 per audio frame. Bitrate is in bits/second. The peak bitrate
+                 will internally be limited to the chosen bitrate
+                 ::AACENC_BITRATE as lower limit and the
+                 number_of_effective_channels*6144 bit as upper limit.
+
+                   Setting the peak bitrate equal to ::AACENC_BITRATE does not
+                 necessarily mean that the audio frames will be of constant
+                 size. Since the peak bitrate is in bits/second, the frame sizes
+                 can vary by one byte in one or the other direction over various
+                 frames. However, it is not recommended to reduce the peak
+                 bitrate to ::AACENC_BITRATE - it would disable the
+                 bitreservoir, which would affect the audio quality by a large
+                 amount. */
+
+  AACENC_TRANSMUX = 0x0300, /*!< Transport type to be used. See ::TRANSPORT_TYPE
+                               in FDK_audio.h. Following types can be configured
+                               in encoder library:
+                                 - 0: raw access units
+                                 - 1: ADIF bitstream format
+                                 - 2: ADTS bitstream format
+                                 - 6: Audio Mux Elements (LATM) with
+                               muxConfigPresent = 1
+                                 - 7: Audio Mux Elements (LATM) with
+                               muxConfigPresent = 0, out of band StreamMuxConfig
+                                 - 10: Audio Sync Stream (LOAS) */
+
+  AACENC_HEADER_PERIOD =
+      0x0301, /*!< Frame count period for sending in-band configuration buffers
+                 within LATM/LOAS transport layer. Additionally this parameter
+                 configures the PCE repetition period in raw_data_block(). See
+                 \ref encPCE.
+                   - 0xFF: auto-mode default 10 for TT_MP4_ADTS, TT_MP4_LOAS and
+                 TT_MP4_LATM_MCP1, otherwise 0.
+                   - n: Frame count period. */
+
+  AACENC_SIGNALING_MODE =
+      0x0302, /*!< Signaling mode of the extension AOT:
+                   - 0: Implicit backward compatible signaling (default for
+                 non-MPEG-4 based AOT's and for the transport formats ADIF and
+                 ADTS)
+                        - A stream that uses implicit signaling can be decoded
+                 by every AAC decoder, even AAC-LC-only decoders
+                        - An AAC-LC-only decoder will only decode the
+                 low-frequency part of the stream, resulting in a band-limited
+                 output
+                        - This method works with all transport formats
+                        - This method does not work with downsampled SBR
+                   - 1: Explicit backward compatible signaling
+                        - A stream that uses explicit backward compatible
+                 signaling can be decoded by every AAC decoder, even AAC-LC-only
+                 decoders
+                        - An AAC-LC-only decoder will only decode the
+                 low-frequency part of the stream, resulting in a band-limited
+                 output
+                        - A decoder not capable of decoding PS will only decode
+                 the AAC-LC+SBR part. If the stream contained PS, the result
+                 will be a decoded mono downmix
+                        - This method does not work with ADIF or ADTS. For
+                 LOAS/LATM, it only works with AudioMuxVersion==1
+                        - This method does work with downsampled SBR
+                   - 2: Explicit hierarchical signaling (default for MPEG-4
+                 based AOT's and for all transport formats excluding ADIF and
+                 ADTS)
+                        - A stream that uses explicit hierarchical signaling can
+                 be decoded only by HE-AAC decoders
+                        - An AAC-LC-only decoder will not decode a stream that
+                 uses explicit hierarchical signaling
+                        - A decoder not capable of decoding PS will not decode
+                 the stream at all if it contained PS
+                        - This method does not work with ADIF or ADTS. It works
+                 with LOAS/LATM and the MPEG-4 File format
+                        - This method does work with downsampled SBR
+
+                    For making sure that the listener always experiences the
+                 best audio quality, explicit hierarchical signaling should be
+                 used. This makes sure that only a full HE-AAC-capable decoder
+                 will decode those streams. The audio is played at full
+                 bandwidth. For best backwards compatibility, it is recommended
+                 to encode with implicit SBR signaling. A decoder capable of
+                 AAC-LC only will then only decode the AAC part, which means the
+                 decoded audio will sound band-limited.
+
+                    For MPEG-2 transport types (ADTS,ADIF), only implicit
+                 signaling is possible.
+
+                    For LOAS and LATM, explicit backwards compatible signaling
+                 only works together with AudioMuxVersion==1. The reason is
+                 that, for explicit backwards compatible signaling, additional
+                 information will be appended to the ASC. A decoder that is only
+                 capable of decoding AAC-LC will skip this part. Nevertheless,
+                 for jumping to the end of the ASC, it needs to know the ASC
+                 length. Transmitting the length of the ASC is a feature of
+                 AudioMuxVersion==1, it is not possible to transmit the length
+                 of the ASC with AudioMuxVersion==0, therefore an AAC-LC-only
+                 decoder will not be able to parse a LOAS/LATM stream that was
+                 being encoded with AudioMuxVersion==0.
+
+                    For downsampled SBR, explicit signaling is mandatory. The
+                 reason for this is that the extension sampling frequency (which
+                 is in case of SBR the sampling frequency of the SBR part) can
+                 only be signaled in explicit mode.
+
+                    For AAC-ELD, the SBR information is transmitted in the
+                 ELDSpecific Config, which is part of the AudioSpecificConfig.
+                 Therefore, the settings here will have no effect on AAC-ELD.*/
+
+  AACENC_TPSUBFRAMES =
+      0x0303, /*!< Number of sub frames in a transport frame for LOAS/LATM or
+                 ADTS (default 1).
+                   - ADTS: Maximum number of sub frames restricted to 4.
+                   - LOAS/LATM: Maximum number of sub frames restricted to 2.*/
+
+  AACENC_AUDIOMUXVER =
+      0x0304, /*!< AudioMuxVersion to be used for LATM. (AudioMuxVersionA,
+                 currently not implemented):
+                   - 0: Default, no transmission of tara Buffer fullness, no ASC
+                 length and including actual latm Buffer fullness.
+                   - 1: Transmission of tara Buffer fullness, ASC length and
+                 actual latm Buffer fullness.
+                   - 2: Transmission of tara Buffer fullness, ASC length and
+                 maximum level of latm Buffer fullness. */
+
+  AACENC_PROTECTION = 0x0306, /*!< Configure protection in transport layer:
+                                   - 0: No protection. (default)
+                                   - 1: CRC active for ADTS transport format. */
+
+  AACENC_ANCILLARY_BITRATE =
+      0x0500, /*!< Constant ancillary data bitrate in bits/second.
+                   - 0: Either no ancillary data or insert exact number of
+                 bytes, denoted via input parameter, numAncBytes in
+                 AACENC_InArgs.
+                   - else: Insert ancillary data with specified bitrate. */
+
+  AACENC_METADATA_MODE = 0x0600, /*!< Configure Meta Data. See ::AACENC_MetaData
+                                    for further details:
+                                      - 0: Do not embed any metadata.
+                                      - 1: Embed dynamic_range_info metadata.
+                                      - 2: Embed dynamic_range_info and
+                                    ancillary_data metadata.
+                                      - 3: Embed ancillary_data metadata. */
+
+  AACENC_CONTROL_STATE =
+      0xFF00, /*!< There is an automatic process which internally reconfigures
+                 the encoder instance when a configuration parameter changed or
+                 an error occurred. This parameter allows overwriting or getting
+                 the control status of this process. See ::AACENC_CTRLFLAGS. */
+
+  AACENC_NONE = 0xFFFF /*!< ------ */
+
+} AACENC_PARAM;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \brief  Open an instance of the encoder.
+ *
+ * Allocate memory for an encoder instance with a functional range denoted by
+ * the function parameters. Preinitialize encoder instance with default
+ * configuration.
+ *
+ * \param phAacEncoder  A pointer to an encoder handle. Initialized on return.
+ * \param encModules    Specify encoder modules to be supported in this encoder
+ * instance:
+ *                      - 0x0: Allocate memory for all available encoder
+ * modules.
+ *                      - else: Select memory allocation regarding encoder
+ * modules. Following flags are possible and can be combined.
+ *                              - 0x01: AAC module.
+ *                              - 0x02: SBR module.
+ *                              - 0x04: PS module.
+ *                              - 0x08: MPS module.
+ *                              - 0x10: Metadata module.
+ *                              - example: (0x01|0x02|0x04|0x08|0x10) allocates
+ * all modules and is equivalent to default configuration denoted by 0x0.
+ * \param maxChannels   Number of channels to be allocated. This parameter can
+ * be used in different ways:
+ *                      - 0: Allocate maximum number of AAC and SBR channels as
+ * supported by the library.
+ *                      - nChannels: Use same maximum number of channels for
+ * allocating memory in AAC and SBR module.
+ *                      - nChannels | (nSbrCh<<8): Number of SBR channels can be
+ * different to AAC channels to save data memory.
+ *
+ * \return
+ *          - AACENC_OK, on success.
+ *          - AACENC_INVALID_HANDLE, AACENC_MEMORY_ERROR, AACENC_INVALID_CONFIG,
+ * on failure.
+ */
+AACENC_ERROR aacEncOpen(HANDLE_AACENCODER *phAacEncoder, const UINT encModules,
+                        const UINT maxChannels);
+
+/**
+ * \brief  Close the encoder instance.
+ *
+ * Deallocate encoder instance and free whole memory.
+ *
+ * \param phAacEncoder  Pointer to the encoder handle to be deallocated.
+ *
+ * \return
+ *          - AACENC_OK, on success.
+ *          - AACENC_INVALID_HANDLE, on failure.
+ */
+AACENC_ERROR aacEncClose(HANDLE_AACENCODER *phAacEncoder);
+
+/**
+ * \brief Encode audio data.
+ *
+ * This function is mainly for encoding audio data. In addition the function can
+ * be used for an encoder (re)configuration process.
+ * - PCM input data will be retrieved from external input buffer until the fill
+ * level allows encoding a single frame. This functionality allows an external
+ * buffer with reduced size in comparison to the AAC or HE-AAC audio frame
+ * length.
+ * - If the value of the input samples argument is zero, just internal
+ * reinitialization will be applied if it is requested.
+ * - At the end of a file the flushing process can be triggered via setting the
+ * value of the input samples argument to -1. The encoder delay lines are fully
+ * flushed when the encoder returns no valid bitstream data
+ * AACENC_OutArgs::numOutBytes. Furthermore the end of file is signaled by the
+ * return value AACENC_ENCODE_EOF.
+ * - If an error occurred in the previous frame or any of the encoder parameters
+ * changed, an internal reinitialization process will be applied before encoding
+ * the incoming audio samples.
+ * - The function can also be used for an independent reconfiguration process
+ * without encoding. The first parameter has to be a valid encoder handle and
+ * all other parameters can be set to NULL.
+ * - If the size of the external bitbuffer in outBufDesc is not sufficient for
+ * writing the whole bitstream, an internal error will be the return value and a
+ * reconfiguration will be triggered.
+ *
+ * \param hAacEncoder           A valid AAC encoder handle.
+ * \param inBufDesc             Input buffer descriptor, see AACENC_BufDesc:
+ *                              - At least one input buffer with audio data is
+ * expected.
+ *                              - Optionally a second input buffer with
+ * ancillary data can be fed.
+ * \param outBufDesc            Output buffer descriptor, see AACENC_BufDesc:
+ *                              - Provide one output buffer for the encoded
+ * bitstream.
+ * \param inargs                Input arguments, see AACENC_InArgs.
+ * \param outargs               Output arguments, AACENC_OutArgs.
+ *
+ * \return
+ *          - AACENC_OK, on success.
+ *          - AACENC_INVALID_HANDLE, AACENC_ENCODE_ERROR, on failure in encoding
+ * process.
+ *          - AACENC_INVALID_CONFIG, AACENC_INIT_ERROR, AACENC_INIT_AAC_ERROR,
+ * AACENC_INIT_SBR_ERROR, AACENC_INIT_TP_ERROR, AACENC_INIT_META_ERROR,
+ * AACENC_INIT_MPS_ERROR, on failure in encoder initialization.
+ *          - AACENC_UNSUPPORTED_PARAMETER, on incorrect input or output buffer
+ * descriptor initialization.
+ *          - AACENC_ENCODE_EOF, when flushing fully concluded.
+ */
+AACENC_ERROR aacEncEncode(const HANDLE_AACENCODER hAacEncoder,
+                          const AACENC_BufDesc *inBufDesc,
+                          const AACENC_BufDesc *outBufDesc,
+                          const AACENC_InArgs *inargs, AACENC_OutArgs *outargs);
+
+/**
+ * \brief  Acquire info about present encoder instance.
+ *
+ * This function retrieves information of the encoder configuration. In addition
+ * to informative internal states, a configuration data block of the current
+ * encoder settings will be returned. The format is either Audio Specific Config
+ * in case of Raw Packets transport format or StreamMuxConfig in case of
+ * LOAS/LATM transport format. The configuration data block is binary coded as
+ * specified in ISO/IEC 14496-3 (MPEG-4 audio), to be used directly for MPEG-4
+ * File Format or RFC3016 or RFC3640 applications.
+ *
+ * \param hAacEncoder           A valid AAC encoder handle.
+ * \param pInfo                 Pointer to AACENC_InfoStruct. Filled on return.
+ *
+ * \return
+ *          - AACENC_OK, on success.
+ *          - AACENC_INVALID_HANDLE, AACENC_INIT_ERROR, on failure.
+ */
+AACENC_ERROR aacEncInfo(const HANDLE_AACENCODER hAacEncoder,
+                        AACENC_InfoStruct *pInfo);
+
+/**
+ * \brief  Set one single AAC encoder parameter.
+ *
+ * This function allows configuration of all encoder parameters specified in
+ * ::AACENC_PARAM. Each parameter must be set with a separate function call. An
+ * internal validation of the configuration value range will be done and an
+ * internal reconfiguration will be signaled. The actual configuration adoption
+ * is part of the subsequent aacEncEncode() call.
+ *
+ * \param hAacEncoder           A valid AAC encoder handle.
+ * \param param                 Parameter to be set. See ::AACENC_PARAM.
+ * \param value                 Parameter value. See parameter description in
+ * ::AACENC_PARAM.
+ *
+ * \return
+ *          - AACENC_OK, on success.
+ *          - AACENC_INVALID_HANDLE, AACENC_UNSUPPORTED_PARAMETER,
+ * AACENC_INVALID_CONFIG, on failure.
+ */
+AACENC_ERROR aacEncoder_SetParam(const HANDLE_AACENCODER hAacEncoder,
+                                 const AACENC_PARAM param, const UINT value);
+
+/**
+ * \brief  Get one single AAC encoder parameter.
+ *
+ * This function is the complement to aacEncoder_SetParam(). After encoder
+ * reinitialization with user defined settings, the internal status can be
+ * obtained of each parameter, specified with ::AACENC_PARAM.
+ *
+ * \param hAacEncoder           A valid AAC encoder handle.
+ * \param param                 Parameter to be returned. See ::AACENC_PARAM.
+ *
+ * \return  Internal configuration value of specified parameter ::AACENC_PARAM.
+ */
+UINT aacEncoder_GetParam(const HANDLE_AACENCODER hAacEncoder,
+                         const AACENC_PARAM param);
+
+/**
+ * \brief  Get information about encoder library build.
+ *
+ * Fill a given LIB_INFO structure with library version information.
+ *
+ * \param info  Pointer to an allocated LIB_INFO struct.
+ *
+ * \return
+ *          - AACENC_OK, on success.
+ *          - AACENC_INVALID_HANDLE, AACENC_INIT_ERROR, on failure.
+ */
+AACENC_ERROR aacEncGetLibInfo(LIB_INFO *info);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AACENC_LIB_H */

+ 584 - 0
native/include/fdk-aac/genericStds.h

@@ -0,0 +1,584 @@
+/* -----------------------------------------------------------------------------
+Software License for The Fraunhofer FDK AAC Codec Library for Android
+
+© Copyright  1995 - 2018 Fraunhofer-Gesellschaft zur Förderung der angewandten
+Forschung e.V. All rights reserved.
+
+ 1.    INTRODUCTION
+The Fraunhofer FDK AAC Codec Library for Android ("FDK AAC Codec") is software
+that implements the MPEG Advanced Audio Coding ("AAC") encoding and decoding
+scheme for digital audio. This FDK AAC Codec software is intended to be used on
+a wide variety of Android devices.
+
+AAC's HE-AAC and HE-AAC v2 versions are regarded as today's most efficient
+general perceptual audio codecs. AAC-ELD is considered the best-performing
+full-bandwidth communications codec by independent studies and is widely
+deployed. AAC has been standardized by ISO and IEC as part of the MPEG
+specifications.
+
+Patent licenses for necessary patent claims for the FDK AAC Codec (including
+those of Fraunhofer) may be obtained through Via Licensing
+(www.vialicensing.com) or through the respective patent owners individually for
+the purpose of encoding or decoding bit streams in products that are compliant
+with the ISO/IEC MPEG audio standards. Please note that most manufacturers of
+Android devices already license these patent claims through Via Licensing or
+directly from the patent owners, and therefore FDK AAC Codec software may
+already be covered under those patent licenses when it is used for those
+licensed purposes only.
+
+Commercially-licensed AAC software libraries, including floating-point versions
+with enhanced sound quality, are also available from Fraunhofer. Users are
+encouraged to check the Fraunhofer website for additional applications
+information and documentation.
+
+2.    COPYRIGHT LICENSE
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted without payment of copyright license fees provided that you
+satisfy the following conditions:
+
+You must retain the complete text of this software license in redistributions of
+the FDK AAC Codec or your modifications thereto in source code form.
+
+You must retain the complete text of this software license in the documentation
+and/or other materials provided with redistributions of the FDK AAC Codec or
+your modifications thereto in binary form. You must make available free of
+charge copies of the complete source code of the FDK AAC Codec and your
+modifications thereto to recipients of copies in binary form.
+
+The name of Fraunhofer may not be used to endorse or promote products derived
+from this library without prior written permission.
+
+You may not charge copyright license fees for anyone to use, copy or distribute
+the FDK AAC Codec software or your modifications thereto.
+
+Your modified versions of the FDK AAC Codec must carry prominent notices stating
+that you changed the software and the date of any change. For modified versions
+of the FDK AAC Codec, the term "Fraunhofer FDK AAC Codec Library for Android"
+must be replaced by the term "Third-Party Modified Version of the Fraunhofer FDK
+AAC Codec Library for Android."
+
+3.    NO PATENT LICENSE
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PATENT CLAIMS, including without
+limitation the patents of Fraunhofer, ARE GRANTED BY THIS SOFTWARE LICENSE.
+Fraunhofer provides no warranty of patent non-infringement with respect to this
+software.
+
+You may use this FDK AAC Codec software or modifications thereto only for
+purposes that are authorized by appropriate patent licenses.
+
+4.    DISCLAIMER
+
+This FDK AAC Codec software is provided by Fraunhofer on behalf of the copyright
+holders and contributors "AS IS" and WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES,
+including but not limited to the implied warranties of merchantability and
+fitness for a particular purpose. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE for any direct, indirect, incidental, special, exemplary,
+or consequential damages, including but not limited to procurement of substitute
+goods or services; loss of use, data, or profits, or business interruption,
+however caused and on any theory of liability, whether in contract, strict
+liability, or tort (including negligence), arising in any way out of the use of
+this software, even if advised of the possibility of such damage.
+
+5.    CONTACT INFORMATION
+
+Fraunhofer Institute for Integrated Circuits IIS
+Attention: Audio and Multimedia Departments - FDK AAC LL
+Am Wolfsmantel 33
+91058 Erlangen, Germany
+
+www.iis.fraunhofer.de/amm
+amm-info@iis.fraunhofer.de
+----------------------------------------------------------------------------- */
+
+/************************* System integration library **************************
+
+   Author(s):
+
+   Description:
+
+*******************************************************************************/
+
+/** \file   genericStds.h
+    \brief  Generic Run-Time Support function wrappers and heap allocation
+   monitoring.
+ */
+
+#if !defined(GENERICSTDS_H)
+#define GENERICSTDS_H
+
+#include "machine_type.h"
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846 /*!< Pi. Only used in example projects. */
+#endif
+
+/**
+ * Identifiers for various memory locations. They are used along with memory
+ * allocation functions like FDKcalloc_L() to specify the requested memory's
+ * location.
+ */
+typedef enum {
+  /* Internal */
+  SECT_DATA_L1 = 0x2000,
+  SECT_DATA_L2,
+  SECT_DATA_L1_A,
+  SECT_DATA_L1_B,
+  SECT_CONSTDATA_L1,
+
+  /* External */
+  SECT_DATA_EXTERN = 0x4000,
+  SECT_CONSTDATA_EXTERN
+
+} MEMORY_SECTION;
+
+/*! \addtogroup SYSLIB_MEMORY_MACROS FDK memory macros
+ *
+ * The \c H_ prefix indicates that the macro is to be used in a header file, the
+ * \c C_ prefix indicates that the macro is to be used in a source file.
+ *
+ * Declaring memory areas requires to specify a unique name and a data type.
+ *
+ * For defining a memory area you require additionally one or two sizes,
+ * depending if the memory should be organized into one or two dimensions.
+ *
+ * The macros containing the keyword \c AALLOC instead of \c ALLOC additionally
+ * take care of returning aligned memory addresses (beyond the natural alignment
+ * of its type). The preprocessor macro
+ * ::ALIGNMENT_DEFAULT indicates the alignment to be used (this is hardware
+ * specific).
+ *
+ * The \c _L suffix indicates that the memory will be located in a specific
+ * section. This is useful to allocate critical memory section into fast
+ * internal SRAM for example.
+ *
+ * @{
+ */
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define H_ALLOC_MEM(name, type) \
+  type *Get##name(int n = 0);   \
+  void Free##name(type **p);    \
+  UINT GetRequiredMem##name(void);
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define H_ALLOC_MEM_OVERLAY(name, type) \
+  type *Get##name(int n = 0);           \
+  void Free##name(type **p);            \
+  UINT GetRequiredMem##name(void);
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_ALLOC_MEM(name, type, num)               \
+  type *Get##name(int n) {                         \
+    FDK_ASSERT((n) == 0);                          \
+    return ((type *)FDKcalloc(num, sizeof(type))); \
+  }                                                \
+  void Free##name(type **p) {                      \
+    if (p != NULL) {                               \
+      FDKfree(*p);                                 \
+      *p = NULL;                                   \
+    }                                              \
+  }                                                \
+  UINT GetRequiredMem##name(void) {                \
+    return ALGN_SIZE_EXTRES((num) * sizeof(type)); \
+  }
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_ALLOC_MEM2(name, type, n1, n2)                 \
+  type *Get##name(int n) {                               \
+    FDK_ASSERT((n) < (n2));                              \
+    return ((type *)FDKcalloc(n1, sizeof(type)));        \
+  }                                                      \
+  void Free##name(type **p) {                            \
+    if (p != NULL) {                                     \
+      FDKfree(*p);                                       \
+      *p = NULL;                                         \
+    }                                                    \
+  }                                                      \
+  UINT GetRequiredMem##name(void) {                      \
+    return ALGN_SIZE_EXTRES((n1) * sizeof(type)) * (n2); \
+  }
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_AALLOC_MEM(name, type, num)                                  \
+  type *Get##name(int n) {                                             \
+    type *ap;                                                          \
+    FDK_ASSERT((n) == 0);                                              \
+    ap = ((type *)FDKaalloc((num) * sizeof(type), ALIGNMENT_DEFAULT)); \
+    return ap;                                                         \
+  }                                                                    \
+  void Free##name(type **p) {                                          \
+    if (p != NULL) {                                                   \
+      FDKafree(*p);                                                    \
+      *p = NULL;                                                       \
+    }                                                                  \
+  }                                                                    \
+  UINT GetRequiredMem##name(void) {                                    \
+    return ALGN_SIZE_EXTRES((num) * sizeof(type) + ALIGNMENT_DEFAULT + \
+                            sizeof(void *));                           \
+  }
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_AALLOC_MEM2(name, type, n1, n2)                             \
+  type *Get##name(int n) {                                            \
+    type *ap;                                                         \
+    FDK_ASSERT((n) < (n2));                                           \
+    ap = ((type *)FDKaalloc((n1) * sizeof(type), ALIGNMENT_DEFAULT)); \
+    return ap;                                                        \
+  }                                                                   \
+  void Free##name(type **p) {                                         \
+    if (p != NULL) {                                                  \
+      FDKafree(*p);                                                   \
+      *p = NULL;                                                      \
+    }                                                                 \
+  }                                                                   \
+  UINT GetRequiredMem##name(void) {                                   \
+    return ALGN_SIZE_EXTRES((n1) * sizeof(type) + ALIGNMENT_DEFAULT + \
+                            sizeof(void *)) *                         \
+           (n2);                                                      \
+  }
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_ALLOC_MEM_L(name, type, num, s)               \
+  type *Get##name(int n) {                              \
+    FDK_ASSERT((n) == 0);                               \
+    return ((type *)FDKcalloc_L(num, sizeof(type), s)); \
+  }                                                     \
+  void Free##name(type **p) {                           \
+    if (p != NULL) {                                    \
+      FDKfree_L(*p);                                    \
+      *p = NULL;                                        \
+    }                                                   \
+  }                                                     \
+  UINT GetRequiredMem##name(void) {                     \
+    return ALGN_SIZE_EXTRES((num) * sizeof(type));      \
+  }
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_ALLOC_MEM2_L(name, type, n1, n2, s)            \
+  type *Get##name(int n) {                               \
+    FDK_ASSERT((n) < (n2));                              \
+    return (type *)FDKcalloc_L(n1, sizeof(type), s);     \
+  }                                                      \
+  void Free##name(type **p) {                            \
+    if (p != NULL) {                                     \
+      FDKfree_L(*p);                                     \
+      *p = NULL;                                         \
+    }                                                    \
+  }                                                      \
+  UINT GetRequiredMem##name(void) {                      \
+    return ALGN_SIZE_EXTRES((n1) * sizeof(type)) * (n2); \
+  }
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_AALLOC_MEM_L(name, type, num, s)                                  \
+  type *Get##name(int n) {                                                  \
+    type *ap;                                                               \
+    FDK_ASSERT((n) == 0);                                                   \
+    ap = ((type *)FDKaalloc_L((num) * sizeof(type), ALIGNMENT_DEFAULT, s)); \
+    return ap;                                                              \
+  }                                                                         \
+  void Free##name(type **p) {                                               \
+    if (p != NULL) {                                                        \
+      FDKafree_L(*p);                                                       \
+      *p = NULL;                                                            \
+    }                                                                       \
+  }                                                                         \
+  UINT GetRequiredMem##name(void) {                                         \
+    return ALGN_SIZE_EXTRES((num) * sizeof(type) + ALIGNMENT_DEFAULT +      \
+                            sizeof(void *));                                \
+  }
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_AALLOC_MEM2_L(name, type, n1, n2, s)                             \
+  type *Get##name(int n) {                                                 \
+    type *ap;                                                              \
+    FDK_ASSERT((n) < (n2));                                                \
+    ap = ((type *)FDKaalloc_L((n1) * sizeof(type), ALIGNMENT_DEFAULT, s)); \
+    return ap;                                                             \
+  }                                                                        \
+  void Free##name(type **p) {                                              \
+    if (p != NULL) {                                                       \
+      FDKafree_L(*p);                                                      \
+      *p = NULL;                                                           \
+    }                                                                      \
+  }                                                                        \
+  UINT GetRequiredMem##name(void) {                                        \
+    return ALGN_SIZE_EXTRES((n1) * sizeof(type) + ALIGNMENT_DEFAULT +      \
+                            sizeof(void *)) *                              \
+           (n2);                                                           \
+  }
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_ALLOC_MEM_OVERLAY(name, type, num, sect, tag) \
+  C_AALLOC_MEM_L(name, type, num, sect)
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_AALLOC_SCRATCH_START(name, type, n)                 \
+  type _##name[(n) + (ALIGNMENT_DEFAULT + sizeof(type) - 1)]; \
+  type *name = (type *)ALIGN_PTR(_##name);                    \
+  C_ALLOC_ALIGNED_REGISTER(name, (n) * sizeof(type));
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_ALLOC_SCRATCH_START(name, type, n) type name[n];
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_AALLOC_SCRATCH_END(name, type, n) C_ALLOC_ALIGNED_UNREGISTER(name);
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_ALLOC_SCRATCH_END(name, type, n)
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_AALLOC_STACK_START(name, type, n)                   \
+  type _##name[(n) + (ALIGNMENT_DEFAULT + sizeof(type) - 1)]; \
+  type *name = (type *)ALIGN_PTR(_##name);                    \
+  C_ALLOC_ALIGNED_REGISTER(name, (n) * sizeof(type));
+
+/** See \ref SYSLIB_MEMORY_MACROS for description. */
+#define C_AALLOC_STACK_END(name, type, n) C_ALLOC_ALIGNED_UNREGISTER(name);
+
+/*! @} */
+
+#define C_ALLOC_ALIGNED_REGISTER(x, size)
+#define C_ALLOC_ALIGNED_UNREGISTER(x)
+#define C_ALLOC_ALIGNED_CHECK(x)
+#define C_ALLOC_ALIGNED_CHECK2(x, y)
+#define FDK_showBacktrace(a, b)
+
+/*! \addtogroup SYSLIB_EXITCODES Unified exit codes
+ *  Exit codes to be used as return values of FDK software test and
+ * demonstration applications. Not as return values of product modules and/or
+ * libraries.
+ *  @{
+ */
+#define FDK_EXITCODE_OK 0 /*!< Successful termination. No errors. */
+#define FDK_EXITCODE_USAGE                                                  \
+  64 /*!< The command/application was used incorrectly, e.g. with the wrong \
+        number of arguments, a bad flag, a bad syntax in a parameter, or    \
+        whatever. */
+#define FDK_EXITCODE_DATAERROR                                               \
+  65 /*!< The input data was incorrect in some way. This should only be used \
+        for user data and not system files. */
+#define FDK_EXITCODE_NOINPUT                                                   \
+  66 /*!< An input file (not a system file) did not exist or was not readable. \
+      */
+#define FDK_EXITCODE_UNAVAILABLE                                              \
+  69 /*!< A service is unavailable. This can occur if a support program or    \
+        file does not exist. This can also be used as a catchall message when \
+        something you wanted to do doesn't work, but you don't know why. */
+#define FDK_EXITCODE_SOFTWARE                                                  \
+  70 /*!< An internal software error has been detected. This should be limited \
+        to non- operating system related errors as possible. */
+#define FDK_EXITCODE_CANTCREATE \
+  73 /*!< A (user specified) output file cannot be created. */
+#define FDK_EXITCODE_IOERROR \
+  74 /*!< An error occurred while doing I/O on some file. */
+/*! @} */
+
+/*--------------------------------------------
+ * Runtime support declarations
+ *---------------------------------------------*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void FDKprintf(const char *szFmt, ...);
+
+void FDKprintfErr(const char *szFmt, ...);
+
+/** Wrapper for <stdio.h>'s getchar(). */
+int FDKgetchar(void);
+
+INT FDKfprintf(void *stream, const char *format, ...);
+INT FDKsprintf(char *str, const char *format, ...);
+
+char *FDKstrchr(char *s, INT c);
+const char *FDKstrstr(const char *haystack, const char *needle);
+char *FDKstrcpy(char *dest, const char *src);
+char *FDKstrncpy(char *dest, const char *src, const UINT n);
+
+#define FDK_MAX_OVERLAYS 8 /**< Maximum number of memory overlays. */
+
+void *FDKcalloc(const UINT n, const UINT size);
+void *FDKmalloc(const UINT size);
+void FDKfree(void *ptr);
+
+/**
+ *  Allocate and clear an aligned memory area. Use FDKafree() instead of
+ * FDKfree() for these memory areas.
+ *
+ * \param size       Size of requested memory in bytes.
+ * \param alignment  Alignment of requested memory in bytes.
+ * \return           Pointer to allocated memory.
+ */
+void *FDKaalloc(const UINT size, const UINT alignment);
+
+/**
+ *  Free an aligned memory area.
+ *
+ * \param ptr  Pointer to be freed.
+ */
+void FDKafree(void *ptr);
+
+/**
+ *  Allocate memory in a specific memory section.
+ *  Requests can be made for internal or external memory. If internal memory is
+ *  requested, FDKcalloc_L() first tries to use L1 memory, which sizes are
+ * defined by ::DATA_L1_A_SIZE and ::DATA_L1_B_SIZE. If no L1 memory is
+ * available, then FDKcalloc_L() tries to use L2 memory. If that fails as well,
+ * the requested memory is allocated at an extern location using the fallback
+ * FDKcalloc().
+ *
+ * \param n     See MSDN documentation on calloc().
+ * \param size  See MSDN documentation on calloc().
+ * \param s     Memory section.
+ * \return      See MSDN documentation on calloc().
+ */
+void *FDKcalloc_L(const UINT n, const UINT size, MEMORY_SECTION s);
+
+/**
+ *  Allocate aligned memory in a specific memory section.
+ *  See FDKcalloc_L() description for details - same applies here.
+ */
+void *FDKaalloc_L(const UINT size, const UINT alignment, MEMORY_SECTION s);
+
+/**
+ *  Free memory that was allocated in a specific memory section.
+ */
+void FDKfree_L(void *ptr);
+
+/**
+ *  Free aligned memory that was allocated in a specific memory section.
+ */
+void FDKafree_L(void *ptr);
+
+/**
+ * Copy memory. Source and destination memory must not overlap.
+ * Either use implementation from a Standard Library, or, if no Standard Library
+ * is available, a generic implementation.
+ * The define ::USE_BUILTIN_MEM_FUNCTIONS in genericStds.cpp controls what to
+ * use. The function arguments correspond to the standard memcpy(). Please see
+ * MSDN documentation for details on how to use it.
+ */
+void FDKmemcpy(void *dst, const void *src, const UINT size);
+
+/**
+ * Copy memory. Source and destination memory are allowed to overlap.
+ * Either use implementation from a Standard Library, or, if no Standard Library
+ * is available, a generic implementation.
+ * The define ::USE_BUILTIN_MEM_FUNCTIONS in genericStds.cpp controls what to
+ * use. The function arguments correspond to the standard memmove(). Please see
+ * MSDN documentation for details on how to use it.
+ */
+void FDKmemmove(void *dst, const void *src, const UINT size);
+
+/**
+ * Clear memory.
+ * Either use implementation from a Standard Library, or, if no Standard Library
+ * is available, a generic implementation.
+ * The define ::USE_BUILTIN_MEM_FUNCTIONS in genericStds.cpp controls what to
+ * use. The function arguments correspond to the standard memclear(). Please see
+ * MSDN documentation for details on how to use it.
+ */
+void FDKmemclear(void *memPtr, const UINT size);
+
+/**
+ * Fill memory with values.
+ * The function arguments correspond to the standard memset(). Please see MSDN
+ * documentation for details on how to use it.
+ */
+void FDKmemset(void *memPtr, const INT value, const UINT size);
+
+/* Compare function wrappers */
+INT FDKmemcmp(const void *s1, const void *s2, const UINT size);
+INT FDKstrcmp(const char *s1, const char *s2);
+INT FDKstrncmp(const char *s1, const char *s2, const UINT size);
+
+UINT FDKstrlen(const char *s);
+
+#define FDKmax(a, b) ((a) > (b) ? (a) : (b))
+#define FDKmin(a, b) ((a) < (b) ? (a) : (b))
+
+#define FDK_INT_MAX ((INT)0x7FFFFFFF)
+#define FDK_INT_MIN ((INT)0x80000000)
+
+/* FILE I/O */
+
+/*!
+ *  Check platform for endianness.
+ *
+ * \return  1 if platform is little endian, non-1 if platform is big endian.
+ */
+int IS_LITTLE_ENDIAN(void);
+
+/*!
+ *  Convert input value to little endian format.
+ *
+ * \param val  Value to be converted. It may be in both big or little endian.
+ * \return     Value in little endian format.
+ */
+UINT TO_LITTLE_ENDIAN(UINT val);
+
+/*!
+ * \fn     FDKFILE *FDKfopen(const char *filename, const char *mode);
+ *         Standard fopen() wrapper.
+ * \fn     INT FDKfclose(FDKFILE *FP);
+ *         Standard fclose() wrapper.
+ * \fn     INT FDKfseek(FDKFILE *FP, LONG OFFSET, int WHENCE);
+ *         Standard fseek() wrapper.
+ * \fn     INT FDKftell(FDKFILE *FP);
+ *         Standard ftell() wrapper.
+ * \fn     INT FDKfflush(FDKFILE *fp);
+ *         Standard fflush() wrapper.
+ * \fn     UINT FDKfwrite(const void *ptrf, INT size, UINT nmemb, FDKFILE *fp);
+ *         Standard fwrite() wrapper.
+ * \fn     UINT FDKfread(void *dst, INT size, UINT nmemb, FDKFILE *fp);
+ *         Standard fread() wrapper.
+ */
+typedef void FDKFILE;
+extern const INT FDKSEEK_SET, FDKSEEK_CUR, FDKSEEK_END;
+
+FDKFILE *FDKfopen(const char *filename, const char *mode);
+INT FDKfclose(FDKFILE *FP);
+INT FDKfseek(FDKFILE *FP, LONG OFFSET, int WHENCE);
+INT FDKftell(FDKFILE *FP);
+INT FDKfflush(FDKFILE *fp);
+UINT FDKfwrite(const void *ptrf, INT size, UINT nmemb, FDKFILE *fp);
+UINT FDKfread(void *dst, INT size, UINT nmemb, FDKFILE *fp);
+char *FDKfgets(void *dst, INT size, FDKFILE *fp);
+void FDKrewind(FDKFILE *fp);
+INT FDKfeof(FDKFILE *fp);
+
+/**
+ * \brief        Write each member in little endian order. Convert automatically
+ * to host endianness.
+ * \param ptrf   Pointer to memory where to read data from.
+ * \param size   Size of each item to be written.
+ * \param nmemb  Number of items to be written.
+ * \param fp     File pointer of type FDKFILE.
+ * \return       Number of items written on success and fwrite() error on failure.
+ */
+UINT FDKfwrite_EL(const void *ptrf, INT size, UINT nmemb, FDKFILE *fp);
+
+/**
+ * \brief        Read variable of size "size" as little endian. Convert
+ * automatically to host endianness. 4-byte alignment is enforced for 24 bit
+ * data, at 32 bit full scale.
+ * \param dst    Pointer to memory where to store data into.
+ * \param size   Size of each item to be read.
+ * \param nmemb  Number of items to be read.
+ * \param fp     File pointer of type FDKFILE.
+ * \return       Number of items read on success and fread() error on failure.
+ */
+UINT FDKfread_EL(void *dst, INT size, UINT nmemb, FDKFILE *fp);
+
+/**
+ * \brief  Print FDK software disclaimer.
+ */
+void FDKprintDisclaimer(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GENERICSTDS_H */

+ 411 - 0
native/include/fdk-aac/machine_type.h

@@ -0,0 +1,411 @@
+/* -----------------------------------------------------------------------------
+Software License for The Fraunhofer FDK AAC Codec Library for Android
+
+© Copyright  1995 - 2018 Fraunhofer-Gesellschaft zur Förderung der angewandten
+Forschung e.V. All rights reserved.
+
+ 1.    INTRODUCTION
+The Fraunhofer FDK AAC Codec Library for Android ("FDK AAC Codec") is software
+that implements the MPEG Advanced Audio Coding ("AAC") encoding and decoding
+scheme for digital audio. This FDK AAC Codec software is intended to be used on
+a wide variety of Android devices.
+
+AAC's HE-AAC and HE-AAC v2 versions are regarded as today's most efficient
+general perceptual audio codecs. AAC-ELD is considered the best-performing
+full-bandwidth communications codec by independent studies and is widely
+deployed. AAC has been standardized by ISO and IEC as part of the MPEG
+specifications.
+
+Patent licenses for necessary patent claims for the FDK AAC Codec (including
+those of Fraunhofer) may be obtained through Via Licensing
+(www.vialicensing.com) or through the respective patent owners individually for
+the purpose of encoding or decoding bit streams in products that are compliant
+with the ISO/IEC MPEG audio standards. Please note that most manufacturers of
+Android devices already license these patent claims through Via Licensing or
+directly from the patent owners, and therefore FDK AAC Codec software may
+already be covered under those patent licenses when it is used for those
+licensed purposes only.
+
+Commercially-licensed AAC software libraries, including floating-point versions
+with enhanced sound quality, are also available from Fraunhofer. Users are
+encouraged to check the Fraunhofer website for additional applications
+information and documentation.
+
+2.    COPYRIGHT LICENSE
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted without payment of copyright license fees provided that you
+satisfy the following conditions:
+
+You must retain the complete text of this software license in redistributions of
+the FDK AAC Codec or your modifications thereto in source code form.
+
+You must retain the complete text of this software license in the documentation
+and/or other materials provided with redistributions of the FDK AAC Codec or
+your modifications thereto in binary form. You must make available free of
+charge copies of the complete source code of the FDK AAC Codec and your
+modifications thereto to recipients of copies in binary form.
+
+The name of Fraunhofer may not be used to endorse or promote products derived
+from this library without prior written permission.
+
+You may not charge copyright license fees for anyone to use, copy or distribute
+the FDK AAC Codec software or your modifications thereto.
+
+Your modified versions of the FDK AAC Codec must carry prominent notices stating
+that you changed the software and the date of any change. For modified versions
+of the FDK AAC Codec, the term "Fraunhofer FDK AAC Codec Library for Android"
+must be replaced by the term "Third-Party Modified Version of the Fraunhofer FDK
+AAC Codec Library for Android."
+
+3.    NO PATENT LICENSE
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PATENT CLAIMS, including without
+limitation the patents of Fraunhofer, ARE GRANTED BY THIS SOFTWARE LICENSE.
+Fraunhofer provides no warranty of patent non-infringement with respect to this
+software.
+
+You may use this FDK AAC Codec software or modifications thereto only for
+purposes that are authorized by appropriate patent licenses.
+
+4.    DISCLAIMER
+
+This FDK AAC Codec software is provided by Fraunhofer on behalf of the copyright
+holders and contributors "AS IS" and WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES,
+including but not limited to the implied warranties of merchantability and
+fitness for a particular purpose. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE for any direct, indirect, incidental, special, exemplary,
+or consequential damages, including but not limited to procurement of substitute
+goods or services; loss of use, data, or profits, or business interruption,
+however caused and on any theory of liability, whether in contract, strict
+liability, or tort (including negligence), arising in any way out of the use of
+this software, even if advised of the possibility of such damage.
+
+5.    CONTACT INFORMATION
+
+Fraunhofer Institute for Integrated Circuits IIS
+Attention: Audio and Multimedia Departments - FDK AAC LL
+Am Wolfsmantel 33
+91058 Erlangen, Germany
+
+www.iis.fraunhofer.de/amm
+amm-info@iis.fraunhofer.de
+----------------------------------------------------------------------------- */
+
+/************************* System integration library **************************
+
+   Author(s):
+
+   Description:
+
+*******************************************************************************/
+
+/** \file   machine_type.h
+ *  \brief  Type defines for various processors and compiler tools.
+ */
+
+#if !defined(MACHINE_TYPE_H)
+#define MACHINE_TYPE_H
+
+#include <stddef.h> /* Needed to define size_t */
+
+#if defined(__ANDROID__) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 4) && \
+    (__GNUC_GNU_INLINE__ == 1)
+typedef unsigned long long uint64_t;
+#include <sys/types.h>
+#endif
+
+/* Library calling convention spec. __cdecl and friends might be added here as
+ * required. */
+#define LINKSPEC_H
+#define LINKSPEC_CPP
+
+/* for doxygen the following docu parts must be separated */
+/** \var  SCHAR
+ *        Data type representing at least 1 byte signed integer on all supported
+ * platforms.
+ */
+/** \var  UCHAR
+ *        Data type representing at least 1 byte unsigned integer on all
+ * supported platforms.
+ */
+/** \var  INT
+ *        Data type representing at least 4 byte signed integer on all supported
+ * platforms.
+ */
+/** \var  UINT
+ *        Data type representing at least 4 byte unsigned integer on all
+ * supported platforms.
+ */
+/** \var  LONG
+ *        Data type representing 4 byte signed integer on all supported
+ * platforms.
+ */
+/** \var  ULONG
+ *        Data type representing 4 byte unsigned integer on all supported
+ * platforms.
+ */
+/** \var  SHORT
+ *        Data type representing 2 byte signed integer on all supported
+ * platforms.
+ */
+/** \var  USHORT
+ *        Data type representing 2 byte unsigned integer on all supported
+ * platforms.
+ */
+/** \var  INT64
+ *        Data type representing 8 byte signed integer on all supported
+ * platforms.
+ */
+/** \var  UINT64
+ *        Data type representing 8 byte unsigned integer on all supported
+ * platforms.
+ */
+/** \def  SHORT_BITS
+ *        Number of bits the data type short represents. sizeof() is not suited
+ * to get this info, because a byte is not always defined as 8 bits.
+ */
+/** \def  CHAR_BITS
+ *        Number of bits the data type char represents. sizeof() is not suited
+ * to get this info, because a byte is not always defined as 8 bits.
+ */
+/** \var  INT_PCM
+ *        Data type representing the width of input and output PCM samples.
+ */
+
+typedef signed int INT;
+typedef unsigned int UINT;
+#ifdef __LP64__
+/* force FDK long-datatypes to 4 byte  */
+/* Use defines to avoid type alias problems on 64 bit machines. */
+#define LONG INT
+#define ULONG UINT
+#else  /* __LP64__ */
+typedef signed long LONG;
+typedef unsigned long ULONG;
+#endif /* __LP64__ */
+typedef signed short SHORT;
+typedef unsigned short USHORT;
+typedef signed char SCHAR;
+typedef unsigned char UCHAR;
+
+#define SHORT_BITS 16
+#define CHAR_BITS 8
+
+/* Define 64 bit base integer type. */
+#ifdef _MSC_VER
+typedef __int64 INT64;
+typedef unsigned __int64 UINT64;
+#else
+typedef long long INT64;
+typedef unsigned long long UINT64;
+#endif
+
+#ifndef NULL
+#ifdef __cplusplus
+#define NULL 0
+#else
+#define NULL ((void *)0)
+#endif
+#endif
+
+#if ((defined(__i686__) || defined(__i586__) || defined(__i386__) ||  \
+      defined(__x86_64__)) ||                                         \
+     (defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64)))) && \
+    !defined(FDK_ASSERT_ENABLE)
+#define FDK_ASSERT_ENABLE
+#endif
+
+#if defined(FDK_ASSERT_ENABLE)
+#include <assert.h>
+#define FDK_ASSERT(x) assert(x)
+#else
+#define FDK_ASSERT(ignore)
+#endif
+
+typedef SHORT INT_PCM;
+#define MAXVAL_PCM MAXVAL_SGL
+#define MINVAL_PCM MINVAL_SGL
+#define WAV_BITS 16
+#define SAMPLE_BITS 16
+#define SAMPLE_MAX ((INT_PCM)(((ULONG)1 << (SAMPLE_BITS - 1)) - 1))
+#define SAMPLE_MIN (~SAMPLE_MAX)
+
+/*!
+* \def    RAM_ALIGN
+*  Used to align memory as prefix before memory declaration. For example:
+   \code
+   RAM_ALIGN
+   int myArray[16];
+   \endcode
+
+   Note, that not all platforms support this mechanism. For example with TI
+compilers a preprocessor pragma is used, but to do something like
+
+   \code
+   #define RAM_ALIGN #pragma DATA_ALIGN(x)
+   \endcode
+
+   would require the preprocessor to process this line twice to fully resolve
+it. Hence, a fully platform-independent way to use alignment is not supported.
+
+* \def    ALIGNMENT_DEFAULT
+*         Default alignment in bytes.
+*/
+
+#define ALIGNMENT_DEFAULT 8
+
+/* RAM_ALIGN keyword causes memory alignment of global variables. */
+#if defined(_MSC_VER)
+#define RAM_ALIGN __declspec(align(ALIGNMENT_DEFAULT))
+#elif defined(__GNUC__)
+#define RAM_ALIGN __attribute__((aligned(ALIGNMENT_DEFAULT)))
+#else
+#define RAM_ALIGN
+#endif
+
+/*!
+ * \def  RESTRICT
+ *       The restrict keyword is supported by some platforms and RESTRICT maps
+ * to either the corresponding keyword on each platform or to void if the
+ *       compiler does not provide such feature. It tells the compiler that a
+ * pointer points to memory that does not overlap with other memories pointed to
+ * by other pointers. If this keyword is used and the assumption of no
+ * overlap is not true the resulting code might crash.
+ *
+ * \def  WORD_ALIGNED(x)
+ *       Tells the compiler that pointer x is 16 bit aligned. It does not cause
+ * the address itself to be aligned, but serves as a hint to the optimizer. The
+ * alignment of the pointer must be guaranteed, if not the code might
+ * crash.
+ *
+ * \def  DWORD_ALIGNED(x)
+ *       Tells the compiler that pointer x is 32 bit aligned. It does not cause
+ * the address itself to be aligned, but serves as a hint to the optimizer. The
+ * alignment of the pointer must be guaranteed, if not the code might
+ * crash.
+ *
+ */
+#define RESTRICT
+#define WORD_ALIGNED(x) C_ALLOC_ALIGNED_CHECK2((const void *)(x), 2);
+#define DWORD_ALIGNED(x) C_ALLOC_ALIGNED_CHECK2((const void *)(x), 4);
+
+/*-----------------------------------------------------------------------------------
+ * ALIGN_SIZE
+ *-----------------------------------------------------------------------------------*/
+/*!
+ * \brief  This macro aligns a given value depending on ::ALIGNMENT_DEFAULT.
+ *
+ * For example if #ALIGNMENT_DEFAULT equals 8, then:
+ * - ALIGN_SIZE(3) returns 8
+ * - ALIGN_SIZE(8) returns 8
+ * - ALIGN_SIZE(9) returns 16
+ */
+#define ALIGN_SIZE(a)                                                          \
+  ((a) + (((INT)ALIGNMENT_DEFAULT - ((size_t)(a) & (ALIGNMENT_DEFAULT - 1))) & \
+          (ALIGNMENT_DEFAULT - 1)))
+
+/*!
+ * \brief  This macro aligns a given address depending on ::ALIGNMENT_DEFAULT.
+ */
+#define ALIGN_PTR(a)                                      \
+  ((void *)((unsigned char *)(a) +                        \
+            ((((INT)ALIGNMENT_DEFAULT -                   \
+               ((size_t)(a) & (ALIGNMENT_DEFAULT - 1))) & \
+              (ALIGNMENT_DEFAULT - 1)))))
+
+/* Alignment macro for libSYS heap implementation */
+#define ALIGNMENT_EXTRES (ALIGNMENT_DEFAULT)
+#define ALGN_SIZE_EXTRES(a)                                               \
+  ((a) + (((INT)ALIGNMENT_EXTRES - ((INT)(a) & (ALIGNMENT_EXTRES - 1))) & \
+          (ALIGNMENT_EXTRES - 1)))
+
+/*!
+ * \def  FDK_FORCEINLINE
+ *       Sometimes compilers do not do what they are told to do, and in case of
+ * inlining some additional command might be necessary depending on the
+ * platform.
+ *
+ * \def  FDK_INLINE
+ *       Defines how the compiler is told to inline stuff.
+ */
+#ifndef FDK_FORCEINLINE
+#if defined(__GNUC__) && !defined(__SDE_MIPS__)
+#define FDK_FORCEINLINE inline __attribute((always_inline))
+#else
+#define FDK_FORCEINLINE inline
+#endif
+#endif
+
+#define FDK_INLINE static inline
+
+/*!
+ * \def  LNK_SECTION_DATA_L1
+ *       The LNK_SECTION_* defines allow memory to be drawn from specific memory
+ *       sections. Used as prefix before variable declaration.
+ *
+ * \def  LNK_SECTION_DATA_L2
+ *       See ::LNK_SECTION_DATA_L1
+ * \def  LNK_SECTION_L1_DATA_A
+ *       See ::LNK_SECTION_DATA_L1
+ * \def  LNK_SECTION_L1_DATA_B
+ *       See ::LNK_SECTION_DATA_L1
+ * \def  LNK_SECTION_CONSTDATA_L1
+ *       See ::LNK_SECTION_DATA_L1
+ * \def  LNK_SECTION_CONSTDATA
+ *       See ::LNK_SECTION_DATA_L1
+ * \def  LNK_SECTION_CODE_L1
+ *       See ::LNK_SECTION_DATA_L1
+ * \def  LNK_SECTION_CODE_L2
+ *       See ::LNK_SECTION_DATA_L1
+ * \def  LNK_SECTION_INITCODE
+ *       See ::LNK_SECTION_DATA_L1
+ */
+/**************************************************
+ * Code Section macros
+ **************************************************/
+#define LNK_SECTION_CODE_L1
+#define LNK_SECTION_CODE_L2
+#define LNK_SECTION_INITCODE
+
+/* Memory section macros. */
+
+/* default fall back */
+#define LNK_SECTION_DATA_L1
+#define LNK_SECTION_DATA_L2
+#define LNK_SECTION_CONSTDATA
+#define LNK_SECTION_CONSTDATA_L1
+
+#define LNK_SECTION_L1_DATA_A
+#define LNK_SECTION_L1_DATA_B
+
+/**************************************************
+ * Macros regarding static code analysis
+ **************************************************/
+#ifdef __cplusplus
+#if !defined(__has_cpp_attribute)
+#define __has_cpp_attribute(x) 0
+#endif
+#if defined(__clang__) && __has_cpp_attribute(clang::fallthrough)
+#define FDK_FALLTHROUGH [[clang::fallthrough]]
+#endif
+#endif
+
+#ifndef FDK_FALLTHROUGH
+#if defined(__GNUC__) && (__GNUC__ >= 7)
+#define FDK_FALLTHROUGH __attribute__((fallthrough))
+#else
+#define FDK_FALLTHROUGH
+#endif
+#endif
+
+#ifdef _MSC_VER
+/*
+ * Sometimes certain features are excluded from compilation and therefore the
+ * warning 4065 may occur: "switch statement contains 'default' but no 'case'
+ * labels" We consider this warning irrelevant and disable it.
+ */
+#pragma warning(disable : 4065)
+#endif
+
+#endif /* MACHINE_TYPE_H */

+ 202 - 0
native/include/fdk-aac/syslib_channelMapDescr.h

@@ -0,0 +1,202 @@
+/* -----------------------------------------------------------------------------
+Software License for The Fraunhofer FDK AAC Codec Library for Android
+
+© Copyright  1995 - 2018 Fraunhofer-Gesellschaft zur Förderung der angewandten
+Forschung e.V. All rights reserved.
+
+ 1.    INTRODUCTION
+The Fraunhofer FDK AAC Codec Library for Android ("FDK AAC Codec") is software
+that implements the MPEG Advanced Audio Coding ("AAC") encoding and decoding
+scheme for digital audio. This FDK AAC Codec software is intended to be used on
+a wide variety of Android devices.
+
+AAC's HE-AAC and HE-AAC v2 versions are regarded as today's most efficient
+general perceptual audio codecs. AAC-ELD is considered the best-performing
+full-bandwidth communications codec by independent studies and is widely
+deployed. AAC has been standardized by ISO and IEC as part of the MPEG
+specifications.
+
+Patent licenses for necessary patent claims for the FDK AAC Codec (including
+those of Fraunhofer) may be obtained through Via Licensing
+(www.vialicensing.com) or through the respective patent owners individually for
+the purpose of encoding or decoding bit streams in products that are compliant
+with the ISO/IEC MPEG audio standards. Please note that most manufacturers of
+Android devices already license these patent claims through Via Licensing or
+directly from the patent owners, and therefore FDK AAC Codec software may
+already be covered under those patent licenses when it is used for those
+licensed purposes only.
+
+Commercially-licensed AAC software libraries, including floating-point versions
+with enhanced sound quality, are also available from Fraunhofer. Users are
+encouraged to check the Fraunhofer website for additional applications
+information and documentation.
+
+2.    COPYRIGHT LICENSE
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted without payment of copyright license fees provided that you
+satisfy the following conditions:
+
+You must retain the complete text of this software license in redistributions of
+the FDK AAC Codec or your modifications thereto in source code form.
+
+You must retain the complete text of this software license in the documentation
+and/or other materials provided with redistributions of the FDK AAC Codec or
+your modifications thereto in binary form. You must make available free of
+charge copies of the complete source code of the FDK AAC Codec and your
+modifications thereto to recipients of copies in binary form.
+
+The name of Fraunhofer may not be used to endorse or promote products derived
+from this library without prior written permission.
+
+You may not charge copyright license fees for anyone to use, copy or distribute
+the FDK AAC Codec software or your modifications thereto.
+
+Your modified versions of the FDK AAC Codec must carry prominent notices stating
+that you changed the software and the date of any change. For modified versions
+of the FDK AAC Codec, the term "Fraunhofer FDK AAC Codec Library for Android"
+must be replaced by the term "Third-Party Modified Version of the Fraunhofer FDK
+AAC Codec Library for Android."
+
+3.    NO PATENT LICENSE
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PATENT CLAIMS, including without
+limitation the patents of Fraunhofer, ARE GRANTED BY THIS SOFTWARE LICENSE.
+Fraunhofer provides no warranty of patent non-infringement with respect to this
+software.
+
+You may use this FDK AAC Codec software or modifications thereto only for
+purposes that are authorized by appropriate patent licenses.
+
+4.    DISCLAIMER
+
+This FDK AAC Codec software is provided by Fraunhofer on behalf of the copyright
+holders and contributors "AS IS" and WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES,
+including but not limited to the implied warranties of merchantability and
+fitness for a particular purpose. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+CONTRIBUTORS BE LIABLE for any direct, indirect, incidental, special, exemplary,
+or consequential damages, including but not limited to procurement of substitute
+goods or services; loss of use, data, or profits, or business interruption,
+however caused and on any theory of liability, whether in contract, strict
+liability, or tort (including negligence), arising in any way out of the use of
+this software, even if advised of the possibility of such damage.
+
+5.    CONTACT INFORMATION
+
+Fraunhofer Institute for Integrated Circuits IIS
+Attention: Audio and Multimedia Departments - FDK AAC LL
+Am Wolfsmantel 33
+91058 Erlangen, Germany
+
+www.iis.fraunhofer.de/amm
+amm-info@iis.fraunhofer.de
+----------------------------------------------------------------------------- */
+
+/************************* System integration library **************************
+
+   Author(s):   Thomas Dietzen
+
+   Description:
+
+*******************************************************************************/
+
+/** \file   syslib_channelMapDescr.h
+ *  \brief  Function and structure declarations for the channel map descriptor implementation.
+ */
+
+#ifndef SYSLIB_CHANNELMAPDESCR_H
+#define SYSLIB_CHANNELMAPDESCR_H
+
+#include "machine_type.h"
+
+/**
+ * \brief  Contains information needed for a single channel map.
+ */
+typedef struct {
+  const UCHAR*
+      pChannelMap; /*!< Actual channel mapping for one single configuration. */
+  UCHAR numChannels; /*!< The number of channels for the channel map which is
+                        the maximum used channel index+1. */
+} CHANNEL_MAP_INFO;
+
+/**
+ * \brief   This is the main data struct. It contains the mapping for all
+ * channel configurations such as administration information.
+ *
+ * CAUTION: Do not access this structure directly from an algorithm-specific
+ * library. Always use one of the API access functions below!
+ */
+typedef struct {
+  const CHANNEL_MAP_INFO* pMapInfoTab; /*!< Table of channel maps. */
+  UINT mapInfoTabLen; /*!< Length of the channel map table array. */
+  UINT fPassThrough;  /*!< Flag that defines whether the specified mapping shall
+                         be applied  (value: 0) or the input just gets passed
+                         through (MPEG mapping). */
+} FDK_channelMapDescr;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \brief  Initialize a given channel map descriptor.
+ *
+ * \param  pMapDescr      Pointer to a channel map descriptor to be initialized.
+ * \param  pMapInfoTab    Table of channel maps to initialize the descriptor
+ with.
+ *                        If a NULL pointer is given a default table for
+ WAV-like mapping will be used.
+ * \param  mapInfoTabLen  Length of the channel map table array (pMapInfoTab).
+ If a zero length is given a default table for WAV-like mapping will be used.
+ * \param  fPassThrough   If the flag is set the reordering (given by
+ pMapInfoTab) will be bypassed.
+ */
+void FDK_chMapDescr_init(FDK_channelMapDescr* const pMapDescr,
+                         const CHANNEL_MAP_INFO* const pMapInfoTab,
+                         const UINT mapInfoTabLen, const UINT fPassThrough);
+
+/**
+ * \brief  Change the channel reordering state of a given channel map
+ * descriptor.
+ *
+ * \param  pMapDescr     Pointer to a (initialized) channel map descriptor.
+ * \param  fPassThrough  If the flag is set the reordering (given by
+ * pMapInfoTab) will be bypassed.
+ * \return               Value unequal to zero if set operation was not
+ * successful. And zero on success.
+ */
+int FDK_chMapDescr_setPassThrough(FDK_channelMapDescr* const pMapDescr,
+                                  UINT fPassThrough);
+
+/**
+ * \brief  Get the mapping value for a specific channel and map index.
+ *
+ * \param  pMapDescr  Pointer to channel map descriptor.
+ * \param  chIdx      Channel index.
+ * \param  mapIdx     Mapping index (corresponding to the channel configuration
+ * index).
+ * \return            Mapping value.
+ */
+UCHAR FDK_chMapDescr_getMapValue(const FDK_channelMapDescr* const pMapDescr,
+                                 const UCHAR chIdx, const UINT mapIdx);
+
+/**
+ * \brief  Evaluate whether channel map descriptor is reasonable or not.
+ *
+ * \param  pMapDescr Pointer to channel map descriptor.
+ * \return           Value unequal to zero if descriptor is valid, otherwise
+ * zero.
+ */
+int FDK_chMapDescr_isValid(const FDK_channelMapDescr* const pMapDescr);
+
+/**
+ * Extra variables for setting up Wg4 channel mapping.
+ */
+extern const CHANNEL_MAP_INFO FDK_mapInfoTabWg4[];
+extern const UINT FDK_mapInfoTabLenWg4;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !defined(SYSLIB_CHANNELMAPDESCR_H) */

+ 68 - 0
pom.xml

@@ -0,0 +1,68 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>com.jttserver</groupId>
+    <artifactId>jttserver</artifactId>
+    <version>1.0-SNAPSHOT</version>
+
+    <properties>
+        <maven.compiler.source>17</maven.compiler.source>
+        <maven.compiler.target>17</maven.compiler.target>
+        <junit.version>5.9.2</junit.version>
+        <netty.version>4.1.100.Final</netty.version>
+        <slf4j.version>2.0.9</slf4j.version>
+    </properties>
+
+    <dependencies>
+        <!-- Netty核心依赖 -->
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-buffer</artifactId>
+            <version>${netty.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-common</artifactId>
+            <version>${netty.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-transport</artifactId>
+            <version>${netty.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-codec</artifactId>
+            <version>${netty.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty-codec-http</artifactId>
+            <version>${netty.version}</version>
+        </dependency>
+        
+        <dependency>
+            <groupId>org.junit.jupiter</groupId>
+            <artifactId>junit-jupiter-api</artifactId>
+            <version>${junit.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.junit.jupiter</groupId>
+            <artifactId>junit-jupiter-engine</artifactId>
+            <version>${junit.version}</version>
+            <scope>test</scope>
+        </dependency>
+        
+        <!-- SLF4J 日志框架 -->
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <version>${slf4j.version}</version>
+        </dependency>
+    </dependencies>
+
+</project>

+ 32 - 0
src/main/java/com/jttserver/Server.java

@@ -0,0 +1,32 @@
+package com.jttserver;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Properties;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class Server {
+    private static final Logger logger = LoggerFactory.getLogger(Server.class);
+    public static void main(String[] args) {
+        // 打印 Git Commit ID
+        showGitHash();
+    }
+
+    public static void showGitHash() {
+        Properties props = new Properties();
+        try (InputStream is = Server.class.getClassLoader().getResourceAsStream("git.properties")) {
+            if (is != null) {
+                props.load(is);
+                String commitId = props.getProperty("git.commit.id.abbrev");
+                String branch = props.getProperty("git.branch");
+                logger.info("当前的Git-branch: {}, Git-Commit-ID: {}", branch, commitId);
+            } else {
+                logger.warn("Git commit info not found.");
+            }
+        } catch (IOException e) {
+            logger.error("Failed to load git.properties", e);
+        }
+    }
+}

+ 717 - 0
src/main/java/com/jttserver/codec/FlvPacketizer.java

@@ -0,0 +1,717 @@
+package com.jttserver.codec;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.Map;
+
+import com.jttserver.protocol.Jtt1078NaluPacket;
+import com.jttserver.protocol.Jtt1078PacketParams;
+import com.jttserver.protocol.JttConstants;
+import com.jttserver.utils.CommonUtils;
+import com.jttserver.codec.audio.AudioDecoder;
+import com.jttserver.codec.nativeaac.AacEncoderNative;
+import com.jttserver.codec.audio.AudioDecoder.AacEncodeResult;
+import com.jttserver.codec.audio.AudioUtils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * FLV packetizer: wraps H.264/H.265 video NALUs and AAC audio frames into
+ * FLV tags, keeping per-channel codec state (sequence headers, most recent
+ * key frame, native AAC encoder handle).
+ */
+public class FlvPacketizer {
+    private static final Logger logger = LoggerFactory.getLogger(FlvPacketizer.class);
+    // FLV file signature and header bytes
+    private static final byte[] FLV_HEADER = new byte[] {
+            0x46, 0x4C, 0x56, // "FLV"
+            0x01, // version
+            0x05, // flags: 0x05 = audio + video tags present
+            0x00, 0x00, 0x00, 0x09 // header length
+    };
+
+    // FLV file header size in bytes (documentation only; the header is written via FLV_HEADER)
+    @SuppressWarnings("unused")
+    private static final int FLV_HEADER_SIZE = 9;
+
+    // PreviousTagSize0 — always zero before the first tag
+    private static final byte[] PREVIOUS_TAG_SIZE0 = new byte[] { 0x00, 0x00, 0x00, 0x00 };
+
+    // H.264 NALU types
+    private static final byte NALU_TYPE_H264_SPS = 7;
+    private static final byte NALU_TYPE_H264_PPS = 8;
+    private static final byte NALU_TYPE_H264_IDR = 5;
+
+    // H.265 NALU types
+    private static final byte NALU_TYPE_H265_VPS = 32;
+    private static final byte NALU_TYPE_H265_SPS = 33;
+    private static final byte NALU_TYPE_H265_PPS = 34;
+    private static final byte NALU_TYPE_H265_IDR = 19; // IDR_W_RADL
+    private static final byte NALU_TYPE_H265_IDR_N_LP = 20; // IDR_N_LP
+
+    // Per-channel codec state, keyed by channel id
+    private final Map<String, CodecInfo> channelCodecInfo = new ConcurrentHashMap<>();
+
+    /**
+     * Per-channel codec state. Callers guard instances by synchronizing on
+     * the CodecInfo object itself.
+     */
+    private static class CodecInfo {
+        // Video payload type (JttConstants.PAYLOAD_TYPE_H264/_H265), or -1 when unknown
+        int payloadVideo = -1;
+        // True once the video payload type has been latched (avoids re-detection)
+        boolean payloadVideoDetermined = false;
+        // SPS / PPS for H.264
+        byte[] spsData;
+        byte[] ppsData;
+        // VPS / SPS / PPS for H.265
+        byte[] vpsData;
+        byte[] spsData265;
+        byte[] ppsData265;
+        // True once the FLV header + video sequence header tag has been built
+        boolean flvAndSequenceHeaderGenerated = false;
+        // Cached FLV header + sequence header tag (AVC or HEVC)
+        byte[] flvAndSequenceHeaderTag;
+
+        // Audio state
+        int payloadAudio = -1; // audio payload type
+        boolean payloadAudioDetermined = false; // true once the audio payload type has been latched
+        long aacHandle = 0; // native AAC encoder handle (0 = not initialized)
+        int aacSampleRate = 8000; // default 8 kHz
+        int aacChannels = 1; // default mono
+        int aacBitrate = 16000; // default bitrate
+        boolean aacUseAdts = false; // true: emit ADTS frames (file debugging); false: raw AAC frames for FLV
+        short[] pcmAccum; // PCM accumulation buffer (one AAC frame = 1024 samples per channel)
+        int pcmAccumPos = 0; // current write position within pcmAccum (original comment was a copy-paste error)
+        // AAC sequence header cache and generated flag
+        boolean aacSequenceHeaderGenerated = false;
+        byte[] aacSequenceHeaderTag;
+
+        // Most recent key-frame tag, served via getRecentIFrame()
+        byte[] recentIFrame;
+    }
+
+    /**
+     * 创建FLV头
+     * 
+     * @return FLV头数据
+     */
+    public byte[] createFlvHeader() {
+        ByteArrayOutputStream header = new ByteArrayOutputStream();
+        try {
+            header.write(FLV_HEADER);
+            header.write(PREVIOUS_TAG_SIZE0);
+        } catch (IOException e) {
+            // 不会发生
+        }
+        return header.toByteArray();
+    }
+
+    /**
+     * Wraps one video NALU into FLV tag bytes.
+     *
+     * The channel's codec (H.264 or H.265) is latched from the payload type
+     * of the first recognized packet; packets arriving before a recognized
+     * payload type produce an empty result.
+     *
+     * @param channelId 通道ID
+     * @param naluData  NALU bytes
+     * @param params    packet parameters (payload type)
+     * @param timestamp tag timestamp in milliseconds
+     * @return FLV video tag bytes, or an empty array when nothing was produced
+     */
+    public byte[] processVideoNalu(String channelId, byte[] naluData,
+            Jtt1078PacketParams params, long timestamp) {
+        if (naluData == null || naluData.length < 1) {
+            return new byte[0];
+        }
+
+        CodecInfo info = channelCodecInfo.computeIfAbsent(channelId, key -> new CodecInfo());
+
+        synchronized (info) {
+            // Latch the video codec from the first packet's payload type
+            if (!info.payloadVideoDetermined) {
+                int payloadType = params.payloadType;
+                if (payloadType == JttConstants.PAYLOAD_TYPE_H264
+                        || payloadType == JttConstants.PAYLOAD_TYPE_H265) {
+                    info.payloadVideo = payloadType;
+                    info.payloadVideoDetermined = true;
+                } else {
+                    // Unknown codec — stay undetermined and drop this packet
+                    info.payloadVideo = -1;
+                    return new byte[0];
+                }
+            }
+
+            if (info.payloadVideo == JttConstants.PAYLOAD_TYPE_H264) {
+                return processH264Nalu(channelId, naluData, params, timestamp, info);
+            }
+            if (info.payloadVideo == JttConstants.PAYLOAD_TYPE_H265) {
+                return processH265Nalu(channelId, naluData, params, timestamp, info);
+            }
+            return new byte[0];
+        }
+    }
+
+    /**
+     * Converts one JT/T 1078 audio payload into an FLV audio tag.
+     *
+     * Pipeline: strip the optional HiSilicon audio header, decode the payload
+     * to PCM, accumulate 1024*channels samples, encode one AAC frame, and
+     * wrap it into an FLV audio tag. Returns an empty array while the
+     * accumulator is still filling or when the payload type is unsupported.
+     *
+     * @param channelId 通道ID
+     * @param audioData raw audio payload bytes
+     * @param params    packet parameters (payload type)
+     * @param timestamp tag timestamp in milliseconds
+     * @return FLV audio tag bytes, or an empty array when nothing was produced
+     */
+    public byte[] processAudioNalu(String channelId, byte[] audioData,
+            Jtt1078PacketParams params, long timestamp) {
+        if (audioData == null || audioData.length < 1) {
+            return new byte[0];
+        }
+
+        // Detect and strip a HiSilicon audio header (00015200 or 0001??00,
+        // where ?? is half the remaining audio length)
+        byte[] audioBytes = AudioDecoder.stripHisiliconAudioHeader(audioData);
+        CodecInfo codecInfo = channelCodecInfo.computeIfAbsent(channelId, k -> new CodecInfo());
+
+        byte[] aacBytes;
+        synchronized (codecInfo) {
+            // Latch the audio payload type on first use and size the PCM accumulator.
+            // Sample rate / channel count could be derived from params here if
+            // available; defaults (8 kHz mono) are used otherwise.
+            if (!codecInfo.payloadAudioDetermined) {
+                codecInfo.payloadAudio = params.payloadType;
+                codecInfo.payloadAudioDetermined = true;
+                codecInfo.pcmAccum = new short[1024 * codecInfo.aacChannels];
+            }
+
+            // Lazily initialize the native AAC encoder
+            if (codecInfo.aacHandle == 0) {
+                try {
+                    codecInfo.aacHandle = AacEncoderNative.initEncoder(
+                            codecInfo.aacSampleRate,
+                            codecInfo.aacChannels,
+                            codecInfo.aacBitrate,
+                            2, // AAC-LC
+                            codecInfo.aacUseAdts);
+                } catch (Throwable t) {
+                    logger.error("初始化AAC编码器失败:{}", t.getMessage(), t);
+                    codecInfo.aacHandle = 0;
+                    return new byte[0];
+                }
+            }
+
+            // Decode the payload to 16-bit PCM samples
+            short[] pcm = AudioDecoder.decodePayloadToPcm(codecInfo.payloadAudio, audioBytes);
+            if (pcm == null) {
+                logger.warn("暂不支持的音频负载类型: {}", JttConstants.getPayloadTypeName(codecInfo.payloadAudio));
+                return new byte[0];
+            }
+            if (pcm.length == 0) {
+                return new byte[0];
+            }
+
+            // Accumulate until 1024*channels samples are available, then encode one frame
+            AacEncodeResult aacResult = AudioDecoder.accumulateAndEncodeToAacReturnOneFrame(
+                    pcm,
+                    codecInfo.pcmAccum,
+                    codecInfo.pcmAccumPos,
+                    codecInfo.aacChannels,
+                    codecInfo.aacHandle);
+            codecInfo.pcmAccumPos = aacResult.newAccumPos;
+            aacBytes = aacResult.aacFrame;
+        }
+
+        // Fixed: always return an empty array (never null) when no frame was
+        // produced, matching every other return path of this method. The old
+        // code returned aacBytes directly, which could be null.
+        if (aacBytes == null || aacBytes.length == 0) {
+            return new byte[0];
+        }
+
+        return createAudioTag(JttConstants.AAC_SEQUENCE_DATA, aacBytes, timestamp);
+    }
+
+    /**
+     * Builds one FLV video tag (tag header + video data) followed by its
+     * 4-byte PreviousTagSize field.
+     *
+     * @param frameType       frame type (high nibble) and codec id (low nibble)
+     * @param avcPacketType   0 = sequence header, 1 = NALU data
+     * @param compositionTime composition time offset (may be negative)
+     * @param data            tag payload (may be null or empty)
+     * @param timestamp       tag timestamp in milliseconds
+     * @return tag bytes plus PreviousTagSize, or an empty array on error
+     */
+    public byte[] createVideoTag(byte frameType, byte avcPacketType, int compositionTime, byte[] data,
+            long timestamp) {
+        int payloadLength = (data == null) ? 0 : data.length;
+        // DataSize = payload + 5 bytes (FrameType/CodecID + AVCPacketType + CompositionTime)
+        int dataLength = payloadLength + 5;
+        if (dataLength > 0xFFFFFF) {
+            // Does not fit the 24-bit DataSize field — refuse to emit a corrupt tag
+            logger.error("FLV Video Tag data too large: {}", dataLength);
+            return new byte[0];
+        }
+
+        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+        try {
+            buffer.write(0x09); // TagType: video
+            CommonUtils.writeUi24(buffer, dataLength);
+            CommonUtils.writeTimestamp(buffer, (int) timestamp); // millisecond timestamp
+            // StreamID — always 0
+            buffer.write(0x00);
+            buffer.write(0x00);
+            buffer.write(0x00);
+
+            buffer.write(frameType); // FrameType + CodecID
+            buffer.write(avcPacketType); // AVCPacketType
+            CommonUtils.writeSi24(buffer, compositionTime); // signed 24-bit offset
+
+            if (payloadLength > 0) {
+                buffer.write(data);
+            }
+
+            // PreviousTagSize = size of everything written so far
+            int tagSize = buffer.size();
+            buffer.write(ByteBuffer.allocate(4).putInt(tagSize).array());
+            return buffer.toByteArray();
+        } catch (IOException e) {
+            logger.error("创建视频标签时发生错误", e);
+            return new byte[0];
+        }
+    }
+
+    /**
+     * Builds the FLV header plus an AVC sequence-header video tag
+     * (AVCDecoderConfigurationRecord containing SPS and PPS), caches it on
+     * the channel state and marks it generated.
+     *
+     * @param codecInfo per-channel state receiving the cached header
+     * @param sps       SPS NALU; must be at least 4 bytes because
+     *                  profile / compatibility / level are read from it
+     * @param pps       PPS NALU
+     * @param timestamp tag timestamp in milliseconds
+     * @return FLV header + sequence header bytes, or an empty array on failure
+     */
+    private byte[] createAVCSequenceHeader(CodecInfo codecInfo, byte[] sps, byte[] pps, long timestamp) {
+        // Fixed: guard against a truncated SPS before indexing sps[1..3]
+        if (sps == null || sps.length < 4 || pps == null || pps.length == 0) {
+            logger.error("Invalid SPS/PPS for AVC sequence header: spsLen={}, ppsLen={}",
+                    sps == null ? -1 : sps.length, pps == null ? -1 : pps.length);
+            return new byte[0];
+        }
+        try {
+            // AVCDecoderConfigurationRecord
+            ByteArrayOutputStream avcConfig = new ByteArrayOutputStream();
+
+            // configurationVersion
+            avcConfig.write(0x01);
+            // AVCProfileIndication / profile_compatibility / AVCLevelIndication, copied from the SPS
+            avcConfig.write(sps[1]);
+            avcConfig.write(sps[2]);
+            avcConfig.write(sps[3]);
+            // reserved(6) + lengthSizeMinusOne(2) = 3 -> 4-byte NALU length prefixes
+            avcConfig.write(0xFF);
+
+            // reserved(3) + numOfSequenceParameterSets = 1
+            avcConfig.write(0xE1);
+            // SPS length + data
+            avcConfig.write(ByteBuffer.allocate(2).putShort((short) sps.length).array());
+            avcConfig.write(sps);
+
+            // numOfPictureParameterSets = 1
+            avcConfig.write(0x01);
+            // PPS length + data
+            avcConfig.write(ByteBuffer.allocate(2).putShort((short) pps.length).array());
+            avcConfig.write(pps);
+
+            // 0x17 = keyframe + AVC codec; AVCPacketType 0 = sequence header
+            byte[] videoTag = createVideoTag((byte) 0x17, (byte) 0x00, 0, avcConfig.toByteArray(), timestamp);
+
+            ByteArrayOutputStream headerAVC = new ByteArrayOutputStream();
+            headerAVC.write(createFlvHeader());
+            headerAVC.write(videoTag);
+
+            // Cache on the channel state so late subscribers can replay it
+            codecInfo.flvAndSequenceHeaderTag = headerAVC.toByteArray();
+            codecInfo.flvAndSequenceHeaderGenerated = true;
+            return codecInfo.flvAndSequenceHeaderTag;
+        } catch (IOException e) {
+            // Fixed: previously swallowed silently with no log
+            logger.error("创建AVC序列头时发生错误", e);
+            return new byte[0];
+        }
+    }
+
+    /**
+     * Handles one H.264 NALU: caches SPS/PPS and emits the AVC sequence
+     * header once both are known; wraps slice NALUs (types 1, 2, 5) into FLV
+     * video tags; everything else is dropped.
+     *
+     * @return FLV tag bytes, or an empty array when nothing was produced
+     */
+    private byte[] processH264Nalu(String channelId, byte[] naluData,
+            Jtt1078PacketParams params,
+            long timestamp, CodecInfo codecInfo) {
+        // nal_unit_type is the low 5 bits of the first byte
+        byte naluType = (byte) (naluData[0] & 0x1F);
+
+        try {
+            if (naluType == NALU_TYPE_H264_SPS || naluType == NALU_TYPE_H264_PPS) {
+                if (naluType == NALU_TYPE_H264_SPS) {
+                    codecInfo.spsData = naluData;
+                } else {
+                    codecInfo.ppsData = naluData;
+                }
+
+                // Emit the AVC sequence header once both SPS and PPS are cached
+                if (codecInfo.spsData != null && codecInfo.ppsData != null
+                        && !codecInfo.flvAndSequenceHeaderGenerated) {
+                    return createAVCSequenceHeader(codecInfo, codecInfo.spsData, codecInfo.ppsData,
+                            timestamp);
+                }
+                return new byte[0];
+            }
+
+            // Slice NALUs: 5 = IDR, 1 = non-IDR slice, 2 = slice data partition A
+            if (naluType == NALU_TYPE_H264_IDR || naluType == 1 || naluType == 2) {
+                byte frameType = (naluType == NALU_TYPE_H264_IDR) ? (byte) 0x17 : (byte) 0x27;
+                byte[] payload = CommonUtils.toLengthPrefixedPayloadFromAnnexB(naluData);
+                byte[] videoTag = createVideoTag(frameType, (byte) 0x01, 0, payload, timestamp);
+                // Keep the latest key frame so new subscribers can start immediately
+                if (naluType == NALU_TYPE_H264_IDR) {
+                    codecInfo.recentIFrame = videoTag;
+                }
+                return videoTag;
+            }
+        } catch (Exception e) {
+            // Fixed: removed the redundant e.printStackTrace(); the logger call
+            // already records the stack trace
+            logger.error("处理H.264 NALU时出错", e);
+        }
+
+        return new byte[0];
+    }
+
+    /**
+     * Handles one H.265 NALU: caches VPS/SPS/PPS and emits the HEVC sequence
+     * header once all three are known; wraps IDR (19/20) and type-1 slice
+     * segments into FLV video tags; everything else is dropped.
+     *
+     * NOTE(review): among non-IDR slices only type 1 (TRAIL_R) is forwarded;
+     * other trailing-picture types (0, 2..9) are dropped — confirm against
+     * the device encoder's actual output.
+     *
+     * @return FLV tag bytes, or an empty array when nothing was produced
+     */
+    private byte[] processH265Nalu(String channelId, byte[] naluData,
+            Jtt1078PacketParams params,
+            long timestamp, CodecInfo codecInfo) {
+        // nal_unit_type is bits 6..1 of the first byte
+        byte naluType = (byte) ((naluData[0] >> 1) & 0x3F);
+
+        try {
+            if (naluType == NALU_TYPE_H265_VPS || naluType == NALU_TYPE_H265_SPS
+                    || naluType == NALU_TYPE_H265_PPS) {
+                if (naluType == NALU_TYPE_H265_VPS) {
+                    codecInfo.vpsData = naluData;
+                } else if (naluType == NALU_TYPE_H265_SPS) {
+                    codecInfo.spsData265 = naluData;
+                } else {
+                    codecInfo.ppsData265 = naluData;
+                }
+
+                // Emit the HEVC sequence header once VPS, SPS and PPS are all cached
+                if (codecInfo.vpsData != null && codecInfo.spsData265 != null && codecInfo.ppsData265 != null
+                        && !codecInfo.flvAndSequenceHeaderGenerated) {
+                    return createHEVCSequenceHeader(codecInfo, codecInfo.vpsData, codecInfo.spsData265,
+                            codecInfo.ppsData265, timestamp);
+                }
+                return new byte[0];
+            }
+
+            // Fixed: removed the redundant "naluType == 20" clause — 20 is
+            // NALU_TYPE_H265_IDR_N_LP and was already matched by the condition.
+            if (naluType == NALU_TYPE_H265_IDR || naluType == NALU_TYPE_H265_IDR_N_LP
+                    || naluType == 1) {
+                byte frameType = (naluType == NALU_TYPE_H265_IDR || naluType == NALU_TYPE_H265_IDR_N_LP)
+                        ? (byte) 0x1C // key frame + HEVC codec id
+                        : (byte) 0x2C; // inter frame + HEVC codec id
+                byte[] payload = CommonUtils.toLengthPrefixedPayloadFromAnnexB(naluData);
+                return createVideoTag(frameType, (byte) 0x01, 0, payload, timestamp);
+            }
+        } catch (Exception e) {
+            // Fixed: removed the redundant e.printStackTrace(); the logger call
+            // already records the stack trace
+            logger.error("处理H.265 NALU时出错", e);
+        }
+
+        return new byte[0];
+    }
+
+    /**
+     * Builds the FLV header plus an HEVC sequence-header video tag
+     * (HEVCDecoderConfigurationRecord containing VPS, SPS and PPS), caches
+     * it on the channel state and marks it generated.
+     *
+     * NOTE(review): the general_profile/compatibility/level fields are
+     * written as zeros rather than parsed from the SPS; strict players may
+     * reject this — confirm with the target players.
+     *
+     * @param codecInfo per-channel state receiving the cached header
+     * @param vps       VPS NALU
+     * @param sps       SPS NALU
+     * @param pps       PPS NALU
+     * @param timestamp tag timestamp in milliseconds
+     * @return FLV header + sequence header bytes, or an empty array on failure
+     */
+    private byte[] createHEVCSequenceHeader(CodecInfo codecInfo, byte[] vps, byte[] sps, byte[] pps, long timestamp) {
+        try {
+            // HEVCDecoderConfigurationRecord
+            ByteArrayOutputStream hevcConfig = new ByteArrayOutputStream();
+
+            // configurationVersion
+            hevcConfig.write(0x01);
+            // general_profile_space(2) + general_tier_flag(1) + general_profile_idc(5)
+            hevcConfig.write(0x00);
+            // general_profile_compatibility_flags (4 bytes)
+            hevcConfig.write(new byte[] { 0x00, 0x00, 0x00, 0x00 });
+            // general_constraint_indicator_flags (6 bytes)
+            hevcConfig.write(new byte[] { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 });
+            // general_level_idc
+            hevcConfig.write(0x00);
+            // reserved(4) + min_spatial_segmentation_idc(12)
+            hevcConfig.write(new byte[] { (byte) 0xF0, 0x00 });
+            // reserved(6) + parallelismType(2)
+            hevcConfig.write((byte) 0xFC);
+            // reserved(6) + chromaFormat(2)
+            hevcConfig.write((byte) 0xFC);
+            // bitDepthLumaMinus8(3) + reserved(5)
+            hevcConfig.write((byte) 0xF8);
+            // bitDepthChromaMinus8(3) + reserved(5)
+            hevcConfig.write((byte) 0xF8);
+            // avgFrameRate (2 bytes)
+            hevcConfig.write(new byte[] { 0x00, 0x00 });
+            // constantFrameRate(2) + numTemporalLayers(3) + temporalIdNested(1) + lengthSizeMinusOne(2)
+            hevcConfig.write((byte) 0x03); // lengthSizeMinusOne = 3 -> 4-byte NALU length prefixes
+            // numOfArrays: VPS, SPS, PPS
+            hevcConfig.write(0x03);
+
+            // One array per parameter-set type, each holding a single NALU
+            writeHevcParameterSetArray(hevcConfig, vps);
+            writeHevcParameterSetArray(hevcConfig, sps);
+            writeHevcParameterSetArray(hevcConfig, pps);
+
+            // 0x1C = key frame + HEVC codec id; packet type 0 = sequence header
+            byte[] videoTag = createVideoTag((byte) 0x1C, (byte) 0x00, 0, hevcConfig.toByteArray(), timestamp);
+
+            ByteArrayOutputStream headerHEVC = new ByteArrayOutputStream();
+            headerHEVC.write(createFlvHeader());
+            headerHEVC.write(videoTag);
+
+            // Cache on the channel state so late subscribers can replay it
+            codecInfo.flvAndSequenceHeaderTag = headerHEVC.toByteArray();
+            codecInfo.flvAndSequenceHeaderGenerated = true;
+            return codecInfo.flvAndSequenceHeaderTag;
+        } catch (IOException e) {
+            // Fixed: previously swallowed silently with no log
+            logger.error("创建HEVC序列头时发生错误", e);
+            return new byte[0];
+        }
+    }
+
+    /**
+     * Writes one HEVCDecoderConfigurationRecord parameter-set array entry:
+     * array_completeness(1) + reserved(1) + NAL_unit_type(6), numNalus = 1,
+     * then the 16-bit NALU length and the NALU bytes.
+     */
+    private static void writeHevcParameterSetArray(ByteArrayOutputStream out, byte[] nalu) throws IOException {
+        out.write(0x40 | ((nalu[0] >> 1) & 0x3F));
+        out.write(new byte[] { 0x00, 0x01 });
+        out.write(ByteBuffer.allocate(2).putShort((short) nalu.length).array());
+        out.write(nalu);
+    }
+
+    // 获取当前已缓存的FLV头和序列头数据
+    public byte[] getFlvAndsSequenceHeader(String channelId) {
+        CodecInfo codecInfo = channelCodecInfo.get(channelId);
+        if (codecInfo == null || !codecInfo.flvAndSequenceHeaderGenerated) {
+            return new byte[0];
+        }
+        synchronized (codecInfo) {
+            return (codecInfo.flvAndSequenceHeaderTag != null) ? codecInfo.flvAndSequenceHeaderTag : new byte[0];
+        }
+    }
+
+    /**
+     * Drops all cached codec state for a channel and releases the native AAC
+     * encoder handle it may hold.
+     *
+     * @param channelId 通道ID
+     */
+    public void clearChannel(String channelId) {
+        CodecInfo removed = channelCodecInfo.remove(channelId);
+        if (removed == null) {
+            return;
+        }
+        synchronized (removed) {
+            // Release the native AAC encoder, if one was created
+            if (removed.aacHandle != 0) {
+                try {
+                    AacEncoderNative.close(removed.aacHandle);
+                } catch (Throwable t) {
+                    logger.warn("关闭AAC编码器失败: {}", t.getMessage());
+                }
+                removed.aacHandle = 0;
+            }
+            // Reset the PCM accumulation buffer
+            removed.pcmAccumPos = 0;
+            removed.pcmAccum = null;
+        }
+    }
+
+    /**
+     * Builds one FLV audio tag (AAC) followed by its 4-byte PreviousTagSize.
+     *
+     * @param aacPacketType 0 = AAC sequence header, 1 = raw AAC frame
+     * @param data          audio payload (may be null or empty)
+     * @param timestamp     tag timestamp in milliseconds
+     * @return tag bytes plus PreviousTagSize, or an empty array on error
+     */
+    public byte[] createAudioTag(byte aacPacketType, byte[] data, long timestamp) {
+        ByteArrayOutputStream tag = new ByteArrayOutputStream();
+
+        try {
+            // TagType: audio
+            tag.write(0x08);
+
+            // DataSize = payload + 2 bytes (sound header + AACPacketType)
+            int payloadLength = (data != null) ? data.length : 0;
+            int dataLength = payloadLength + 2;
+            if (dataLength > 0xFFFFFF) {
+                logger.error("FLV Audio Tag data too large: {}", dataLength);
+                return new byte[0];
+            }
+            CommonUtils.writeUi24(tag, dataLength);
+
+            // Millisecond timestamp
+            CommonUtils.writeTimestamp(tag, (int) timestamp);
+
+            // StreamID — always 0
+            tag.write(new byte[] { 0x00, 0x00, 0x00 });
+
+            // SoundFormat(4) + SoundRate(2) + SoundSize(1) + SoundType(1).
+            // 0xAF = AAC/44kHz/16-bit/stereo — for AAC these rate/channel bits
+            // are fixed by the FLV format and the real parameters come from
+            // the AudioSpecificConfig sequence header.
+            tag.write(0xAF);
+            tag.write(aacPacketType); // AACPacketType (0: sequence header, 1: raw data)
+
+            // Fixed: guard against a null/empty payload — the old code called
+            // tag.write(data) unconditionally and threw NullPointerException
+            // even though dataLength was computed null-safely.
+            if (payloadLength > 0) {
+                tag.write(data);
+            }
+
+            // PreviousTagSize
+            int tagSize = tag.size();
+            byte[] previousTagSize = ByteBuffer.allocate(4).putInt(tagSize).array();
+
+            // Concatenate the tag and its PreviousTagSize
+            ByteArrayOutputStream result = new ByteArrayOutputStream();
+            result.write(tag.toByteArray());
+            result.write(previousTagSize);
+
+            return result.toByteArray();
+        } catch (IOException e) {
+            logger.error("创建音频标签时发生错误", e);
+            return new byte[0];
+        }
+    }
+
+    /**
+     * Builds an AAC sequence-header tag: an AudioSpecificConfig wrapped in an
+     * FLV audio tag with AACPacketType = 0, ready to be written to a stream.
+     *
+     * @param sampleRate sample rate in Hz
+     * @param channels   channel count (values <= 0 are treated as mono)
+     * @param timestamp  tag timestamp in milliseconds
+     * @return FLV audio tag bytes carrying the AudioSpecificConfig
+     */
+    public byte[] createAacSequenceHeader(int sampleRate, int channels, long timestamp) {
+        int channelCount = (channels > 0) ? channels : 1;
+        byte[] asc = AudioUtils.buildAudioSpecificConfig(sampleRate, channelCount);
+        return createAudioTag((byte) 0, asc, timestamp);
+    }
+
+    /**
+     * Returns the AAC sequence-header tag for a channel, generating it on
+     * first use from the channel's configured sample rate and channel count.
+     *
+     * Once a header has been generated, subsequent calls return an EMPTY
+     * array so the header is never broadcast twice; use
+     * {@link #getAacSequenceHeader(String)} to read the cached tag.
+     *
+     * @param channelId 通道ID
+     * @param timestamp tag timestamp in milliseconds
+     * @return the freshly generated header tag, or an empty array if one
+     *         was already generated for this channel
+     */
+    public byte[] getOrCreateAacSequenceHeader(String channelId, long timestamp) {
+        CodecInfo info = channelCodecInfo.computeIfAbsent(channelId, key -> new CodecInfo());
+        synchronized (info) {
+            boolean alreadyGenerated = info.aacSequenceHeaderGenerated
+                    && info.aacSequenceHeaderTag != null
+                    && info.aacSequenceHeaderTag.length > 0;
+            if (alreadyGenerated) {
+                return new byte[0];
+            }
+            byte[] headerTag = createAacSequenceHeader(info.aacSampleRate, info.aacChannels, timestamp);
+            info.aacSequenceHeaderTag = headerTag;
+            info.aacSequenceHeaderGenerated = true;
+            return headerTag;
+        }
+    }
+
+    /**
+     * Returns the cached AAC sequence-header tag for a channel.
+     *
+     * Fixed: previously used getOrDefault(channelId, new CodecInfo()), which
+     * allocated (and synchronized on) a throwaway instance on every miss; a
+     * plain lookup with a null check is equivalent and allocation-free.
+     *
+     * @param channelId 通道ID
+     * @return the cached tag, or an empty array if none exists
+     */
+    public byte[] getAacSequenceHeader(String channelId) {
+        CodecInfo codecInfo = channelCodecInfo.get(channelId);
+        if (codecInfo == null) {
+            return new byte[0];
+        }
+        synchronized (codecInfo) {
+            return codecInfo.aacSequenceHeaderTag != null ? codecInfo.aacSequenceHeaderTag : new byte[0];
+        }
+    }
+
+    /**
+     * Returns the most recent key-frame tag cached for a channel.
+     *
+     * Fixed: previously used getOrDefault(channelId, new CodecInfo()), which
+     * allocated (and synchronized on) a throwaway instance on every miss.
+     *
+     * @param channelId 通道ID
+     * @return the cached key-frame tag, or an empty array if none exists
+     */
+    public byte[] getRecentIFrame(String channelId) {
+        CodecInfo codecInfo = channelCodecInfo.get(channelId);
+        if (codecInfo == null) {
+            return new byte[0];
+        }
+        synchronized (codecInfo) {
+            return codecInfo.recentIFrame != null ? codecInfo.recentIFrame : new byte[0];
+        }
+    }
+
+    /**
+     * Reports whether the AAC sequence header has been generated for a channel.
+     *
+     * Fixed: previously used getOrDefault(channelId, new CodecInfo()), which
+     * allocated (and synchronized on) a throwaway instance on every miss.
+     *
+     * @param channelId 通道ID
+     * @return true if the AAC sequence header has been generated, false otherwise
+     */
+    public boolean isAacSequenceHeaderGenerated(String channelId) {
+        CodecInfo codecInfo = channelCodecInfo.get(channelId);
+        if (codecInfo == null) {
+            return false;
+        }
+        synchronized (codecInfo) {
+            return codecInfo.aacSequenceHeaderGenerated;
+        }
+    }
+
+}

+ 152 - 0
src/main/java/com/jttserver/codec/Jtt1078MessageDecoder.java

@@ -0,0 +1,152 @@
+package com.jttserver.codec;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.handler.codec.ByteToMessageDecoder;
+
+import java.util.List;
+
+/**
+ * JT/T 1078 message decoder.
+ *
+ * Splits the inbound TCP byte stream into complete JT/T 1078 packets,
+ * handling sticky and partial packets. Each complete packet (header +
+ * payload) is emitted as a byte[].
+ */
+public class Jtt1078MessageDecoder extends ByteToMessageDecoder {
+
+    // JT/T 1078 frame-header magic 0x30316364 ("01cd")
+    private static final int PACKET_HEADER = 0x30316364;
+    private static final int MIN_DATA_LENGTH = 19; // smallest frame: 18-byte transparent header + 1 payload byte
+    private static final int MIN_HEADER_LENGTH = 18;
+    private static final int MAX_HEADER_LENGTH = 30;
+
+    @Override
+    protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
+        // Keep extracting packets while at least a minimal frame could be buffered
+        while (in.isReadable(MIN_DATA_LENGTH)) {
+
+            // Locate the frame-header magic, discarding any garbage before it
+            int headerIndex = findHeader(in);
+            if (headerIndex == -1) {
+                // No sync word yet — wait for more data
+                return;
+            }
+            if (headerIndex > 0) {
+                in.skipBytes(headerIndex);
+            }
+
+            // Need at least the smallest possible header to inspect the type field
+            if (in.readableBytes() < MIN_HEADER_LENGTH) {
+                return;
+            }
+
+            // Fixed: peek with absolute getters instead of read-then-rewind. The
+            // old code consumed bytes and then called resetReaderIndex() without
+            // a prior markReaderIndex(), which rewound to the start of the whole
+            // cumulation buffer rather than to this packet's start.
+            int base = in.readerIndex();
+            if (in.getInt(base) != PACKET_HEADER) {
+                throw new IllegalStateException("Header mismatch after sync");
+            }
+
+            // The upper 4 bits of the byte at offset 15 select the header layout
+            int dataType = (in.getByte(base + 15) & 0xF0) >>> 4;
+            int headerLength;
+            switch (dataType) {
+                case 0x0: // 0000 video I-frame
+                case 0x1: // 0001 video P-frame
+                case 0x2: // 0010 video B-frame
+                    headerLength = MAX_HEADER_LENGTH; // 30-byte header
+                    break;
+                case 0x3: // 0011 audio
+                    headerLength = 26;
+                    break;
+                case 0x4: // 0100 transparent data
+                    headerLength = 18;
+                    break;
+                default:
+                    // Unknown data type — fail fast
+                    throw new IllegalStateException("Nalu type unknow!");
+            }
+
+            // Wait until the whole header is buffered
+            if (in.readableBytes() < headerLength) {
+                return;
+            }
+
+            // The last two header bytes hold the payload length (big-endian)
+            int dataLength = in.getUnsignedShort(base + headerLength - 2);
+            int totalLength = headerLength + dataLength;
+
+            // Wait until the whole frame is buffered
+            if (in.readableBytes() < totalLength) {
+                return;
+            }
+
+            // Copy out one complete packet (header + payload) and emit it
+            byte[] packet = new byte[totalLength];
+            in.readBytes(packet);
+            out.add(packet);
+        }
+    }
+
+    /**
+     * Searches the readable region for the JT/T 1078 sync word without
+     * moving the reader index.
+     *
+     * @param buffer input buffer
+     * @return offset of the sync word relative to the reader index, or -1
+     */
+    private int findHeader(ByteBuf buffer) {
+        int start = buffer.readerIndex();
+        int limit = buffer.readableBytes() - 4;
+        for (int i = 0; i <= limit; i++) {
+            if (buffer.getInt(start + i) == PACKET_HEADER) {
+                return i;
+            }
+        }
+        return -1;
+    }
+
+}

+ 250 - 0
src/main/java/com/jttserver/codec/audio/AudioDecoder.java

@@ -0,0 +1,250 @@
+package com.jttserver.codec.audio;
+
+
+import com.jttserver.codec.nativeaac.AacEncoderNative;
+import com.jttserver.protocol.JttConstants;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * 音频解码与格式转换工具
+ * - 海思音频头移除
+ * - S16LE字节与short数组互转
+ * - G.711 A-law/μ-law 解码
+ * - IMA ADPCM(单声道)解码
+ * - 按负载类型分发解码到PCM
+ */
+public final class AudioDecoder {
+
+    private static final Logger logger = LoggerFactory.getLogger(AudioDecoder.class);
+
+    private AudioDecoder() {}
+
+    /**
+     * 海思音频头移除:00015200 或 0001??00(?? 为后续音频长度/2)
+     */
+    public static byte[] stripHisiliconAudioHeader(byte[] data) {
+        if (data == null || data.length < 4) return data;
+        int b0 = data[0] & 0xFF;
+        int b1 = data[1] & 0xFF;
+        int b2 = data[2] & 0xFF;
+        int b3 = data[3] & 0xFF;
+        // 固定头 00015200
+        if (b0 == 0x00 && b1 == 0x01 && b2 == 0x52 && b3 == 0x00) {
+            byte[] out = new byte[data.length - 4];
+            System.arraycopy(data, 4, out, 0, out.length);
+            return out;
+        }
+        // 可变头 0001??00:?? 等于后续音频长度的一半
+        if (b0 == 0x00 && b1 == 0x01 && b3 == 0x00) {
+            int remaining = data.length - 4;
+            if (remaining >= 0 && (b2 * 2) == remaining) {
+                byte[] out = new byte[remaining];
+                System.arraycopy(data, 4, out, 0, remaining);
+                return out;
+            }
+        }
+        return data;
+    }
+
+    /**
+     * S16LE字节转short[]
+     */
+    public static short[] s16leToShorts(byte[] data) {
+        int n = data.length / 2;
+        short[] out = new short[n];
+        for (int i = 0; i < n; i++) {
+            int lo = data[2 * i] & 0xFF;
+            int hi = data[2 * i + 1] & 0xFF;
+            out[i] = (short) ((hi << 8) | lo);
+        }
+        return out;
+    }
+
+    /**
+     * G.711 A-law 解码到 PCM
+     */
+    public static short[] g711aToPcm(byte[] data) {
+        short[] out = new short[data.length];
+        for (int i = 0; i < data.length; i++) {
+            out[i] = alawToPcm(data[i]);
+        }
+        return out;
+    }
+
+    private static short alawToPcm(byte a) {
+        a ^= 0x55;
+        int t = (a & 0x0F) << 4;
+        int seg = (a & 0x70) >> 4;
+        switch (seg) {
+            case 0: t += 8; break;
+            case 1: t += 0x108; break;
+            default: t += 0x108; t <<= (seg - 1); break;
+        }
+        return (short) ((a & 0x80) != 0 ? t : -t);
+    }
+
+    /**
+     * G.711 μ-law 解码到 PCM
+     */
+    public static short[] g711uToPcm(byte[] data) {
+        short[] out = new short[data.length];
+        for (int i = 0; i < data.length; i++) {
+            out[i] = ulawToPcm(data[i]);
+        }
+        return out;
+    }
+
+    private static short ulawToPcm(byte u) {
+        u = (byte) ~u;
+        int t = ((u & 0x0F) << 3) + 0x84;
+        t <<= ((u & 0x70) >> 4);
+        return (short) (((u & 0x80) != 0) ? (0x84 - t) : (t - 0x84));
+    }
+
+    /**
+     * 简化版 IMA ADPCM 单声道解码(若数据块包含预测器与index头,自动使用;否则默认0/0)
+     */
+    public static short[] decodeImaAdpcmMono(byte[] adpcm) {
+        int predictor = 0;
+        int index = 0;
+        int pos = 0;
+        if (adpcm.length >= 4) {
+            predictor = (short) ((adpcm[1] << 8) | (adpcm[0] & 0xFF));
+            index = adpcm[2] & 0xFF;
+            pos = 4;
+        }
+        int nibbles = (adpcm.length - pos) * 2;
+        short[] out = new short[nibbles];
+        int outPos = 0;
+        for (int i = pos; i < adpcm.length; i++) {
+            int b = adpcm[i] & 0xFF;
+            out[outPos++] = imaDecodeNibble(b & 0x0F, predictor, index);
+            predictor = lastPredictor;
+            index = lastIndex;
+            out[outPos++] = imaDecodeNibble((b >> 4) & 0x0F, predictor, index);
+            predictor = lastPredictor;
+            index = lastIndex;
+        }
+        return out;
+    }
+
+    // IMA ADPCM步进与索引表
+    private static final int[] IMA_INDEX_TABLE = { -1,-1,-1,-1, 2,4,6,8, -1,-1,-1,-1, 2,4,6,8 };
+    private static final int[] IMA_STEP_TABLE = {
+        7,8,9,10,11,12,13,14,16,17,19,21,23,25,28,31,
+        34,37,41,45,50,55,60,66,73,80,88,97,107,118,130,143,
+        157,173,190,209,230,253,279,307,337,371,408,449,494,544,598,658,
+        724,796,876,963,1060,1166,1282,1411,1552,1707,1878,2066,2272,2499,2749,3024,
+        3327,3660,4026,4428,4871,5358,5894,6484,7132,7845,8630,9493,10442,11487,12635,13899,
+        15289,16818,18500,20350,22385,24623,27086,29794,32767 };
+    private static int lastPredictor;
+    private static int lastIndex;
+    private static short imaDecodeNibble(int code, int predictor, int index) {
+        int step = IMA_STEP_TABLE[index];
+        int diff = step >> 3;
+        if ((code & 1) != 0) diff += step >> 2;
+        if ((code & 2) != 0) diff += step >> 1;
+        if ((code & 4) != 0) diff += step;
+        if ((code & 8) != 0) diff = -diff;
+        int p = predictor + diff;
+        if (p > 32767) p = 32767;
+        if (p < -32768) p = -32768;
+        int idx = index + IMA_INDEX_TABLE[code];
+        if (idx < 0) idx = 0;
+        if (idx > 88) idx = 88;
+        lastPredictor = p;
+        lastIndex = idx;
+        return (short) p;
+    }
+
+
+    /**
+     * 按负载类型将字节数据解码为 PCM(short[])。
+     * 仅负责字节到PCM的转换,不做累积与编码。
+     * @param payloadType JTT1078音频负载类型常量
+     * @param audioBytes 已经去除海思头的原始音频字节
+     * @return PCM short[],若不支持该类型返回 null
+     */
+    public static short[] decodePayloadToPcm(int payloadType, byte[] audioBytes) {
+        if (audioBytes == null || audioBytes.length == 0) return null;
+        switch (payloadType) {
+            case JttConstants.PAYLOAD_TYPE_ADPCMA:
+                return decodeImaAdpcmMono(audioBytes);
+            case JttConstants.PAYLOAD_TYPE_PCM_AUDIO:
+            case JttConstants.PAYLOAD_TYPE_PCM_VOICE:
+                return s16leToShorts(audioBytes);
+            case JttConstants.PAYLOAD_TYPE_G711A:
+                return g711aToPcm(audioBytes);
+            case JttConstants.PAYLOAD_TYPE_G711U:
+                return g711uToPcm(audioBytes);
+            default:
+                return null;
+        }
+    }
+
+    
+    // 编码返回首个AAC帧,同时返回更新后的累积位置(不破坏现有方法)
+    public static class AacEncodeResult {
+        public final int newAccumPos;
+        public final byte[] aacFrame; // 若本次未凑够一帧则为null
+        public AacEncodeResult(int newAccumPos, byte[] aacFrame) {
+            this.newAccumPos = newAccumPos;
+            this.aacFrame = aacFrame;
+        }
+    }
+
+    /**
+     * 累积PCM至1024*channels并编码为AAC,返回首个AAC帧和更新后的累积位置
+     * @param pcm 待编码的 PCM 数据
+     * @param pcmAccum 累积的 PCM 数据
+     * @param pcmAccumPos 累积数据的起始位置
+     * @param channels 声道数
+     * @param aacHandle AAC 编码器句柄
+     * @return 包含新累积位置和首个AAC帧(若有)的对象
+     */
+    public static AacEncodeResult accumulateAndEncodeToAacReturnOneFrame(short[] pcm,
+                                                                          short[] pcmAccum,
+                                                                          int pcmAccumPos,
+                                                                          int channels,
+                                                                          long aacHandle) {
+        if (pcm == null || pcm.length == 0) {
+            return new AacEncodeResult(pcmAccumPos, null);
+        }
+        if (pcmAccum == null || channels <= 0 || aacHandle == 0) {
+            return new AacEncodeResult(pcmAccumPos, null);
+        }
+        int frameSamples = 1024 * Math.max(1, channels);
+        int srcPos = 0;
+        byte[] firstAac = null;
+        while (srcPos < pcm.length) {
+            int canCopy = Math.min(frameSamples - pcmAccumPos, pcm.length - srcPos);
+            System.arraycopy(pcm, srcPos, pcmAccum, pcmAccumPos, canCopy);
+            pcmAccumPos += canCopy;
+            srcPos += canCopy;
+            if (pcmAccumPos == frameSamples) {
+                
+                // long timestampBegin = System.nanoTime();
+                byte[] aac = null;
+                try {
+                    aac = AacEncoderNative.encodeFrame(aacHandle, pcmAccum);
+                } catch (RuntimeException | Error e) {
+                    logger.error("AAC编码失败:{}", e.getMessage(), e);
+                    aac = null;
+                }
+                // long timestampEnd = System.nanoTime();
+                // System.out.println("AAC编码耗时: " + (timestampEnd - timestampBegin)/1000 + " 微秒, 输出字节: " + (aac == null ? 0 : aac.length));
+
+                pcmAccumPos = 0;
+                if (aac != null && aac.length > 0) {
+                    // 仅保存本次处理中的首个AAC帧用于返回
+                    if (firstAac == null) {
+                        firstAac = aac;
+                    }
+                }
+            }
+        }
+        return new AacEncodeResult(pcmAccumPos, firstAac);
+    }
+}

+ 46 - 0
src/main/java/com/jttserver/codec/audio/AudioUtils.java

@@ -0,0 +1,46 @@
+package com.jttserver.codec.audio;
+
/**
 * Audio-related helper methods (AAC configuration building).
 */
public final class AudioUtils {

    /**
     * AAC sampling-frequency table from ISO/IEC 14496-3, indexed by
     * samplingFrequencyIndex. Hoisted to a constant so it is not
     * re-allocated on every lookup (the previous revision built this
     * array inside the method on each call).
     */
    private static final int[] AAC_SAMPLE_RATES = {
            96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
            16000, 12000, 11025, 8000, 7350
    };

    private AudioUtils() {}

    /**
     * Maps a sample rate to its AAC samplingFrequencyIndex (for use in an
     * AudioSpecificConfig). If the rate is not in the table, the index of
     * the closest entry is returned (ties resolve to the lower index).
     *
     * @param sampleRate sample rate in Hz
     * @return index into the AAC sampling-frequency table
     */
    public static int mapAacSampleRateToIndex(int sampleRate) {
        int bestIdx = -1;
        int bestDiff = Integer.MAX_VALUE;
        for (int i = 0; i < AAC_SAMPLE_RATES.length; i++) {
            int diff = Math.abs(sampleRate - AAC_SAMPLE_RATES[i]);
            if (diff == 0)
                return i;
            if (diff < bestDiff) {
                bestDiff = diff;
                bestIdx = i;
            }
        }
        return bestIdx >= 0 ? bestIdx : 4; // defensive default: 44100 Hz
    }

    /**
     * Builds the 2-byte AAC AudioSpecificConfig sequence header (without FLV
     * wrapping). The result can be used directly as the payload of an FLV
     * audio tag with AACPacketType = 0.
     *
     * @param sampleRate sample rate in Hz
     * @param channels   channel count; values &lt;= 0 are treated as mono
     * @return AudioSpecificConfig bytes (ISO/IEC 14496-3), AAC-LC profile
     */
    public static byte[] buildAudioSpecificConfig(int sampleRate, int channels) {
        int profile = 2; // audioObjectType 2 = AAC-LC
        if (channels <= 0)
            channels = 1;
        int freqIndex = mapAacSampleRateToIndex(sampleRate);
        // Bit layout: 5 bits AOT | 4 bits samplingFrequencyIndex | 4 bits channelConfiguration | 3 bits padding
        byte[] asc = new byte[2];
        asc[0] = (byte) ((profile << 3) | (freqIndex >> 1));
        asc[1] = (byte) (((freqIndex & 1) << 7) | ((channels & 0x0F) << 3));
        return asc;
    }
}

+ 69 - 0
src/main/java/com/jttserver/codec/nativeaac/AacEncoderNative.java

@@ -0,0 +1,69 @@
+package com.jttserver.codec.nativeaac;
+
+import java.nio.file.Path;
+import java.nio.file.Files;
+
+import com.jttserver.config.ConfigManager;
+
+public class AacEncoderNative {
+    static {
+        String os = System.getProperty("os.name").toLowerCase();
+        Path baseDir;
+        Path fdkAacPath;
+        Path jniLibPath;
+
+        if (os.contains("win")) {
+            // Windows平台配置
+            baseDir = ConfigManager.getPath(
+                    "native.windows.x64.baseDir",
+                    "native\\windows\\x64\\Release");
+            fdkAacPath = baseDir.resolve("fdk-aac.dll");
+            jniLibPath = baseDir.resolve("aac_jni.dll");
+        } else if (os.contains("linux")) {
+            // Linux平台配置(统一使用baseDir)
+            baseDir = ConfigManager.getPath(
+                    "native.linux.x64.baseDir",
+                    "/opt/jtt1078server/native/linux");
+            fdkAacPath = baseDir.resolve("libfdk-aac.so");
+            jniLibPath = baseDir.resolve("libaac_jni.so");
+        } else {
+            throw new UnsatisfiedLinkError("Unsupported OS for AAC JNI");
+        }
+
+        // 路径与库文件校验
+        if (!Files.exists(baseDir) || !Files.isDirectory(baseDir)) {
+            throw new UnsatisfiedLinkError("本地库目录不存在或不可用: " + baseDir.toAbsolutePath());
+        }
+        if (!Files.exists(fdkAacPath) || !Files.isRegularFile(fdkAacPath)) {
+            throw new UnsatisfiedLinkError("缺少 AAC 库文件: " + fdkAacPath.toAbsolutePath());
+        }
+        if (!Files.exists(jniLibPath) || !Files.isRegularFile(jniLibPath)) {
+            throw new UnsatisfiedLinkError("缺少 JNI 库文件: " + jniLibPath.toAbsolutePath());
+        }
+        
+        // 加载fdk-aac库
+        System.out.println("Loading fdk-aac library: " + fdkAacPath.toAbsolutePath());
+        System.load(fdkAacPath.toAbsolutePath().toString());
+        
+        // 加载JNI库
+        System.out.println("Loading JNI library: " + jniLibPath.toAbsolutePath());
+        System.load(jniLibPath.toAbsolutePath().toString());
+    }
+
+    private AacEncoderNative() {
+    }
+
+    // 初始化编码器:返回原生句柄
+    public static native long initEncoder(int sampleRate,
+            int channels,
+            int bitrate,
+            int aot /* 2:LC, 5:HE, 29:HEv2 */,
+            boolean useAdts /* true=ADTS输出; false=RAW输出 */);
+
+    // 编码一帧:传入交错的PCM(short),长度必须是 1024*channels
+    public static native byte[] encodeFrame(long handle, short[] pcmInterleaved);
+
+    // 关闭编码器
+    public static native void close(long handle);
+
+}

+ 95 - 0
src/main/java/com/jttserver/config/ConfigManager.java

@@ -0,0 +1,95 @@
+package com.jttserver.config;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Properties;
+
/**
 * Configuration manager: global feature switches plus generic property
 * lookup with the priority System Property &gt; Environment Variable &gt;
 * app.properties (overridable by app-local.properties) &gt; default value.
 */
public class ConfigManager {
    // Whether the web server / device-management features are enabled.
    // Disabled by default for performance.
    private static volatile boolean deviceManagementEnabled = false;

    // Merged configuration properties loaded from the classpath.
    private static final Properties PROPS = new Properties();

    static {
        // Base configuration shipped with the application
        mergeClasspathResource("app.properties");
        // Local override file (intentionally not committed to the repository);
        // loaded second so its entries win.
        mergeClasspathResource("app-local.properties");
    }

    /**
     * Loads a classpath properties resource into PROPS.
     * Missing or unreadable resources are silently ignored so the
     * application falls back to defaults / other sources.
     */
    private static void mergeClasspathResource(String resource) {
        try (InputStream in = ConfigManager.class.getClassLoader().getResourceAsStream(resource)) {
            if (in != null) {
                Properties loaded = new Properties();
                loaded.load(in);
                PROPS.putAll(loaded);
            }
        } catch (IOException ignored) {
            // best-effort load: other sources still apply
        }
    }

    /** Enables the device-management feature. */
    public static void enableDeviceManagement() {
        deviceManagementEnabled = true;
    }

    /** Disables the device-management feature. */
    public static void disableDeviceManagement() {
        deviceManagementEnabled = false;
    }

    /**
     * @return true when device management is enabled, false otherwise
     */
    public static boolean isDeviceManagementEnabled() {
        return deviceManagementEnabled;
    }

    /**
     * Looks up a configuration value.
     * Priority: system property, then environment variable (key with dots
     * replaced by underscores, upper-cased), then the merged properties
     * files, then the supplied default. Empty strings count as "not set".
     *
     * @param key          property key
     * @param defaultValue value returned when no source provides the key
     * @return the resolved value
     */
    public static String get(String key, String defaultValue) {
        String value = System.getProperty(key);
        if (value == null || value.isEmpty()) {
            value = System.getenv(key.replace('.', '_').toUpperCase());
        }
        if (value == null || value.isEmpty()) {
            value = PROPS.getProperty(key);
        }
        return (value == null || value.isEmpty()) ? defaultValue : value;
    }

    /**
     * Looks up a configuration value and interprets it as a filesystem path.
     */
    public static java.nio.file.Path getPath(String key, String defaultValue) {
        return java.nio.file.Paths.get(get(key, defaultValue));
    }
}

+ 161 - 0
src/main/java/com/jttserver/device/DeviceManager.java

@@ -0,0 +1,161 @@
+package com.jttserver.device;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.concurrent.ConcurrentHashMap;
+
+import com.jttserver.config.ConfigManager;
+
+import java.util.Map;
+
+public class DeviceManager {
+
+    // 存储设备连接信息
+    private static final Map<String, DeviceInfo> deviceInfoMap = new ConcurrentHashMap<>();
+
+    /**
+     * 设备信息类
+     */
+    public static class DeviceInfo {
+        private String channelId;
+        private String remoteAddress;
+        private long connectTime;
+        private String simCardNumber;
+        private byte logicChannelNumber;
+        private long lastActiveTime;
+
+        public DeviceInfo(String channelId, String remoteAddress) {
+            this.channelId = channelId;
+            this.remoteAddress = remoteAddress;
+            this.connectTime = System.currentTimeMillis();
+            this.lastActiveTime = System.currentTimeMillis();
+        }
+
+        // Getters and setters
+        public String getChannelId() {
+            return channelId;
+        }
+
+        public String getRemoteAddress() {
+            return remoteAddress;
+        }
+
+        public long getConnectTime() {
+            return connectTime;
+        }
+
+        public String getSimCardNumber() {
+            return simCardNumber;
+        }
+
+        public void setSimCardNumber(String simCardNumber) {
+            this.simCardNumber = simCardNumber;
+        }
+
+        public byte getLogicChannelNumber() {
+            return logicChannelNumber;
+        }
+
+        public void setLogicChannelNumber(byte logicChannelNumber) {
+            this.logicChannelNumber = logicChannelNumber;
+        }
+
+        public long getLastActiveTime() {
+            return lastActiveTime;
+        }
+
+        public void updateLastActiveTime() {
+            this.lastActiveTime = System.currentTimeMillis();
+        }
+    }
+
+    /**
+     * 注册设备信息
+     * 
+     * @param channelId  通道ID
+     * @param deviceInfo 设备信息
+     */
+    public static void registerDevice(String channelId, DeviceInfo deviceInfo) {
+        // 检查功能开关
+        if (!ConfigManager.isDeviceManagementEnabled()) {
+            return;
+        }
+        deviceInfoMap.put(channelId, deviceInfo);
+    }
+
+    /**
+     * 移除设备信息
+     * 
+     * @param channelId 通道ID
+     */
+    public static void unregisterDevice(String channelId) {
+        // 检查功能开关
+        if (!ConfigManager.isDeviceManagementEnabled()) {
+            return;
+        }
+        deviceInfoMap.remove(channelId);
+    }
+
+    /**
+     * 更新设备活动时间
+     * 
+     * @param channelId 通道ID
+     */
+    public static void updateDeviceActiveTime(String channelId) {
+        // 检查功能开关
+        if (!ConfigManager.isDeviceManagementEnabled()) {
+            return;
+        }
+        DeviceInfo deviceInfo = deviceInfoMap.get(channelId);
+        if (deviceInfo != null) {
+            deviceInfo.updateLastActiveTime();
+        }
+    }
+
+    /**
+     * 更新设备SIM卡号
+     * 
+     * @param channelId     通道ID
+     * @param simCardNumber SIM卡号
+     */
+    public static void updateDeviceSimCardNumber(String channelId, String simCardNumber) {
+        // 检查功能开关
+        if (!ConfigManager.isDeviceManagementEnabled()) {
+            return;
+        }
+        DeviceInfo deviceInfo = deviceInfoMap.get(channelId);
+        if (deviceInfo != null) {
+            deviceInfo.setSimCardNumber(simCardNumber);
+        }
+    }
+
+    /**
+     * 更新设备逻辑通道号
+     * 
+     * @param channelId          通道ID
+     * @param logicChannelNumber 逻辑通道号
+     */
+    public static void updateDeviceLogicChannelNumber(String channelId, byte logicChannelNumber) {
+        // 检查功能开关
+        if (!ConfigManager.isDeviceManagementEnabled()) {
+            return;
+        }
+        DeviceInfo deviceInfo = deviceInfoMap.get(channelId);
+        if (deviceInfo != null) {
+            deviceInfo.setLogicChannelNumber(logicChannelNumber);
+        }
+    }
+
+    /**
+     * 获取当前连接的设备信息
+     * 
+     * @return 设备信息列表
+     */
+    public static Collection<DeviceInfo> getConnectedDevices() {
+        // 检查功能开关
+        if (!ConfigManager.isDeviceManagementEnabled()) {
+            return Collections.emptyList();
+        }
+        return deviceInfoMap.values();
+    }
+}

+ 171 - 0
src/main/java/com/jttserver/protocol/Jtt1078NaluPacket.java

@@ -0,0 +1,171 @@
+package com.jttserver.protocol;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * JTT1078协议NALU包组装器
+ * 用于组装jtt1078包多个分片传输的NALU单元,包含了多个jtt包合并一个(有错包防呆机制)
+ */
+public class Jtt1078NaluPacket {
+
+    // 当前NALU单元的参数列表
+    private List<Jtt1078PacketParams> packetsParams;
+
+    // 所有NALU数据片段
+    private List<byte[]> naluFragments;
+
+    // 上一个包序号
+    private int lastPacketSequenceNumber = -1;
+
+    /**
+     * 构造函数
+     */
+    public Jtt1078NaluPacket() {
+        this.packetsParams = new ArrayList<>();
+        this.naluFragments = new ArrayList<>();
+    }
+
+    /**
+     * 添加一个JTT1078数据包到NALU单元中
+     * 
+     * @param packet JTT1078数据包
+     * @return 如果NALU单元已完成并可以处理则返回true,否则返回false
+     */
+    public boolean addPacket(Jtt1078PacketParser.Jtt1078Packet packet) {
+        // 创建参数对象
+        Jtt1078PacketParams params = Jtt1078PacketParams.fromPacket(packet);
+        // 检查包序号连续性(目前丢包暂时不做处理)
+        if (lastPacketSequenceNumber != -1 &&
+                params.packetSequenceNumber != lastPacketSequenceNumber + 1) {
+            // System.out.println("警告: 检测到丢包,上一个包序号=" + lastPacketSequenceNumber +
+            //         ",当前包序号=" + params.packetSequenceNumber);
+            // 包序号不连续时,丢弃前面的包和当前包
+            clear();
+            return false;
+        }
+
+        lastPacketSequenceNumber = params.packetSequenceNumber;
+
+        // 根据分包处理标记进行不同处理
+        switch (params.subpackageFlag) {
+            case 0: // 原子包
+            case 1: // 分包处理时的第一个包
+                // 如果之前还有包则丢弃之前的包
+                if (!packetsParams.isEmpty()) {
+                    // System.out.println("信息: 发现新的NALU开始,丢弃之前的未完成NALU包");
+                    clear();
+                }
+
+                // 添加当前包
+                packetsParams.add(params);
+                naluFragments.add(packet.naluData);
+
+                // 如果是原子包,直接完成
+                if (params.subpackageFlag == 0) {
+                    return true; // 原子包总是立即完成
+                }
+                break;
+
+            case 2: // 分包处理时的最后一个包
+            case 3: // 分包处理时的中间包
+                // 如果之前没有包,则丢弃当前包
+                if (packetsParams.isEmpty()) {
+                    // System.out.println("警告: 收到中间或最后一个分片包,但没有起始包,丢弃当前包");
+                    return false;
+                }
+
+                // 添加当前包
+                packetsParams.add(params);
+                naluFragments.add(packet.naluData);
+
+                // 如果是最后一个包,返回true
+                if (params.subpackageFlag == 2) {
+                    return true;
+                }
+                break;
+
+            default:
+                // System.out.println("警告: 未知的分包处理标记: " + params.subpackageFlag);
+                return false;
+        }
+
+        return false; // NALU单元尚未完成
+
+    }
+
+    /**
+     * 清空当前NALU单元
+     */
+    public void clear() {
+        packetsParams.clear();
+        naluFragments.clear();
+        lastPacketSequenceNumber = -1;
+    }
+
+    /**
+     * 获取完整的NALU数据
+     * 
+     * @return 完整的NALU数据
+     */
+    public byte[] getCompleteNaluData() {
+        if (naluFragments.isEmpty()) {
+            return new byte[0];
+        }
+
+        // 计算总长度
+        int totalLength = 0;
+        for (byte[] fragment : naluFragments) {
+            totalLength += fragment.length;
+        }
+
+        // 组装数据
+        byte[] completeData = new byte[totalLength];
+        int offset = 0;
+        for (byte[] fragment : naluFragments) {
+            System.arraycopy(fragment, 0, completeData, offset, fragment.length);
+            offset += fragment.length;
+        }
+
+        return completeData;
+    }
+
+    /**
+     * 获取最新的参数信息
+     * 
+     * @return 最新的参数信息
+     */
+    public Jtt1078PacketParams getLatestParams() {
+        if (packetsParams.isEmpty()) {
+            return null;
+        }
+        return packetsParams.get(packetsParams.size() - 1);
+    }
+
+    /**
+     * 检查NALU单元是否为空
+     * 
+     * @return 如果为空返回true,否则返回false
+     */
+    public boolean isEmpty() {
+        return packetsParams.isEmpty();
+    }
+
+    /**
+     * 获取包参数列表
+     * 
+     * @return 包参数列表
+     */
+    public List<Jtt1078PacketParams> getPacketsParams() {
+        return new ArrayList<>(packetsParams);
+    }
+
+    /**
+     * 获取NALU片段列表
+     * 
+     * @return NALU片段列表
+     */
+    public List<byte[]> getNaluFragments() {
+        return new ArrayList<>(naluFragments);
+    }
+}

+ 67 - 0
src/main/java/com/jttserver/protocol/Jtt1078PacketParams.java

@@ -0,0 +1,67 @@
+package com.jttserver.protocol;
+
+/**
+ * JTT1078数据包参数信息
+ */
+public class Jtt1078PacketParams {
+    // 主标识
+    public int mFlag;
+    // 载荷类型
+    public int payloadType;
+    // 包序号
+    public int packetSequenceNumber;
+    // SIM卡号
+    public byte[] simCardNumber;
+    // SIM卡号字符串
+    public String simCardNumberStr;
+    // 逻辑通道号
+    public int logicChannelNumber;
+    // 数据类型
+    public int dataType;
+    // 分包处理标记
+    public int subpackageFlag;
+    // 时间戳
+    public byte[] timestamp;
+    // 上一I帧间隔
+    public int lastIFrameInterval;
+    // 上一帧间隔
+    public int lastFrameInterval;
+    // 数据体长度
+    public int dataBodyLength;
+
+    /**
+     * 从Jtt1078Packet创建参数对象
+     * 
+     * @param packet Jtt1078Packet对象
+     * @return 参数对象
+     */
+    public static Jtt1078PacketParams fromPacket(Jtt1078PacketParser.Jtt1078Packet packet) {
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.mFlag = packet.mFlag;
+        params.payloadType = packet.payloadType;
+        params.packetSequenceNumber = packet.packetSequenceNumber;
+        params.simCardNumber = packet.simCardNumber != null ? packet.simCardNumber.clone() : null;
+        params.logicChannelNumber = packet.logicChannelNumber;
+        params.simCardNumberStr = packet.simCardNumberStr;
+        params.dataType = packet.dataType;
+        params.subpackageFlag = packet.subpackageFlag;
+        params.timestamp = packet.timestamp != null ? packet.timestamp.clone() : null;
+        params.lastIFrameInterval = packet.lastIFrameInterval;
+        params.lastFrameInterval = packet.lastFrameInterval;
+        params.dataBodyLength = packet.dataBodyLength;
+        return params;
+    }
+
+    @Override
+    public String toString() {
+        return "Jtt1078PacketParams{" +
+                "mFlag=" + mFlag +
+                ", payloadType=" + payloadType +
+                ", packetSequenceNumber=" + packetSequenceNumber +
+                ", logicChannelNumber=" + logicChannelNumber +
+                ", dataType=" + dataType +
+                ", subpackageFlag=" + subpackageFlag +
+                ", dataBodyLength=" + dataBodyLength +
+                '}';
+    }
+}

+ 301 - 0
src/main/java/com/jttserver/protocol/Jtt1078PacketParser.java

@@ -0,0 +1,301 @@
+package com.jttserver.protocol;
+
+import io.netty.buffer.ByteBuf;
+
+import com.jttserver.utils.SimCardUtils;
+
+/**
+ * JTT1078协议包解析器
+ * 用于解析符合JTT1078标准的视频流数据包(解析各个字段)
+ */
+public class Jtt1078PacketParser {
+
+    /**
+     * JTT1078协议数据包实体类
+     */
+    public static class Jtt1078Packet {
+        // M边界帧标志
+        public byte mFlag;
+
+        // 负载类型
+        public byte payloadType;
+
+        // 包序号
+        public int packetSequenceNumber;
+
+        // SIM卡号
+        public byte[] simCardNumber;
+
+        // SIM卡号字符串
+        public String simCardNumberStr;
+
+        // 逻辑通道号
+        public byte logicChannelNumber;
+
+        // 数据类型
+        public byte dataType;
+
+        // 分包处理标记
+        public byte subpackageFlag;
+
+        // 时间戳
+        public byte[] timestamp;
+
+        // Last I Frame Interval
+        public int lastIFrameInterval;
+
+        // Last Frame Interval
+        public int lastFrameInterval;
+
+        // 数据体长度
+        public int dataBodyLength;
+
+        // NALU数据体
+        public byte[] naluData;
+
+        /**
+         * 获取分包处理标记的名称
+         * 
+         * @param subpackageFlag 分包处理标记
+         * @return 分包处理标记名称
+         */
+        private String getSubpackageFlagName(int subpackageFlag) {
+            switch (subpackageFlag) {
+                case 0:
+                    return "原子包";
+                case 1:
+                    return "分包处理时的第一个包";
+                case 2:
+                    return "分包处理时的最后一个包";
+                case 3:
+                    return "分包处理时的中间包";
+                default:
+                    return "未知包";
+            }
+        }
+
+        /**
+         * 将时间戳字节数组转换为十六进制字符串
+         * 
+         * @param timestamp 时间戳字节数组
+         * @return 十六进制字符串表示的时间戳
+         */
+        private String getTimestampString(byte[] timestamp) {
+            if (timestamp == null) {
+                return "null";
+            }
+
+            StringBuilder sb = new StringBuilder();
+            sb.append("[");
+            for (int i = 0; i < timestamp.length; i++) {
+                if (i > 0) {
+                    sb.append(", ");
+                }
+                // 将有符号字节转换为无符号整数并格式化为十进制
+                int unsignedByte = timestamp[i] & 0xFF;
+                sb.append(unsignedByte);
+            }
+            sb.append("]");
+            return sb.toString();
+        }
+
+        /**
+         * 将时间戳字节数组转换为毫秒数
+         * 
+         * @param timestamp 时间戳字节数组
+         * @return 毫秒数
+         */
+        private long getTimestampMillis(byte[] timestamp) {
+            if (timestamp == null || timestamp.length != 8) {
+                return 0;
+            }
+
+            // 将8字节时间戳转换为long值(大端序)
+            return ((long) (timestamp[0] & 0xFF) << 56) |
+                    ((long) (timestamp[1] & 0xFF) << 48) |
+                    ((long) (timestamp[2] & 0xFF) << 40) |
+                    ((long) (timestamp[3] & 0xFF) << 32) |
+                    ((long) (timestamp[4] & 0xFF) << 24) |
+                    ((long) (timestamp[5] & 0xFF) << 16) |
+                    ((long) (timestamp[6] & 0xFF) << 8) |
+                    ((long) (timestamp[7] & 0xFF));
+        }
+
+        /**
+         * 获取负载类型名称
+         * 
+         * @param payloadType 负载类型
+         * @return 负载类型名称
+         */
+        private String getPayloadTypeName(byte payloadType) {
+            switch (payloadType) {
+                case 98:
+                    return "H.264";
+                case 99:
+                    return "H.265";
+                case 100:
+                    return "AVS";
+                case 101:
+                    return "SVAC";
+                default:
+                    if (payloadType >= 1 && payloadType <= 28) {
+                        return "Audio";
+                    } else if (payloadType >= 91 && payloadType <= 97) {
+                        return "Reserved/System";
+                    } else {
+                        return "Unknown";
+                    }
+            }
+        }
+
+        /**
+         * 获取数据类型名称
+         * 
+         * @param dataType 数据类型
+         * @return 数据类型名称
+         */
+        private String getDataTypeName(byte dataType) {
+            switch (dataType) {
+                case 0x00:
+                    return "视频 I 帧";
+                case 0x01:
+                    return "视频 P 帧";
+                case 0x02:
+                    return "视频 B 帧";
+                case 0x03:
+                    return "音频帧";
+                case 0x04:
+                    return "透传数据";
+                default:
+                    return "未知";
+            }
+        }
+
+        /**
+         * 打印Jtt1078Packet对象的内容
+         */
+        @Override
+        public String toString() {
+            return "Jtt1078Packet{\n" +
+                    "  mFlag=" + mFlag + "\n" +
+                    "  payloadType=" + payloadType + " (" + getPayloadTypeName(payloadType) + ")\n" +
+                    "  packetSequenceNumber=" + packetSequenceNumber + "\n" +
+                    "  simCardNumber=" + simCardNumberStr + "\n" +
+                    "  logicChannelNumber=" + logicChannelNumber + "\n" +
+                    "  dataType=" + dataType + " (" + getDataTypeName(dataType) + ")\n" +
+                    "  subpackageFlag=" + subpackageFlag + " (" + getSubpackageFlagName(subpackageFlag) + ")\n" +
+                    "  timestamp=" + getTimestampString(timestamp) + " (" + getTimestampMillis(timestamp) + " ms)\n" +
+                    "  lastIFrameInterval=" + lastIFrameInterval + " ms\n" +
+                    "  lastFrameInterval=" + lastFrameInterval + " ms\n" +
+                    "  dataBodyLength=" + dataBodyLength + "\n" +
+                    "  naluData.length=" + (naluData != null ? naluData.length : 0) + "\n" +
+                    "}";
+        }
+
+    }
+
+    /**
+     * 判断是否为视频帧
+     * 
+     * @param dataType 数据类型
+     * @return 是否为视频帧
+     */
+    public static boolean isVideoFrame(byte dataType) {
+        // 0000:视频 I 帧; 0001:视频 P 帧; 0010:视频 B 帧
+        return dataType == 0x00 || dataType == 0x01 || dataType == 0x02;
+    }
+
    /**
     * Parses one complete JT/T 1078 audio/video packet out of {@code buf}.
     *
     * Layout (offsets relative to the frame start): byte 5 holds the M flag
     * (high bit) and payload type (low 7 bits); bytes 6-7 the sequence number;
     * bytes 8-13 the BCD SIM card number; byte 14 the logical channel; byte 15
     * the data type (high nibble) and sub-package flag (low nibble). The
     * 8-byte timestamp is absent for pass-through data (0x04); the two 2-byte
     * frame-interval fields are present only for video frames.
     *
     * NOTE: the reader index is restored in the finally block, so the caller's
     * buffer position is deliberately left unchanged by this method.
     *
     * @param buf ByteBuf positioned at the start of one complete JT/T 1078 packet;
     *            assumed to contain the whole packet — no bounds validation is
     *            performed, so a truncated buffer raises Netty's index exception
     * @return the populated packet object
     */
    public static Jtt1078Packet parse(ByteBuf buf) {
        Jtt1078Packet packet = new Jtt1078Packet();

        // Remember the reader index so it can be restored before returning.
        int startIndex = buf.readerIndex();

        try {
            // Byte 5: M boundary flag + payload type.
            buf.readerIndex(startIndex + 5); // byte offset 5
            byte fifthByte = buf.readByte();

            // M boundary-frame flag (highest bit).
            packet.mFlag = (byte) ((fifthByte >> 7) & 0x01);

            // Payload type (low 7 bits).
            packet.payloadType = (byte) (fifthByte & 0x7F);

            // Bytes 6-7: packet sequence number.
            buf.readerIndex(startIndex + 6); // byte offset 6
            packet.packetSequenceNumber = buf.readUnsignedShort();

            // Bytes 8-13: SIM card number (BCD[6]).
            buf.readerIndex(startIndex + 8); // byte offset 8
            packet.simCardNumber = new byte[6];
            buf.readBytes(packet.simCardNumber);

            // Compute and cache the SIM card string once, to avoid repeated conversion.
            packet.simCardNumberStr = SimCardUtils.toStandardString(packet.simCardNumber);

            // Byte 14: logical channel number.
            buf.readerIndex(startIndex + 14); // byte offset 14
            packet.logicChannelNumber = buf.readByte();

            // Byte 15: data type (high 4 bits) and sub-package flag (low 4 bits).
            buf.readerIndex(startIndex + 15); // byte offset 15
            byte fifteenthByte = buf.readByte();
            packet.dataType = (byte) ((fifteenthByte >> 4) & 0x0F);
            packet.subpackageFlag = (byte) (fifteenthByte & 0x0F);

            // Running offset of the next field; some fields are optional per data type.
            short lastOffset = 16;

            // Bytes 16-23: timestamp — absent for pass-through data (0b0100).
            if (packet.dataType != 0x04) { // 0100 binary = 4 decimal
                buf.readerIndex(startIndex + lastOffset); // byte offset 16
                packet.timestamp = new byte[8];
                buf.readBytes(packet.timestamp);
                lastOffset += 8;
            } else {
                packet.timestamp = new byte[0];
            }

            // Last-I-frame / last-frame intervals — present only for video frames.
            if (isVideoFrame(packet.dataType)) {
                buf.readerIndex(startIndex + lastOffset); // byte offset 24 (typical)
                packet.lastIFrameInterval = buf.readUnsignedShort();
                lastOffset += 2;
                buf.readerIndex(startIndex + lastOffset); // byte offset 26 (typical)
                packet.lastFrameInterval = buf.readUnsignedShort();
                lastOffset += 2;
            } else {
                // Fields absent for non-video frames; -1 marks "not present".
                packet.lastIFrameInterval = -1;
                packet.lastFrameInterval = -1;
            }

            // Data body length (WORD, 2 bytes).
            buf.readerIndex(startIndex + lastOffset); // byte offset 28 (typical)
            packet.dataBodyLength = buf.readUnsignedShort();
            lastOffset += 2;

            // NALU data body.
            if (packet.dataBodyLength > 0) {
                buf.readerIndex(startIndex + lastOffset); // byte offset 30 (typical)
                packet.naluData = new byte[packet.dataBodyLength];
                buf.readBytes(packet.naluData);
            } else {
                packet.naluData = new byte[0];
            }

            return packet;

        } finally {
            // Restore the reader index — parsing must not consume the buffer.
            buf.readerIndex(startIndex);
        }
    }
+}

+ 129 - 0
src/main/java/com/jttserver/protocol/JttConstants.java

@@ -0,0 +1,129 @@
+package com.jttserver.protocol;
+
/**
 * Protocol constants for JT/T 1078 media packets: AAC tag types, data-type
 * codes, payload-type codes and a lookup for payload-type display names.
 */
public class JttConstants {

    /* ===== AAC tag types ===== */
    public static final byte AAC_SEQUENCE_HEADER = 0; // AAC sequence header
    public static final byte AAC_SEQUENCE_DATA = 1; // AAC raw frame data

    /* ===== Received data types (high nibble of header byte 15) ===== */
    public static final int TYPE_VIDEO_I_FRAME = 0x00; // video I frame
    public static final int TYPE_VIDEO_P_FRAME = 0x01; // video P frame
    public static final int TYPE_VIDEO_B_FRAME = 0x02; // video B frame
    public static final int TYPE_AUDIO = 0x03; // audio frame
    public static final int TYPE_PASSTHROUGH = 0x04; // transparent pass-through data

    /* ===== Received payload types (audio: 1-28, video: 98-99) ===== */
    public static final int PAYLOAD_TYPE_G721 = 1;
    public static final int PAYLOAD_TYPE_G722 = 2;
    public static final int PAYLOAD_TYPE_G723 = 3;
    public static final int PAYLOAD_TYPE_G728 = 4;
    public static final int PAYLOAD_TYPE_G729 = 5;
    public static final int PAYLOAD_TYPE_G711A = 6;
    public static final int PAYLOAD_TYPE_G711U = 7;
    public static final int PAYLOAD_TYPE_G726 = 8;
    public static final int PAYLOAD_TYPE_G729A = 9;
    public static final int PAYLOAD_TYPE_DVI4_3 = 10;
    public static final int PAYLOAD_TYPE_DVI4_4 = 11;
    public static final int PAYLOAD_TYPE_DVI4_8K = 12;
    public static final int PAYLOAD_TYPE_DVI4_16K = 13;
    public static final int PAYLOAD_TYPE_LPC = 14;
    public static final int PAYLOAD_TYPE_S16BE_STEREO = 15;
    public static final int PAYLOAD_TYPE_S16BE_MONO = 16;
    public static final int PAYLOAD_TYPE_MPEGAUDIO = 17;
    public static final int PAYLOAD_TYPE_LPCM = 18;
    public static final int PAYLOAD_TYPE_AAC = 19;
    public static final int PAYLOAD_TYPE_WMA9STD = 20;
    public static final int PAYLOAD_TYPE_HEAAC = 21;
    public static final int PAYLOAD_TYPE_PCM_VOICE = 22;
    public static final int PAYLOAD_TYPE_PCM_AUDIO = 23;
    public static final int PAYLOAD_TYPE_AACLC = 24;
    public static final int PAYLOAD_TYPE_MP3 = 25;
    public static final int PAYLOAD_TYPE_ADPCMA = 26;
    public static final int PAYLOAD_TYPE_MP4AUDIO = 27;
    public static final int PAYLOAD_TYPE_AMR = 28;

    // H.264
    public static final int PAYLOAD_TYPE_H264 = 98;
    // H.265
    public static final int PAYLOAD_TYPE_H265 = 99;

    // Display names for the contiguous audio payload range, indexed by (payloadType - 1).
    private static final String[] AUDIO_PAYLOAD_NAMES = {
            "G.721", "G.722", "G.723", "G.728", "G.729",
            "G.711A", "G.711U", "G.726", "G.729A",
            "DVI4_3", "DVI4_4", "DVI4_8K", "DVI4_16K",
            "LPC", "S16BE, STEREO", "S16BE, MONO",
            "MPEGAUDIO", "LPCM", "AAC", "WMA9STD", "HEAAC",
            "PCM, VOICE", "PCM, AUDIO", "AACLC", "MP3",
            "ADPCMA", "MP4AUDIO", "AMR"
    };

    /**
     * Returns the display name for a payload-type code.
     *
     * @param payloadType payload type code
     * @return the codec name, or the unknown marker for unrecognized codes
     */
    public static String getPayloadTypeName(int payloadType) {
        if (payloadType == PAYLOAD_TYPE_H264) {
            return "H.264";
        }
        if (payloadType == PAYLOAD_TYPE_H265) {
            return "H.265";
        }
        // Audio codecs occupy the contiguous range 1..28 — table lookup.
        if (payloadType >= PAYLOAD_TYPE_G721 && payloadType <= PAYLOAD_TYPE_AMR) {
            return AUDIO_PAYLOAD_NAMES[payloadType - 1];
        }
        return "未知负载类型";
    }
}

+ 268 - 0
src/main/java/com/jttserver/relay/FlvStreamRelay.java

@@ -0,0 +1,268 @@
+package com.jttserver.relay;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.jttserver.codec.FlvPacketizer;
+import com.jttserver.protocol.Jtt1078NaluPacket;
+import com.jttserver.protocol.Jtt1078PacketParams;
+import com.jttserver.relay.workerthreads.BroadcastWorker;
+import com.jttserver.service.publisher.PublishServer;
+import com.jttserver.utils.SimCardUtils;
+
+import io.netty.buffer.Unpooled;
+import io.netty.channel.Channel;
+import io.netty.handler.codec.http.websocketx.BinaryWebSocketFrame;
+
+public class FlvStreamRelay extends StreamRelay {
+
+    // 单个nalu解析结果
+    private static class NaluSegment {
+        final byte[] payload; // 解析出的NALU负载(不含起始码)
+        final int consumedBytes; // 从偏移开始本次解析消耗的字节数(含起始码与负载)
+
+        NaluSegment(byte[] payload, int consumedBytes) {
+            this.payload = payload;
+            this.consumedBytes = consumedBytes;
+        }
+    }
+
+    private static final Logger logger = LoggerFactory.getLogger(FlvStreamRelay.class);
+
+    // 为每个 channelId 缓存已计算的 streamId,避免重复计算
+    private final Map<String, String> channelIdToStreamId = new ConcurrentHashMap<>();
+
+    // 使用单实例 FlvPacketizer,内部以 channelId 维护编解码器信息
+    private final FlvPacketizer packetizer;
+
+    
+    public FlvStreamRelay() {
+        // 当前仅支持 FLV 打包
+        this.packetizer = new FlvPacketizer();
+    }
+
+    /*
+     * 推流视频数据
+     */
+    @Override
+    public void publishVideo(String channelId, byte[] nalu, Jtt1078PacketParams params,
+            long timestampMs) {
+        if (nalu == null || nalu.length == 0) {
+            return;
+        }
+        // 逐段解析并发布单个NALU单元(不含起始码)
+        int offset = 0;
+        int totalLength = nalu.length;
+        while (offset < totalLength) {
+            // 提取从当前偏移开始的下一个NALU单元(不含起始码),并返回本次消耗的字节数
+            NaluSegment segment = getNaluSingle(nalu, offset);
+            if (segment.consumedBytes <= 0) { // 未能有效解析出片段,结束循环
+                break;
+            }
+            // 有效负载才进行发布
+            if (segment.payload != null && segment.payload.length > 0) {
+                publishSingleNalu(channelId, segment.payload, params, timestampMs);
+            }
+            // 增加偏移到下一个片段起始位置
+            offset += segment.consumedBytes;
+        }
+    }
+
+    /*
+     * 推流单个NALU单元
+     */
+    public void publishSingleNalu(String channelId, byte[] nalu, Jtt1078PacketParams params,
+            long timestampMs) {
+        if (nalu == null || nalu.length == 0) {
+            return;
+        }
+
+        // 处理单个NALU数据
+        byte[] tag = packetizer.processVideoNalu(channelId, nalu, params, timestampMs);
+        if (tag != null && tag.length > 0) {
+            // 通过构造时缓存的 publishServer 引用进行广播
+            if (publishServer != null) {
+                broadcastStreamData(channelId, tag, params);
+            } else {
+                logger.warn("publishServer is null, cannot broadcast stream data");
+            }
+        }
+    }
+
+    /*
+     * 推流音频数据
+     */
+    @Override
+    public void publishAudio(String channelId, byte[] audio, Jtt1078PacketParams params,
+            long timestampMs) {
+        if (audio == null || audio.length == 0) {
+            return;
+        }
+        byte[] tag = packetizer.processAudioNalu(channelId, audio, params, timestampMs);
+        if (tag != null && tag.length > 0) {
+            // 通过构造时缓存的 publishServer 引用进行广播
+            if (publishServer != null) {
+                // 如果是channel第一次广播音频,先广播AAC序列头
+                byte[] aacSeqHeader = packetizer.getOrCreateAacSequenceHeader(channelId, timestampMs);
+                if (aacSeqHeader != null && aacSeqHeader.length > 0) {
+                    broadcastStreamData(channelId, aacSeqHeader, params);
+                }
+
+                broadcastStreamData(channelId, tag, params);
+            } else {
+                logger.warn("ws is null, cannot broadcast stream data");
+            }
+        }
+    }
+
+    // 将广播逻辑封装为独立函数,提升可读性与复用性
+    private void broadcastStreamData(String channelId, byte[] tag, Jtt1078PacketParams params) {
+        // 先尝试复用缓存的 streamId
+        String streamId = channelIdToStreamId.get(channelId);
+        if (streamId == null || streamId.isEmpty()) {
+            // 首次或未缓存,计算并建立映射
+            streamId = SimCardUtils.buildStreamId(params.simCardNumberStr, params.logicChannelNumber);
+            if (streamId != null && !streamId.isEmpty()) {
+                logger.info("channelId: {}, streamId: {}", channelId, streamId);
+                channelIdToStreamId.put(channelId, streamId);
+                // 同步建立映射关系
+                publishServer.mapStreamToChannel(streamId, channelId);
+                publishServer.addStreamRelay(streamId, this);
+            }
+        }
+        // 使用 streamId
+        if (streamId != null && !streamId.isEmpty()) {
+            BroadcastWorker.broadcast(publishServer, channelId, streamId, tag);
+        } else {
+            logger.warn("streamId为空,无法广播数据");
+        }
+    }
+
+    // 提供初始化段(FLV Header + 最新序列头),序列头由 FlvPacketizer 内部构建
+    public byte[] getChannelInitVideoSegment(String channelId) {
+        return packetizer.getFlvAndsSequenceHeader(channelId);
+    }
+
+    // 提供初始化段(FLV Header + 最新序列头),序列头由 FlvPacketizer 内部构建
+    public byte[] getChannelInitAudioSegment(String channelId) {
+        return packetizer.getAacSequenceHeader(channelId);
+    }
+
+    // 提供最近的I帧(若提供者未就绪或未生成序列头则返回空)
+    public byte[] getChannelRecentIFrame(String channelId) {
+        return packetizer.getRecentIFrame(channelId);
+    }
+
+    /*
+     * 重置指定 channelId 的流状态(如断线重连时调用)
+     */
+    public void resetChannel(String channelId) {
+        // 清空 FlvPacketizer 的该通道编解码器信息
+        packetizer.clearChannel(channelId);
+
+        // 同步移除映射
+        if (publishServer != null) {
+            publishServer.removeChannelMapping(channelId);
+        }
+        // 同步清理本地缓存的 streamId
+        channelIdToStreamId.remove(channelId);
+    }
+
+    /*
+     * 关闭指定 channelId 的流(如连接断开时调用)
+     */
+    @Override
+    public void closeChannel(String channelId) {
+        // 同步清理 FlvPacketizer 的该通道信息
+        packetizer.clearChannel(channelId);
+        // 同步移除映射
+        if (publishServer != null) {
+            publishServer.removeChannelMapping(channelId);
+        }
+        // 同步清理本地缓存的 streamId
+        channelIdToStreamId.remove(channelId);
+    }
+
+    // 提取从指定偏移开始的单个NALU负载(不含起始码),返回同时包含本次消耗的字节数
+    private NaluSegment getNaluSingle(byte[] data, int offset) {
+        int len = (data == null) ? 0 : data.length;
+        if (data == null || offset >= len) {
+            return new NaluSegment(new byte[0], 0);
+        }
+
+        // 查找当前或之后的起始码(00 00 00 01)
+        int start = -1;
+        for (int i = offset; i <= len - 4; i++) {
+            if (data[i] == 0x00 && data[i + 1] == 0x00 && data[i + 2] == 0x00 && data[i + 3] == 0x01) {
+                start = i;
+                break;
+            }
+        }
+
+        if (start < 0) {
+            // 未检测到起始码:将剩余数据视为单个NALU负载进行处理
+            int remaining = len - offset;
+            byte[] payload = new byte[remaining];
+            System.arraycopy(data, offset, payload, 0, remaining);
+            return new NaluSegment(payload, remaining);
+        }
+
+        // 负载从起始码之后开始
+        int payloadStart = start + 4;
+
+        // 查找下一个起始码,用于确定本片段的结束位置
+        int nextStart = -1;
+        for (int j = payloadStart; j <= len - 4; j++) {
+            if (data[j] == 0x00 && data[j + 1] == 0x00 && data[j + 2] == 0x00 && data[j + 3] == 0x01) {
+                nextStart = j;
+                break;
+            }
+        }
+        int end = (nextStart >= 0) ? nextStart : len;
+
+        // 复制负载数据(不含起始码)
+
+        // 负载为起始码之后到下一个起始码(或数据末尾)之间的数据
+        int payloadLen = end - payloadStart;
+        if (payloadLen <= 0) {
+            return new NaluSegment(new byte[0], 0);
+        }
+        byte[] payload = new byte[payloadLen];
+
+        System.arraycopy(data, payloadStart, payload, 0, payloadLen);
+
+        // 本次消耗的字节数 = 起始码长度(4) + 负载长度
+        int consumed = 4 + payloadLen;
+        return new NaluSegment(payload, consumed);
+    }
+
+    @Override
+    public void initChannelConn(String channelId, Channel ch) {
+        // 补发FLV头+视频序列头
+        byte[] initVideoSegment = getChannelInitVideoSegment(channelId);
+        if (initVideoSegment != null && initVideoSegment.length > 0) {
+            ch.writeAndFlush(new BinaryWebSocketFrame(Unpooled.wrappedBuffer(initVideoSegment)));
+        }
+
+        // 补发音频序列头
+        byte[] initAudioSegment = getChannelInitAudioSegment(channelId);
+        if (initAudioSegment != null && initAudioSegment.length > 0) {
+            ch.writeAndFlush(new BinaryWebSocketFrame(Unpooled.wrappedBuffer(initAudioSegment)));
+        }
+
+        // 补发最近的I帧
+        byte[] recentIFrame = getChannelRecentIFrame(channelId);
+        if (recentIFrame != null && recentIFrame.length > 0) {
+            ch.writeAndFlush(new BinaryWebSocketFrame(Unpooled.wrappedBuffer(recentIFrame)));
+        }
+
+    }
+
+    @Override
+    public void destroyChannelDisconn(String channelId) {
+        receiveServer.disconnChannel(channelId);
+    }
+}

+ 63 - 0
src/main/java/com/jttserver/relay/StreamRelay.java

@@ -0,0 +1,63 @@
+package com.jttserver.relay;
+
+import com.jttserver.protocol.Jtt1078PacketParams;
+import com.jttserver.service.publisher.PublishServer;
+import com.jttserver.service.receiver.RecvSever;
+
+import io.netty.channel.Channel;
+
/*
 * Base class for media stream relays. A relay sits between the receiving
 * server (JT/T 1078 packets in) and a publish server (packaged media out),
 * converting frames and forwarding them to subscribers.
 */
public abstract class StreamRelay {

    // Receiving server — the source of incoming JT/T 1078 media.
    protected RecvSever receiveServer;

    // Publish server reference (e.g. the WebSocket server) used for fan-out.
    protected PublishServer publishServer;

    /*
     * No-arg constructor; collaborators are injected via the setters below.
     */
    public StreamRelay() {
        
    }

    public void setPublishServer(PublishServer publishServer) {
        // Inject the publish server reference.
        this.publishServer = publishServer;
    }

    public void setReceiveServer(RecvSever receiveServer) {
        // Inject the receive server reference.
        this.receiveServer = receiveServer;
    }


    /*
     * Publish video data for a channel. 'nalu' is raw Annex-B data;
     * 'timestampMs' is the presentation timestamp in milliseconds.
     */
    public abstract void publishVideo(String channelId, byte[] nalu, Jtt1078PacketParams params,long timestampMs);

    /*
     * Publish audio data for a channel.
     */
    public abstract void publishAudio(String channelId, byte[] audio, Jtt1078PacketParams params, long timestampMs);

    /*
     * Close and clean up all resources held for the given channel.
     */
    public abstract void closeChannel(String channelId);


    /*
     * Initialize a subscriber connection (called once at subscribe time,
     * e.g. to replay headers / key frames).
     */
    public abstract void initChannelConn(String channelId, Channel ch);

    /**
     * Tear down a subscriber connection (called once on disconnect to
     * release resources).
     */
    public abstract void destroyChannelDisconn(String channelId);
}

+ 111 - 0
src/main/java/com/jttserver/relay/workerthreads/BroadcastWorker.java

@@ -0,0 +1,111 @@
+package com.jttserver.relay.workerthreads;
+
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.jttserver.config.ConfigManager;
+import com.jttserver.service.publisher.PublishServer;
+
+/* 
+ * 全局广播工作线程类   
+ */
+public class BroadcastWorker {
+
+    private static final Logger logger = LoggerFactory.getLogger(BroadcastWorker.class);
+
+    // 为每个连接维护消费者线程池
+    private static final ConcurrentHashMap<String, ThreadPoolExecutor> broadcastExecutorMap = new ConcurrentHashMap<>();    
+
+    /**
+     * 为指定通道创建单消费者执行器
+     * 
+     * @param channelId 通道ID
+     * @return 单线程线程池
+     */
+    public static ThreadPoolExecutor createSingleConsumerExecutor(String channelId) {
+        int capacity = Integer.parseInt(ConfigManager.get("broadcast.queue.capacity", "1000"));
+        ThreadFactory tf = r -> {
+            Thread t = new Thread(r);
+            t.setName("broadcast-" + channelId);
+            t.setDaemon(true);
+            return t;
+        };
+        // 创建单消费者线程池
+        return new ThreadPoolExecutor(
+                1,
+                1,
+                0L,
+                TimeUnit.MILLISECONDS,
+                new ArrayBlockingQueue<>(capacity),
+                tf,
+                new ThreadPoolExecutor.DiscardPolicy());
+    }
+
+    /**
+     * 广播数据到指定通道
+     * 
+     * @param streamId 通道ID
+     * @param data     数据
+     */
+    public static void broadcast(PublishServer publishServer, String channelId, String streamId, byte[] data) {
+        
+        // 创建广播任务
+        Runnable broadcastTask = () -> {
+            // 处理广播逻辑
+            publishServer.broadcast(streamId, data);
+        };
+
+        // 获取该channelId对应的单线程执行器
+        ThreadPoolExecutor exec = broadcastExecutorMap.get(channelId);
+        if (exec != null) {
+            try {
+                exec.execute(broadcastTask);
+            } catch (RejectedExecutionException e) {
+                // 以后优化丢弃最前面的数据
+                logger.warn("广播队列已满,丢弃数据。channelId: {}, 队列容量: {}", channelId, exec.getQueue().size());
+            }
+        } else {
+            logger.warn("未找到通道 {} 的广播线程池", channelId);
+            // 直接在当前线程执行,保证功能可用
+            broadcastTask.run();
+        }
+    }
+
+    /**
+     * 为指定通道初始化发布线程池
+     * @param channelId 通道ID
+     */
+    public static void initBroadcastExecutor(String channelId) {
+        broadcastExecutorMap.putIfAbsent(channelId, createSingleConsumerExecutor(channelId));
+    }
+
+    /**
+     * 关闭并移除指定通道的广播线程池
+     * 
+     * @param channelId 通道ID
+     */
+    public static void shutdownExecutor(String channelId) {
+        ThreadPoolExecutor exec = broadcastExecutorMap.remove(channelId);
+        if (exec != null) {
+            exec.shutdownNow();
+        }
+    }
+
+    /**
+      * 获取指定通道的广播线程池
+      * 
+      * @param channelId 通道ID
+      * @return 线程池
+      */
+     public static ThreadPoolExecutor getExecutor(String channelId) {
+         return broadcastExecutorMap.get(channelId);
+     }
+
+}

+ 147 - 0
src/main/java/com/jttserver/relay/workerthreads/VideoPublishWorker.java

@@ -0,0 +1,147 @@
+package com.jttserver.relay.workerthreads;
+
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Stream;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.jttserver.config.ConfigManager;
+import com.jttserver.protocol.Jtt1078NaluPacket;
+import com.jttserver.protocol.Jtt1078PacketParams;
+import com.jttserver.protocol.JttConstants;
+import com.jttserver.relay.StreamRelay;
+
+/**
+ * 视频发布工作线程管理器
+ * 负责为每个连接维护单消费者线程池,处理异步视频推流任务
+ */
+public class VideoPublishWorker {
+    private static final Logger logger = LoggerFactory.getLogger(VideoPublishWorker.class);
+
+    // 为每个连接维护单消费者线程池,确保每个channelId只有一个线程池在执行任务
+    private static final ConcurrentHashMap<String, ThreadPoolExecutor> publishExecutorMap = new ConcurrentHashMap<>();
+
+    /**
+     * 为指定通道创建单消费者执行器
+     * 
+     * @param channelId 通道ID
+     * @return 单线程线程池
+     */
+    public static ThreadPoolExecutor createSingleConsumerExecutor(String channelId) {
+        int capacity = Integer.parseInt(ConfigManager.get("publisher.queue.capacity", "1000"));
+        ThreadFactory tf = r -> {
+            Thread t = new Thread(r);
+            t.setName("publisher-" + channelId);
+            t.setDaemon(true);
+            return t;
+        };
+        // 创建单消费者线程池
+        return new ThreadPoolExecutor(
+                1,
+                1,
+                0L,
+                TimeUnit.MILLISECONDS,
+                new ArrayBlockingQueue<>(capacity),
+                tf,
+                new ThreadPoolExecutor.DiscardPolicy());
+    }
+
+    /**
+     * 为指定通道初始化发布线程池
+     * 
+     * @param channelId 通道ID
+     */
+    public static void initializeExecutor(String channelId) {
+        publishExecutorMap.put(channelId, createSingleConsumerExecutor(channelId));
+    }
+
+
+    /**
+     * 异步发布视频或音频数据 - 确保串行消费
+     * 使用单线程执行器,保证对于同一个channelId,任务是按顺序串行执行的
+     * 
+     * @param channelId 通道ID
+     * @param completeNaluData 完整的NALU数据(已复制,安全引用)
+     * @param latestParams 最新的参数信息(关键字段已克隆,安全引用)
+     * @param tsMs 时间戳
+     * @param streamRelay 流转发器(常用flv)
+     */
+    public static void publishAsync(String channelId, byte[] completeNaluData, 
+                                   Jtt1078PacketParams latestParams,
+                                   long tsMs, StreamRelay streamRelay) {
+        
+        // 创建推流任务
+        Runnable publishTask = () -> {
+            try {
+                if (latestParams.dataType <= JttConstants.TYPE_VIDEO_B_FRAME) {
+                    streamRelay.publishVideo(channelId, completeNaluData, latestParams, tsMs);
+                } else if (latestParams.dataType == JttConstants.TYPE_AUDIO) {
+                    streamRelay.publishAudio(channelId, completeNaluData, latestParams, tsMs);
+                }
+            } catch (Throwable t) {
+                logger.error("推流任务执行失败: {}", t.getMessage(), t);
+            }
+        };
+
+        // 获取该channelId对应的单线程执行器
+        ThreadPoolExecutor executor = publishExecutorMap.get(channelId);
+        if (executor != null) {
+            try {
+                // 将任务提交到队列,由单线程按顺序执行
+                executor.execute(publishTask);
+            } catch (RejectedExecutionException e) {
+                // 队列已满,丢弃数据(以后补充如果丢弃,则丢弃P帧)
+                logger.warn("推流队列已满,丢弃数据。channelId: {}, 队列容量: {}", channelId, executor.getQueue().size());
+            }
+        } else {
+            logger.warn("未找到通道 {} 的发布线程池,回退为直接推流", channelId);
+            // 直接在当前线程执行,保证功能可用
+            publishTask.run();
+        }
+    }
+
+    /**
+     * 关闭并移除指定通道的发布线程池
+     * 
+     * @param channelId 通道ID
+     */
+    public static void shutdownExecutor(String channelId) {
+        ThreadPoolExecutor exec = publishExecutorMap.remove(channelId);
+        if (exec != null) {
+            exec.shutdownNow();
+        }
+    }
+
+    /**
+     * 获取指定通道的线程池
+     * 
+     * @param channelId 通道ID
+     * @return 线程池
+     */
+    public static ThreadPoolExecutor getExecutor(String channelId) {
+        return publishExecutorMap.get(channelId);
+    }
+
+    /**
+     * 检查线程池状态和队列情况
+     * 用于监控和调试
+     * 
+     * @param channelId 通道ID
+     * @return 线程池状态信息
+     */
+    public static String getExecutorStatus(String channelId) {
+        ThreadPoolExecutor executor = publishExecutorMap.get(channelId);
+        if (executor == null) {
+            return "线程池不存在: " + channelId;
+        }
+        return String.format("channelId: %s, 活跃线程: %d, 队列任务数: %d, 完成任务数: %d",
+                channelId, executor.getActiveCount(), executor.getQueue().size(), executor.getCompletedTaskCount());
+    }
+
+}

+ 37 - 0
src/main/java/com/jttserver/service/publisher/PublishServer.java

@@ -0,0 +1,37 @@
+package com.jttserver.service.publisher;
+
+import com.jttserver.relay.StreamRelay;
+
/*
 * Base class for publish servers — the side that delivers packaged media
 * (e.g. FLV over WebSocket) to subscribers, keyed by streamId.
 */
public abstract class PublishServer {

    /**
     * Remove all mappings for the given raw channelId (called when the
     * source video connection is closed).
     */
    public abstract void removeChannelMapping(String channelId);

    /**
     * Maintain the mapping between a streamId (SIM card number + logical
     * channel number) and the randomly assigned source channelId.
     */
    public abstract void mapStreamToChannel(String streamId, String channelId);

    /**
     * Broadcast data to every subscriber of the given streamId.
     *
     * @param streamId stream id
     * @param data     packaged media bytes (e.g. one FLV tag)
     */
    public abstract void broadcast(String streamId, byte[] data);

    /*
     * Register the StreamRelay serving the given streamId.
     */
    public abstract void addStreamRelay(String streamId, StreamRelay relay);

    /*
     * Remove the StreamRelay registered for the given streamId.
     */
    public abstract void removeStreamRelay(String streamId);
}

+ 289 - 0
src/main/java/com/jttserver/service/publisher/WebsockServer.java

@@ -0,0 +1,289 @@
+package com.jttserver.service.publisher;
+
+import com.jttserver.relay.StreamRelay;
+
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.netty.bootstrap.ServerBootstrap;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelId;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.channel.ChannelInitializer;
+import io.netty.channel.ChannelOption;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.group.ChannelGroup;
+import io.netty.channel.group.DefaultChannelGroup;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.channel.socket.SocketChannel;
+import io.netty.channel.socket.nio.NioServerSocketChannel;
+import io.netty.handler.codec.http.HttpServerCodec;
+import io.netty.handler.codec.http.HttpObjectAggregator;
+import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolHandler;
+import io.netty.handler.codec.http.websocketx.BinaryWebSocketFrame;
+import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolConfig;
+
+public class WebsockServer extends PublishServer {
+
+    private static final Logger logger = LoggerFactory.getLogger(WebsockServer.class);
+
+    private final int port;
+
+    private EventLoopGroup bossGroup;
+    private EventLoopGroup workerGroup;
+    private Channel serverChannel;
+
+    // 管理 streamId 与对应的 StreamRelay 实例
+    private Map<String, StreamRelay> streamRelays = new ConcurrentHashMap<>();
+    public Map<String, StreamRelay> getStreamRelays() {
+        return streamRelays;
+    }
+    public StreamRelay getStreamRelay(String streamId) {
+        return streamRelays.get(streamId);
+    }
+
+
+    // 维护 streamId(sim+logic) 与 原始视频 channelId 的映射(注意是jtt1078设备的ChannelId)
+    private final Map<String, String> streamIdToChannelId = new ConcurrentHashMap<>();
+    private final Map<String, String> channelIdToStreamId = new ConcurrentHashMap<>();
+
+    // 订阅数据结构
+    // 按照streamId 为 key,ChannelGroup(包含多个channel) 为 value
+    private final Map<String, ChannelGroup> streamGroups = new ConcurrentHashMap<>();
+    // 按照channelId 为 key,streamId 为 value
+    private final Map<ChannelId, String> channelStreamMap = new ConcurrentHashMap<>();
+
+    // 按照channel为key,streamRelay为value,方便断开时清理
+    private final Map<ChannelId, StreamRelay> channelRelayMap = new ConcurrentHashMap<>();
+
+    public WebsockServer(int port) {
+        this.port = port;
+    }
+
+    /**
+     * 启动 WebSocket 服务器
+     */
+    public void start() throws InterruptedException {
+        bossGroup = new NioEventLoopGroup();
+        workerGroup = new NioEventLoopGroup();
+
+        ServerBootstrap bootstrap = new ServerBootstrap();
+
+        bootstrap.group(bossGroup, workerGroup)
+                .channel(NioServerSocketChannel.class)
+                .localAddress(new InetSocketAddress(port))
+                .childHandler(new ChannelInitializer<SocketChannel>() {
+
+                    @Override
+                    protected void initChannel(SocketChannel ch) throws Exception {
+                        ch.pipeline().addLast(new HttpServerCodec());
+                        ch.pipeline().addLast(new HttpObjectAggregator(65536));
+                        // 使用配置开启前缀匹配,允许 /realtime/{streamId}
+                        ch.pipeline().addLast(new WebSocketServerProtocolHandler(
+                                WebSocketServerProtocolConfig.newBuilder()
+                                        .websocketPath("/realtime")
+                                        .subprotocols(null)
+                                        .allowExtensions(true)
+                                        .checkStartsWith(true)
+                                        .build()));
+                        // 连接/断开日志与订阅管理处理器,注入当前服务器引用
+                        ch.pipeline().addLast(new WebSocketHandler(WebsockServer.this));
+                    }
+
+                })
+                .option(ChannelOption.SO_BACKLOG, 128)
+                .childOption(ChannelOption.SO_KEEPALIVE, true);
+
+        ChannelFuture future = bootstrap.bind().sync();
+        serverChannel = future.channel();
+        logger.info("FlvWebsock 推流服务器启动,监听端口: {}", port);
+    }
+
+    /**
+     * 增加流中转器
+     */
+    @Override
+    public void addStreamRelay(String streamId, StreamRelay relay) {
+        streamRelays.put(streamId, relay);
+    }
+
+    /**
+     * 移除流中转器
+     */
+    @Override
+    public void removeStreamRelay(String streamId) {
+        streamRelays.remove(streamId);
+    }
+
+    @Override
+    public void broadcast(String streamId, byte[] data) {
+
+    }
+
+    /**
+     * 维护 streamId(sim卡号+逻辑通道号) 与 随机 channelId 的对应关系
+     */
+    @Override
+    public void mapStreamToChannel(String streamId, String channelId) {
+        if (streamId == null || streamId.isEmpty() || channelId == null || channelId.isEmpty()) {
+            return;
+        }
+        streamIdToChannelId.put(streamId, channelId);
+        channelIdToStreamId.put(channelId, streamId);
+    }
+
+    /**
+     * 根据原始 channelId 移除映射(视频连接断开时调用)
+     */
+    @Override
+    public void removeChannelMapping(String channelId) {
+        if (channelId == null || channelId.isEmpty()) {
+            return;
+        }
+        String sid = channelIdToStreamId.remove(channelId);
+        if (sid != null) {
+            streamIdToChannelId.remove(sid);
+        }
+    }
+
+    /**
+     * 获取channelId对应的streamId方法
+     */
+    public String getStreamIdByChannelId(String channelId) {
+        return channelIdToStreamId.get(channelId);
+    }
+
+    /**
+     * 获取streamId对应的channelId方法
+     */
+    public String getChannelIdByStreamId(String streamId) {
+        return streamIdToChannelId.get(streamId);
+    }
+
+    /**
+     * 简单的日志处理器:打印连接、握手和断开事件,同时维护订阅关系
+     */
+    @ChannelHandler.Sharable
+    public static class WebSocketHandler extends ChannelInboundHandlerAdapter {
+        private static final Logger logger = LoggerFactory.getLogger(WebSocketHandler.class);
+
+        private final WebsockServer server;
+
+        public WebSocketHandler(WebsockServer server) {
+            this.server = server;
+        }
+
+        @Override
+        public void channelActive(ChannelHandlerContext ctx) throws Exception {
+            logger.info("WebSocket 客户端连接: {}", ctx.channel().remoteAddress());
+            super.channelActive(ctx);
+        }
+
+        @Override
+        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
+            logger.info("WebSocket 客户端断开: {}", ctx.channel().remoteAddress());
+            server.unregisterChannel(ctx.channel());
+            super.channelInactive(ctx);
+        }
+
+        @Override
+        public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
+            if (evt instanceof WebSocketServerProtocolHandler.HandshakeComplete) {
+                WebSocketServerProtocolHandler.HandshakeComplete hc = (WebSocketServerProtocolHandler.HandshakeComplete) evt;
+                String uri = hc.requestUri();
+                String path = uri.split("\\?")[0];
+                // 获取前缀列表 例如 /realtime/、 /playback/
+
+                String streamId = "";
+                String currPrefix = "";
+                // 流媒体转发类
+                StreamRelay relay = null;
+
+                for (String prefix : server.getStreamRelays().keySet()) {
+                    if (path.startsWith(prefix) && path.length() > prefix.length()) {
+                        streamId = path.substring(prefix.length());
+                        currPrefix = prefix;
+                        relay = server.getStreamRelay(currPrefix);
+
+                        // 移除可能的.flv后缀
+                        if (streamId.endsWith(".flv")) {
+                            streamId = streamId.substring(0, streamId.length() - 4);
+                        }
+                        break;
+                    }
+                }
+
+                logger.info("FLVWebSocket 握手完成 - 请求路径: {}, 流ID: {}", uri, streamId);
+                if (!streamId.isEmpty() && relay != null) {
+                    // 在注册订阅前,初始化发送该类型需要的数据
+                    String channelId = server.getChannelIdByStreamId(streamId);
+
+                    relay.initChannelConn(channelId, ctx.channel());
+                    server.registerChannel(streamId, ctx.channel(), relay);
+                }
+            }
+            super.userEventTriggered(ctx, evt);
+        }
+
+    }
+
+    // 注册订阅(握手完成后调用)
+    void registerChannel(String streamId, Channel ch, StreamRelay relay) {
+        if (streamId == null || streamId.isEmpty() || ch == null) {
+            return;
+        }
+        // 将channel加入对应的ChannelGroup
+        ChannelGroup group = streamGroups.computeIfAbsent(streamId,
+                k -> new DefaultChannelGroup(workerGroup.next()));
+        group.add(ch);
+        channelStreamMap.put(ch.id(), streamId);
+
+        // 将流转发器与channel关联
+        channelRelayMap.put(ch.id(), relay);
+    }
+
+    // 取消订阅(断开或异常时调用)
+    void unregisterChannel(Channel ch) {
+        if (ch == null) {
+            return;
+        }
+        String sid = channelStreamMap.remove(ch.id());
+        if (sid != null) {
+            ChannelGroup g = streamGroups.get(sid);
+            if (g != null) {
+                g.remove(ch);
+                if (g.isEmpty()) {
+                    streamGroups.remove(sid);
+                    // 移除流与通道映射
+                    String channelId = getChannelIdByStreamId(sid);
+                    if (channelId != null) {
+                        logger.info("流 {} 已无订阅者,channel已移除{}", sid, channelId);
+                        
+                        // destroy
+                        StreamRelay relay = channelRelayMap.remove(ch.id());
+                        if (relay != null) {
+                            relay.destroyChannelDisconn(channelId);
+                        }
+
+                        // 移除通道与流映射
+                        removeChannelMapping(channelId);
+                        
+                    }
+                }
+            }
+        }
+    }
+
+    
+
+}

+ 373 - 0
src/main/java/com/jttserver/service/receiver/JttVideoRecvServer.java

@@ -0,0 +1,373 @@
+package com.jttserver.service.receiver;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.jttserver.codec.Jtt1078MessageDecoder;
+import com.jttserver.config.ConfigManager;
+import com.jttserver.device.DeviceManager;
+import com.jttserver.protocol.Jtt1078NaluPacket;
+import com.jttserver.protocol.Jtt1078PacketParams;
+import com.jttserver.protocol.Jtt1078PacketParser;
+import com.jttserver.relay.StreamRelay;
+import com.jttserver.relay.workerthreads.BroadcastWorker;
+import com.jttserver.relay.workerthreads.VideoPublishWorker;
+import com.jttserver.service.publisher.PublishServer;
+import com.jttserver.utils.CommonUtils;
+
+import io.netty.bootstrap.ServerBootstrap;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.channel.ChannelInitializer;
+import io.netty.channel.ChannelOption;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.channel.socket.SocketChannel;
+import io.netty.channel.socket.nio.NioServerSocketChannel;
+
+/* 
+ * 通用接收视频服务器
+ */
+public class JttVideoRecvServer extends RecvSever {
+    private static final Logger logger = LoggerFactory.getLogger(JttVideoRecvServer.class);
+
+    // 接收服务器监听端口,-1表示未配置
+    private final int port = -1;
+
+    private EventLoopGroup bossGroup;
+    private EventLoopGroup workerGroup;
+    private Channel serverChannel;
+
+    // 设备管理功能开关状态(初始化时获取,避免频繁调用ConfigManager)
+    private final boolean deviceManagementEnabled = ConfigManager.isDeviceManagementEnabled();
+
+    // 存储每个连接的Channel
+    private static final Map<String, Channel> channelIdToCtxMap = new ConcurrentHashMap<>();
+
+    // 流转发器
+    private StreamRelay streamRelay;
+
+    // 流发布服务器
+    private PublishServer publishServer;
+
+    JttVideoRecvServer(StreamRelay streamRelay, PublishServer publishServer) {
+        this.streamRelay = streamRelay;
+        this.publishServer = publishServer;
+    }
+
+    /**
+     * 启动服务器
+     */
+    public void start() throws InterruptedException {
+        // 用于接受新的连接
+        bossGroup = new NioEventLoopGroup(1);
+        // 用于处理已经被接收的连接
+        workerGroup = new NioEventLoopGroup();
+
+        ServerBootstrap bootstrap = new ServerBootstrap();
+        bootstrap.group(bossGroup, workerGroup)
+                .channel(NioServerSocketChannel.class)
+                .localAddress(new InetSocketAddress(port))
+                .childHandler(new ChannelInitializer<SocketChannel>() {
+                    @Override
+                    protected void initChannel(SocketChannel ch) throws Exception {
+                        ch.pipeline().addLast(new Jtt1078MessageDecoder()); // 添加JTT1078解码器处理粘包拆包
+
+                        // 添加处理视频流数据的处理器
+                        ch.pipeline()
+                                .addLast(new VideoStreamHandler(deviceManagementEnabled, streamRelay, publishServer));
+                    }
+                })
+                .option(ChannelOption.SO_BACKLOG, 128)
+                .childOption(ChannelOption.SO_KEEPALIVE, true);
+        // 绑定端口并启动服务器
+        ChannelFuture future = bootstrap.bind().sync();
+        serverChannel = future.channel();
+
+        logger.info("Netty视频流服务器启动,监听端口: {}", port);
+
+    }
+
+    /**
+     * 等待服务器关闭
+     */
+    public void waitForShutdown() throws InterruptedException {
+        if (serverChannel != null) {
+            // 等待服务器通道关闭
+            serverChannel.closeFuture().sync();
+        }
+    }
+
+    /**
+     * 停止服务器
+     */
+    public void stop() {
+        if (serverChannel != null) {
+            serverChannel.close();
+        }
+
+        if (bossGroup != null) {
+            bossGroup.shutdownGracefully();
+        }
+
+        if (workerGroup != null) {
+            workerGroup.shutdownGracefully();
+        }
+
+        logger.info("Netty视频流服务器已停止");
+    }
+
+    /**
+     * 主动断开通道
+     * 
+     * @param channelId 通道ID
+     */
+    public void disconnChannel(String channelId) {
+        // 获取 Channel
+        Channel channel = channelIdToCtxMap.get(channelId);
+        if (channel != null) {
+            channel.eventLoop().execute(() -> {
+                VideoStreamHandler handler = channel.pipeline().get(VideoStreamHandler.class);
+                if(handler != null) {
+                    handler.deviceDisConnectChannel(channelId);
+                }
+            });
+        }
+    }
+
+    /**
+     * 视频流数据处理器
+     */
+    @ChannelHandler.Sharable
+    public static class VideoStreamHandler extends ChannelInboundHandlerAdapter {
+
+        private static final Logger logger = LoggerFactory.getLogger(VideoStreamHandler.class);
+
+        // 存储每个连接的NALU包组装器
+        private final Map<String, Jtt1078NaluPacket> naluPacketAssemblerMap = new ConcurrentHashMap<>();
+
+        // 设备管理功能开关状态
+        private final boolean deviceManagementEnabled;
+
+        // flv发布器引用
+        private final StreamRelay streamRelay;
+        // WebSocket服务器引用
+        private final PublishServer publishServer;
+
+        public VideoStreamHandler(boolean deviceManagementEnabled, StreamRelay streamRelay,
+                PublishServer publishServer) {
+            this.deviceManagementEnabled = deviceManagementEnabled;
+            this.streamRelay = streamRelay;
+            this.publishServer = publishServer;
+        }
+
+        @Override
+        public void channelActive(ChannelHandlerContext ctx) throws Exception {
+            String channelId = ctx.channel().id().asShortText();
+            // 为每个连接创建独立的NALU包组装器
+            naluPacketAssemblerMap.put(channelId, new Jtt1078NaluPacket());
+            // 为每个连接创建单消费者线程池用于异步推流
+            VideoPublishWorker.initializeExecutor(channelId);
+            // 为每个连接创建广播线程池
+            BroadcastWorker.initBroadcastExecutor(channelId);
+
+            // 添加设备信息(受功能开关控制)
+            if (deviceManagementEnabled) {
+                DeviceManager.DeviceInfo deviceInfo = new DeviceManager.DeviceInfo(channelId,
+                        ctx.channel().remoteAddress().toString());
+                DeviceManager.registerDevice(channelId, deviceInfo);
+            }
+
+            // 存储Channel
+            channelIdToCtxMap.put(channelId, ctx.channel());
+
+            logger.info("新客户端连接: {} ,ChannelId: {}", ctx.channel().remoteAddress(), channelId);
+            super.channelActive(ctx);
+        }
+
+        @Override
+        public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
+
+            ByteBuf buf = null;
+
+            if (msg instanceof ByteBuf) {
+                buf = (ByteBuf) msg;
+            } else if (msg instanceof byte[]) {
+                // 如果消息是byte[]类型(来自Jtt1078MessageDecoder),则转换为ByteBuf
+                buf = Unpooled.wrappedBuffer((byte[]) msg);
+            } else {
+                logger.warn("收到未知类型数据: {}", msg.getClass().getName());
+                super.channelRead(ctx, msg);
+                return;
+            }
+
+            // 打印收到的数据信息
+            // int length = buf.readableBytes();
+            // System.out.println("收到来自 " + ctx.channel().remoteAddress() + " 的数据,长度: " +
+            // length + " 字节");
+
+            // 更新设备活动时间(受功能开关控制)
+            String channelId = ctx.channel().id().asShortText();
+            if (deviceManagementEnabled) {
+                DeviceManager.updateDeviceActiveTime(channelId);
+            }
+            // 解析JTT1078协议数据包
+            try {
+                Jtt1078PacketParser.Jtt1078Packet packet = Jtt1078PacketParser.parse(buf);
+                // System.out.println("解析到JTT1078数据包: " + packet.toString());
+
+                // 更新设备信息(受功能开关控制)
+                if (deviceManagementEnabled) {
+                    updateDeviceInfo(packet, channelId);
+                }
+
+                // 处理NALU包拼接逻辑,处理数据
+                handleNaluPacketAssembly(packet, ctx, channelId);
+            } catch (Exception e) {
+                logger.error("解析JTT1078数据包时出错", e);
+            }
+
+            // 释放ByteBuf资源(如果是从byte[]创建的Unpooled.wrappedBuffer,则不需要显式释放)
+            if (msg instanceof ByteBuf) {
+                buf.release();
+            }
+
+            // 释放ByteBuf资源
+            super.channelRead(ctx, msg);
+        }
+
+        /**
+         * 更新设备信息(SIM卡号、逻辑通道号)
+         */
+        private void updateDeviceInfo(Jtt1078PacketParser.Jtt1078Packet packet, String channelId) {
+            // 将SIM卡号转换为标准字符串
+            String simStr = packet.simCardNumberStr;
+            DeviceManager.updateDeviceSimCardNumber(channelId, simStr);
+            DeviceManager.updateDeviceLogicChannelNumber(channelId, packet.logicChannelNumber);
+        }
+
+        /**
+         * 处理NALU包拼接逻辑
+         * 
+         * @param packet 解析后的JTT1078数据包
+         * @param ctx    ChannelHandlerContext
+         */
+        private void handleNaluPacketAssembly(Jtt1078PacketParser.Jtt1078Packet packet, ChannelHandlerContext ctx,
+                String channelId) {
+
+            Jtt1078NaluPacket assembler = naluPacketAssemblerMap.get(channelId);
+            if (assembler == null) {
+                logger.warn("未找到通道 {} 的NALU包组装器", channelId);
+                return;
+            }
+            // 将数据包添加到NALU组装器中
+            boolean isNaluComplete = assembler.addPacket(packet);
+
+            // 如果NALU单元已完成
+            if (isNaluComplete) {
+                // 获取完整的NALU数据
+                byte[] completeNaluData = assembler.getCompleteNaluData();
+
+                // 获取最新的参数信息
+                Jtt1078PacketParams latestParams = assembler.getLatestParams();
+
+                // 计算时间戳
+                long tsMs = CommonUtils.toTimestampMillis(latestParams != null ? latestParams.timestamp : null);
+                // // 视频数据类型0x00-0x02 推流
+                // if (latestParams.dataType <= JttConstants.TYPE_VIDEO_B_FRAME) {
+                // flvPublisher.publishVideo(channelId, completeNaluData, latestParams, tsMs);
+                // }
+                // // 音频数据类型0x03 推流
+                // else if (latestParams.dataType == JttConstants.TYPE_AUDIO) {
+                // flvPublisher.publishAudio(channelId, completeNaluData, latestParams, tsMs);
+                // }
+
+                // 异步推送到视频发布服务(单线程串行消费)
+                VideoPublishWorker.publishAsync(channelId, completeNaluData, latestParams, tsMs, streamRelay);
+
+                // 清空当前NALU单元,准备下一个
+                assembler.clear();
+            }
+        }
+
+        /**
+         * 主动断开通道
+         * 
+         * @param channelId 通道ID
+         */
+        public void deviceDisConnectChannel(String channelId) {
+            // 获取 Channel
+            Channel channel = channelIdToCtxMap.get(channelId);
+            if (channel != null && channel.isActive()) {
+                // 主动关闭通道
+                logger.info("主动关闭通道: {}", channelId);
+                channel.close();
+            }
+        }
+
+        /**
+         * 清除通道上下文
+         * 
+         * @param ctx ChannelHandlerContext
+         */
+        public void clearChannelContext(ChannelHandlerContext ctx, Boolean isInactive) throws Exception {
+            String channelId = ctx.channel().id().asShortText();
+            // 清理资源
+            naluPacketAssemblerMap.remove(channelId);
+            if (isInactive) {
+                logger.info("客户端断开连接: {} ,ChannelId: {}", ctx.channel().remoteAddress(), channelId);
+            }
+            // 移除设备信息(受功能开关控制)
+            if (deviceManagementEnabled) {
+                DeviceManager.unregisterDevice(channelId);
+            }
+            // 同步清理发布器缓冲
+            streamRelay.closeChannel(channelId);
+            // 关闭并移除发布线程池
+            VideoPublishWorker.shutdownExecutor(channelId);
+            // 关闭并移除广播线程池
+            BroadcastWorker.shutdownExecutor(channelId);
+            // 移除ChannelHandlerContext
+            channelIdToCtxMap.remove(channelId);
+        }
+
+        @Override
+        public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
+            clearChannelContext(ctx, false);
+            super.handlerRemoved(ctx);
+        }
+
+        @Override
+        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
+            clearChannelContext(ctx, true);
+            super.channelInactive(ctx);
+        }
+
+        @Override
+        public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
+            // 检查是否为连接中断异常
+            if (cause instanceof IOException &&
+                    (cause.getMessage().contains("An established connection was aborted") ||
+                            cause.getMessage().contains("软件中止了一个已建立的连接") ||
+                            cause.getMessage().contains("Connection reset"))) {
+                logger.warn("客户端主动断开连接: {}", ctx.channel().remoteAddress());
+            } else {
+                logger.error("处理客户端数据时出错", cause);
+            }
+
+            // 关闭连接
+            ctx.close();
+        }
+    }
+
+}

+ 14 - 0
src/main/java/com/jttserver/service/receiver/RecvSever.java

@@ -0,0 +1,14 @@
+package com.jttserver.service.receiver;
+
/**
 * Base class for servers that receive media streams from devices.
 */
public abstract class RecvSever {

    /**
     * Actively disconnects the given channel.
     *
     * @param channelId channel ID
     */
    public abstract void disconnChannel(String channelId);
}

+ 130 - 0
src/main/java/com/jttserver/utils/CommonUtils.java

@@ -0,0 +1,130 @@
+package com.jttserver.utils;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
/**
 * Common utility methods (byte-level and codec-related shared logic).
 */
public final class CommonUtils {
    private CommonUtils() {
        // Utility class; no instances.
    }

    /**
     * Writes an unsigned 24-bit big-endian integer (low 3 bytes of {@code value}).
     *
     * @param out   destination stream
     * @param value value whose low 24 bits are written
     * @throws IOException never thrown by ByteArrayOutputStream; declared for API symmetry
     */
    public static void writeUi24(ByteArrayOutputStream out, int value) throws IOException {
        out.write((value >> 16) & 0xFF);
        out.write((value >> 8) & 0xFF);
        out.write(value & 0xFF);
    }

    /**
     * Writes a signed 24-bit big-endian integer (used for FLV CompositionTime).
     * The wire format is identical to {@link #writeUi24}: two's-complement values
     * carry their sign in bit 23 of the truncated 24-bit field.
     *
     * @param out   destination stream
     * @param value value whose low 24 bits are written
     */
    public static void writeSi24(ByteArrayOutputStream out, int value) throws IOException {
        // Deduplicated: byte layout is the same as the unsigned variant.
        writeUi24(out, value);
    }

    /**
     * Writes an FLV tag timestamp: Timestamp (low 24 bits, big-endian) followed by
     * TimestampExtended (bits 24-31).
     *
     * @param out       destination stream
     * @param timestamp 32-bit timestamp in milliseconds
     */
    public static void writeTimestamp(ByteArrayOutputStream out, int timestamp) throws IOException {
        // Low 24 bits first, then the extension byte (matches the FLV spec layout).
        byte[] ts = ByteBuffer.allocate(4).putInt(timestamp).array();
        out.write(ts, 1, 3);
        out.write(ts[0]);
    }

    /**
     * Converts an 8-byte big-endian timestamp to milliseconds.
     *
     * @param ts 8-byte big-endian value; null or wrong length yields 0
     * @return the timestamp in milliseconds
     */
    public static long toTimestampMillis(byte[] ts) {
        if (ts == null || ts.length != 8) {
            return 0L;
        }
        long v = 0L;
        for (int i = 0; i < 8; i++) {
            v = (v << 8) | (ts[i] & 0xFF);
        }
        return v;
    }

    /**
     * Converts an Annex-B byte stream (or a single raw NALU with no start code)
     * into a 4-byte length-prefixed payload; multiple NALUs are concatenated.
     *
     * @param input Annex-B data; null/empty yields an empty array
     * @return length-prefixed NALU payload
     */
    public static byte[] toLengthPrefixedPayloadFromAnnexB(byte[] input) throws IOException {
        if (input == null || input.length == 0)
            return new byte[0];
        ByteArrayOutputStream out = new ByteArrayOutputStream();

        int pos = 0;
        int nalStart = -1;

        while (pos < input.length) {
            int scPos = -1;
            int scLen = 0;
            // Find the next start code: 0x000001 or 0x00000001.
            for (int i = pos; i <= input.length - 3; i++) {
                // 0x000001
                if (input[i] == 0x00 && input[i + 1] == 0x00 && input[i + 2] == 0x01) {
                    scPos = i;
                    scLen = 3;
                    break;
                }
                // 0x00000001
                if (i <= input.length - 4 && input[i] == 0x00 && input[i + 1] == 0x00 && input[i + 2] == 0x00
                        && input[i + 3] == 0x01) {
                    scPos = i;
                    scLen = 4;
                    break;
                }
            }

            if (scPos == -1) {
                // No further start codes.
                if (nalStart == -1) {
                    // No start code seen at all: treat the remaining input as one NALU.
                    int len = input.length - pos;
                    if (len <= 0)
                        break;
                    out.write(ByteBuffer.allocate(4).putInt(len).array());
                    out.write(input, pos, len);
                } else {
                    // Final NALU runs from nalStart to the end of input.
                    int len = input.length - nalStart;
                    if (len > 0) {
                        out.write(ByteBuffer.allocate(4).putInt(len).array());
                        out.write(input, nalStart, len);
                    }
                }
                break;
            } else {
                if (nalStart == -1) {
                    // First NALU begins right after this start code.
                    nalStart = scPos + scLen;
                    pos = nalStart;
                } else {
                    // Emit the previous NALU (nalStart up to this start code).
                    int len = scPos - nalStart;
                    if (len > 0) {
                        out.write(ByteBuffer.allocate(4).putInt(len).array());
                        out.write(input, nalStart, len);
                    }
                    // Next NALU begins after the new start code.
                    nalStart = scPos + scLen;
                    pos = nalStart;
                }
                // Continue scanning for the next start code.
            }
        }

        // Fallback: reachable only for degenerate input that is nothing but a start
        // code (e.g. {0,0,1}); preserved for compatibility — the whole input is
        // emitted as a single length-prefixed fragment.
        if (out.size() == 0) {
            out.write(ByteBuffer.allocate(4).putInt(input.length).array());
            out.write(input);
        }
        return out.toByteArray();
    }

    /**
     * Formats bytes as space-separated upper-case hex, e.g. {@code "0A FF"}.
     *
     * @param bytes bytes to format
     * @return hex string without trailing space
     */
    public static String bytesToHex(byte[] bytes) {
        StringBuilder sb = new StringBuilder();
        for (byte b : bytes) {
            sb.append(String.format("%02X ", b));
        }
        return sb.toString().trim();
    }
}

+ 56 - 0
src/main/java/com/jttserver/utils/SimCardUtils.java

@@ -0,0 +1,56 @@
+package com.jttserver.utils;
+
/**
 * SIM card number utilities.
 * Converts SIM card number byte arrays (typically BCD-encoded) into a unified
 * string form. Null or empty input yields an empty string.
 *
 * Example:
 *   input: { 0x01, 0x34, 0x02, 0x48, 0x03, (byte)0x91 } (BCD)
 *   digits: "013402480391" (after stripping leading zeros: "13402480391")
 */
public final class SimCardUtils {

    private SimCardUtils() {
        // Utility class; no instances.
    }

    /**
     * Converts a SIM card number byte array to its normalized decimal string form.
     *
     * @param simBytes SIM card number bytes (typically BCD-encoded)
     * @return normalized SIM string; empty string for null/empty input; "0" for all zeros
     */
    public static String toStandardString(byte[] simBytes) {
        if (simBytes == null || simBytes.length == 0) {
            return "";
        }
        StringBuilder digits = new StringBuilder(simBytes.length * 2);
        for (byte b : simBytes) {
            int high = (b >> 4) & 0x0F;
            int low = b & 0x0F;
            // Keep only decimal nibbles; 0xF padding and other values are skipped.
            // (Masked nibbles are always >= 0, so only the upper bound is checked.)
            if (high <= 9) digits.append(high);
            if (low <= 9) digits.append(low);
        }
        // Strip leading zeros.
        int i = 0;
        while (i < digits.length() && digits.charAt(i) == '0') {
            i++;
        }
        if (i >= digits.length()) {
            return "0"; // all-zero input
        }
        return digits.substring(i);
    }

    /**
     * Builds a streamId from a SIM card number string and a logical channel number.
     *
     * @param sim                normalized SIM string
     * @param logicChannelNumber logical channel number
     * @return "{sim}-{logicChannelNumber}", or "" when sim is null/empty
     */
    public static String buildStreamId(String sim, int logicChannelNumber) {
        // Cleaned up: the original repeated this null/empty check twice and carried
        // a commented-out line between the two copies.
        if (sim == null || sim.isEmpty()) {
            return "";
        }
        return sim + "-" + logicChannelNumber;
    }
}

+ 19 - 0
src/main/resources/app.properties

@@ -0,0 +1,19 @@
+# 重要提示:默认路径,编译机器使用,如果开发者机器有自定义路径,可在 app-local.properties 中覆盖,可以使用以下运行代码修改运行时路径:java -Dnative.windows.x64.baseDir=D:\libs\aac -jar jtt1078server.jar 其他参数类似。
+
+
+# Windows x64 base directory containing fdk-aac.dll and aac_jni.dll
+native.windows.x64.baseDir=d:\\codePro\\TQ\\jtt1078server\\native\\windows\\x64\\Release
+
+# Linux base directory containing fdk-aac.so and aac_jni.so
+native.linux.x64.baseDir=/opt/jtt1078server/native/linux/
+
+# 默认服务器端口(可在 app-local.properties 中覆盖)
+server.video.port=18080
+server.websocket.port=18090
+server.manager.port=8099
+
+# 默认自动关闭时间
+server.video.autoCloseTimeout=300
+
+# 默认无数据30秒关闭
+server.video.idleTimer = 30

+ 35 - 0
src/main/resources/logback.xml

@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+    <!-- Console Appender -->
+    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+            <charset>${CONSOLE_CHARSET:-UTF-8}</charset>
+        </encoder>
+    </appender>
+
+    <!-- Rolling File Appender: daily rollover, keep 14 days -->
+    <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <file>logs/jtt1078server.log</file>
+        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+            <fileNamePattern>logs/jtt1078server.%d{yyyy-MM-dd}.log</fileNamePattern>
+            <maxHistory>14</maxHistory>
+            <totalSizeCap>2GB</totalSizeCap>
+        </rollingPolicy>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+            <charset>UTF-8</charset>
+        </encoder>
+    </appender>
+
+    <!-- Set default logging levels -->
+    <logger name="com.jttserver" level="DEBUG" additivity="false">
+        <appender-ref ref="CONSOLE"/>
+        <appender-ref ref="FILE"/>
+    </logger>
+
+    <root level="INFO">
+        <appender-ref ref="CONSOLE"/>
+        <appender-ref ref="FILE"/>
+    </root>
+</configuration>

+ 274 - 0
src/main/resources/web/devices.html

@@ -0,0 +1,274 @@
+<!DOCTYPE html>
+<html lang="zh-CN">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1" />
+    <title>设备连接详情</title>
+    <style>
+        :root {
+            --bg: #f6f8fa;
+            --card: #ffffff;
+            --border: #e5e7eb;
+            --text: #111827;
+            --muted: #6b7280;
+            --primary: #1976d2;
+            --secondary: #f9fafb;
+        }
+        * { box-sizing: border-box; }
+        html, body { height: 100%; }
+        body {
+            margin: 0;
+            font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen, Ubuntu, Cantarell, "Helvetica Neue", Arial, "Noto Sans", "PingFang SC", "Microsoft YaHei", sans-serif;
+            color: var(--text);
+            background: var(--bg);
+        }
+        .page { max-width: 1100px; margin: 0 auto; padding: 20px; }
+        .page-header { display: flex; align-items: center; justify-content: space-between; margin-bottom: 16px; }
+        .page-header h1 { font-size: 22px; margin: 0; }
+        .subtext { color: var(--muted); font-size: 13px; }
+
+        .grid { display: grid; grid-template-columns: 1fr; gap: 16px; }
+        @media (min-width: 768px) { .grid { grid-template-columns: 1fr; } }
+
+        .card { background: var(--card); border: 1px solid var(--border); border-radius: 12px; box-shadow: 0 1px 3px rgba(0,0,0,.06); }
+        .card-header { padding: 14px 16px; border-bottom: 1px solid var(--border); display: flex; align-items: center; justify-content: space-between; }
+        .card-title { font-size: 16px; font-weight: 600; }
+        .card-body { padding: 14px 16px; }
+
+        .toolbar { display: flex; flex-wrap: wrap; gap: 12px; }
+        .btn { display: inline-flex; align-items: center; gap: 8px; padding: 10px 14px; border-radius: 8px; text-decoration: none; border: 1px solid transparent; transition: background .2s ease, box-shadow .2s ease; }
+        .btn-primary { background: var(--primary); color: #fff; }
+        .btn-primary:hover { background: #1561ac; }
+        .btn-secondary { background: var(--secondary); color: #111827; }
+        .btn-secondary:hover { background: #e5e7eb; }
+        .btn-player { background: #3b82f6; color: #fff; }
+        .btn-player:hover { background: #2563eb; }
+        .btn-player-no-audio { background: #10b981; color: #fff; }
+        .btn-player-no-audio:hover { background: #0d9463; }
+
+        .table-responsive { width: 100%; overflow-x: auto; }
+        table { width: 100%; border-collapse: collapse; }
+        thead th {
+            background: #f9fafb;
+            border-bottom: 1px solid var(--border);
+            color: #374151;
+            font-weight: 600;
+            text-align: left;
+            padding: 12px 14px;
+            white-space: nowrap;
+        }
+        tbody td {
+            border-top: 1px solid var(--border);
+            padding: 12px 14px;
+            line-height: 1.5;
+            color: #1f2937;
+        }
+        tbody tr:nth-child(odd) { background: #fcfcfd; }
+        tbody tr:hover { background: #eef5ff; }
+
+        /* 轻量分隔与模块层次 */
+        .section-note { margin-top: 4px; color: var(--muted); font-size: 12px; }
+
+        /* 响应式优化 */
+        @media (max-width: 600px) {
+            .page { padding: 16px; }
+            .page-header h1 { font-size: 18px; }
+            thead th, tbody td { padding: 10px 12px; font-size: 14px; }
+            .btn { width: 100%; justify-content: center; }
+        }
+    </style>
+</head>
+<body>
+    <div class="page">
+        <header class="page-header">
+            <div>
+                <h1>设备连接详情</h1>
+                <div class="subtext">实时展示当前连接设备的关键信息</div>
+            </div>
+        </header>
+
+        <div class="grid">
+            <!-- 功能入口模块 -->
+            <section class="card">
+                <div class="card-header">
+                    <div class="card-title">功能入口</div>
+                </div>
+                <div class="card-body">
+                    <div class="toolbar">
+                        <a id="openPlayerBtn" class="btn btn-player" href="/player.html" target="_blank"
+                            rel="noopener">打开 WebSocket 播放器测试页面</a>
+                        <a id="openPlayerNoAudioBtn" class="btn btn-player-no-audio" href="/playerWhthoutAudio.html"
+                            target="_blank" rel="noopener">打开无音频播放器测试页面</a>
+                        <a id="openJessibucaBtn" class="btn btn-secondary" href="/jessibuca/demo.html"
+                            target="_blank" rel="noopener">打开 Jessibuca 播放器测试页面</a>
+                        <a id="openOfflinePlayerBtn" class="btn btn-secondary" href="/offline_player.html"
+                            target="_blank" rel="noopener">打开离线播放器(开发参考)</a>
+                        <a id="openOfflinePlayerNoAudioBtn" class="btn btn-secondary"
+                            href="/offline_player.html?noAudio=true" target="_blank" rel="noopener">打开离线无音频播放器(开发参考)</a>
+                    </div>
+                    <div id="popupTip" class="section-note" style="display:none;">
+                        浏览器阻止了弹出窗口。请允许弹窗或点击以下备用链接:
+                        <a href="/player.html" target="_blank" rel="noopener">播放器</a> |
+                        <a href="/jessibuca/demo.html" target="_blank" rel="noopener">Jessibuca 播放器</a> |
+                        <a href="/playerWhthoutAudio.html" target="_blank" rel="noopener">无音频播放器</a> |   
+                        <a href="/offline_player.html?noAudio=true" target="_blank" rel="noopener">离线无音频播放器</a>
+                    </div>
+                </div>
+            </section>
+
+            <!-- 设备列表模块 -->
+            <section class="card">
+                <div class="card-header">
+                    <div class="card-title">设备列表</div>
+                </div>
+                <div class="card-body">
+                    <div class="table-responsive">
+                        <table>
+                            <thead>
+                                <tr>
+                                    <th>远程地址</th>
+                                    <th>SIM卡号</th>
+                                    <th>逻辑通道号</th>
+                                    <th>连接时间</th>
+                                    <th>最后活跃时间</th>
+                                    <th>操作</th>
+                                </tr>
+                            </thead>
+                            <tbody id="deviceTableBody">
+                                <!-- 数据将通过JavaScript填充 -->
+                            </tbody>
+                        </table>
+                    </div>
+                </div>
+            </section>
+        </div>
+    </div>
+
+    <script>
+        function loadDevices() {
+            fetch('/devices')
+                .then(response => response.json())
+                .then(devices => {
+                    const tbody = document.getElementById('deviceTableBody');
+                    tbody.innerHTML = '';
+
+                    devices.forEach(device => {
+                        const row = document.createElement('tr');
+
+                        const addressCell = document.createElement('td');
+                        addressCell.textContent = device.remoteAddress;
+                        row.appendChild(addressCell);
+
+                        const simCell = document.createElement('td');
+                        simCell.textContent = device.simCardNumber || 'N/A';
+                        row.appendChild(simCell);
+
+                        const channelCell = document.createElement('td');
+                        channelCell.textContent = device.logicChannelNumber;
+                        row.appendChild(channelCell);
+
+                        const connectTimeCell = document.createElement('td');
+                        connectTimeCell.textContent = new Date(device.connectTime).toLocaleString();
+                        row.appendChild(connectTimeCell);
+
+                        const activeTimeCell = document.createElement('td');
+                        activeTimeCell.textContent = new Date(device.lastActiveTime).toLocaleString();
+                        row.appendChild(activeTimeCell);
+
+                        // 添加操作按钮列
+                        const actionCell = document.createElement('td');
+                        actionCell.style.whiteSpace = 'nowrap';
+                        actionCell.style.display = 'flex';
+                        actionCell.style.gap = '6px';
+                        actionCell.style.flexWrap = 'wrap';
+                        
+
+                        // 播放按钮
+                        const playBtn = document.createElement('button');
+                        playBtn.textContent = '播放';
+                        playBtn.className = 'btn btn-player';
+                        playBtn.onclick = function () {
+                            // 构建带参数的URL
+                            const url = `/player.html?sim=${encodeURIComponent(device.simCardNumber || '')}&channel=${encodeURIComponent(device.logicChannelNumber)}`;
+                            // 使用bindOpenInPopup函数打开播放器
+                            openPlayerInPopup(url, 'PlayerWindow');
+                        };
+                        actionCell.appendChild(playBtn);
+
+                        // 无音频播放按钮(偏绿色)
+                        const playNoAudioBtn = document.createElement('button');
+                        playNoAudioBtn.textContent = '无音频播放';
+                        playNoAudioBtn.className = 'btn btn-player-no-audio';
+
+                        playNoAudioBtn.onclick = function () {
+                            // 构建带参数的URL
+                            const url = `/playerWhthoutAudio.html?sim=${encodeURIComponent(device.simCardNumber || '')}&channel=${encodeURIComponent(device.logicChannelNumber)}`;
+                            // 使用bindOpenInPopup函数打开无音频播放器
+                            openPlayerInPopup(url, 'PlayerWindowNoAudio');
+                        };
+                        actionCell.appendChild(playNoAudioBtn);
+
+                        row.appendChild(actionCell);
+
+                        tbody.appendChild(row);
+                    });
+                })
+                .catch(error => console.error('Error:', error));
+        }
+
+        // 初始加载
+        loadDevices();
+        // 每5秒刷新一次
+        setInterval(loadDevices, 5000);
+
+        // 打开播放器(新窗口)通用函数
+        function openPlayerInPopup(targetUrl, windowName) {
+            const availW = Math.max(800, (window.screen && window.screen.availWidth) ? window.screen.availWidth : 1200);
+            const availH = Math.max(600, (window.screen && window.screen.availHeight) ? window.screen.availHeight : 800);
+            const width = Math.min(1200, Math.floor(availW * 0.9));
+            const height = Math.min(800, Math.floor(availH * 0.9));
+            const left = Math.max(0, Math.floor((availW - width) / 2));
+            const top = Math.max(0, Math.floor((availH - height) / 2));
+            const features = `popup=yes,width=${width},height=${height},left=${left},top=${top},menubar=no,toolbar=no,location=no,status=no,resizable=yes,scrollbars=yes`;
+            const newWin = window.open(targetUrl, windowName, features);
+            if (newWin && typeof newWin.focus === 'function') {
+                try { newWin.focus(); } catch (_) { }
+            } else {
+                // 如果弹窗被阻止,显示提示并直接跳转
+                alert('浏览器阻止了弹出窗口,将直接跳转');
+                window.open(targetUrl, '_blank');
+            }
+        }
+
+        // 打开播放器(新窗口)通用绑定
+        function bindOpenInPopup(btnId, targetUrl, windowName, tipId) {
+            const btn = document.getElementById(btnId);
+            const tip = tipId ? document.getElementById(tipId) : null;
+            if (!btn) return;
+            btn.addEventListener('click', function (e) {
+                e.preventDefault();
+                const availW = Math.max(800, (window.screen && window.screen.availWidth) ? window.screen.availWidth : 1200);
+                const availH = Math.max(600, (window.screen && window.screen.availHeight) ? window.screen.availHeight : 800);
+                const width = Math.min(1200, Math.floor(availW * 0.9));
+                const height = Math.min(800, Math.floor(availH * 0.9));
+                const left = Math.max(0, Math.floor((availW - width) / 2));
+                const top = Math.max(0, Math.floor((availH - height) / 2));
+                const features = `popup=yes,width=${width},height=${height},left=${left},top=${top},menubar=no,toolbar=no,location=no,status=no,resizable=yes,scrollbars=yes`;
+                const newWin = window.open(targetUrl, windowName, features);
+                if (newWin && typeof newWin.focus === 'function') {
+                    try { newWin.focus(); } catch (_) {}
+                } else {
+                    if (tip) tip.style.display = 'block';
+                }
+            });
+        }
+        bindOpenInPopup('openPlayerBtn', '/player.html', 'PlayerWindow', 'popupTip');
+        bindOpenInPopup('openPlayerNoAudioBtn', '/playerWhthoutAudio.html', 'PlayerWindowNoAudio', 'popupTip');
+        bindOpenInPopup('openJessibucaBtn', '/jessibuca/demo.html', 'JessibucaPlayerWindow', 'popupTip');
+        bindOpenInPopup('openOfflinePlayerBtn', '/offline_player.html', 'OfflinePlayerWindow', 'popupTip');
+        bindOpenInPopup('openOfflinePlayerNoAudioBtn', '/offline_player.html?noAudio=true', 'OfflinePlayerWindowNoAudio', 'popupTip');
+
+
+    </script>
+</body>
+</html>

File diff suppressed because it is too large
+ 0 - 0
src/main/resources/web/jessibuca/decoder.js


BIN
src/main/resources/web/jessibuca/decoder.wasm


+ 190 - 0
src/main/resources/web/jessibuca/demo.html

@@ -0,0 +1,190 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <title>Title</title>
+    <script src="./jessibuca.js"></script>
+    <style>
+        .root {
+            display: flex;
+            place-content: center;
+            margin-top: 3rem;
+        }
+
+        .container-shell {
+            backdrop-filter: blur(5px);
+            background: hsla(0, 0%, 50%, 0.5);
+            padding: 30px 4px 10px 4px;
+            /* border: 2px solid black; */
+            width: auto;
+            position: relative;
+            border-radius: 5px;
+            box-shadow: 0 10px 20px;
+        }
+
+        .container-shell:before {
+            content: "jessibuca demo player";
+            position: absolute;
+            color: darkgray;
+            top: 4px;
+            left: 10px;
+            text-shadow: 1px 1px black;
+        }
+
+        #container {
+            background: rgba(13, 14, 27, 0.7);
+            width: 640px;
+            height: 398px;
+        }
+
+        .input {
+            display: flex;
+            margin-top: 10px;
+            color: white;
+            place-content: stretch;
+        }
+
+        .input2 {
+            bottom: 0px;
+        }
+
+        .input input {
+            flex: auto;
+        }
+
+        .err {
+            position: absolute;
+            top: 40px;
+            left: 10px;
+            color: red;
+        }
+
+        .option {
+            position: absolute;
+            top: 4px;
+            right: 10px;
+            display: flex;
+            place-content: center;
+            font-size: 12px;
+        }
+
+        .option span {
+            color: white;
+        }
+
+        .page {
+            background: white;
+            background-repeat: no-repeat;
+            background-position: top;
+        }
+
+        @media (max-width: 720px) {
+            #container {
+                width: 90vw;
+                height: 52.7vw;
+            }
+        }
+    </style>
+</head>
+<body class="page">
+<div class="root">
+    <div class="container-shell">
+        <div id="container"></div>
+        <div class="input">
+            <div>输入URL:</div>
+            <input
+                autocomplete="on"
+                id="playUrl"
+                value=""
+            />
+            <button id="play">播放</button>
+            <button id="pause" style="display: none">停止</button>
+        </div>
+        <div class="input" style="line-height: 30px">
+            <button id="destroy">销毁</button>
+        </div>
+    </div>
+</div>
+
+<script>
    // Cached DOM references for the demo controls.
    var $player = document.getElementById('play');
    var $pause = document.getElementById('pause');
    var $playHref = document.getElementById('playUrl');
    var $container = document.getElementById('container');
    var $destroy = document.getElementById('destroy');

    var showOperateBtns = false; // whether to show the built-in operate buttons / bandwidth overlay
    var forceNoOffscreen = true; // disable offscreen rendering
    var jessibuca = null; // current player instance; rebuilt by create()
+
+    function create() {
+        jessibuca = null;
+        jessibuca = new Jessibuca({
+            container: $container,
+            videoBuffer: 0.2, // 缓存时长
+            isResize: false,
+            text: "",
+            loadingText: "",
+            useMSE: false,
+            debug: true,
+            showBandwidth: showOperateBtns, // 显示网速
+            operateBtns: {
+                fullscreen: showOperateBtns,
+                screenshot: showOperateBtns,
+                play: showOperateBtns,
+                audio: true,
+                recorder: false
+            },
+            forceNoOffscreen: forceNoOffscreen,
+            isNotMute: false,
+        },);
+
+        jessibuca.on('audioInfo', function (audioInfo) {
+            console.log('audioInfo',audioInfo);
+        })
+
+        jessibuca.on('videoInfo', function (videoInfo) {
+            console.log('videoInfo',videoInfo);
+        })
+
+        $player.style.display = 'inline-block';
+        $pause.style.display = 'none';
+        $destroy.style.display = 'none';
+    }
+
+
    // Build the initial player instance.
    create();

    // Play: start streaming the URL from the input box and toggle the buttons
    // (hide "play", show "pause"/"destroy"). Empty input is ignored.
    $player.addEventListener('click', function () {
        var href = $playHref.value;
        if (href) {
            jessibuca.play(href);
            $player.style.display = 'none';
            $pause.style.display = 'inline-block';
            $destroy.style.display = 'inline-block';
        }
    }, false)


    // Pause: stop playback and restore the "play" button.
    $pause.addEventListener('click', function () {
        $player.style.display = 'inline-block';
        $pause.style.display = 'none';
        jessibuca.pause();
    })

    // Destroy: release the player's underlying resources, then rebuild a
    // fresh instance so the demo can be reused.
    $destroy.addEventListener('click', function () {
        if (jessibuca) {
            jessibuca.destroy().then(()=>{
                create();
            });
        }
        else {
            create();
        }
    })
+</script>
+
+</body>
+</html>
+

+ 683 - 0
src/main/resources/web/jessibuca/jessibuca.d.ts

@@ -0,0 +1,683 @@
+declare namespace Jessibuca {
+
+    /** 超时信息 */
+    enum TIMEOUT {
+        /** 当play()的时候,如果没有数据返回 */
+        loadingTimeout = 'loadingTimeout',
+        /** 当播放过程中,如果超过timeout之后没有数据渲染 */
+        delayTimeout = 'delayTimeout',
+    }
+
+    /** 错误信息 */
+    enum ERROR {
+        /** 播放错误,url 为空的时候,调用 play 方法 */
+        playError = 'playError',
+        /** http 请求失败 */
+        fetchError = 'fetchError',
+        /** websocket 请求失败 */
+        websocketError = 'websocketError',
+        /** webcodecs 解码 h265 失败 */
+        webcodecsH265NotSupport = 'webcodecsH265NotSupport',
+        /** mediaSource 解码 h265 失败 */
+        mediaSourceH265NotSupport = 'mediaSourceH265NotSupport',
+        /** wasm 解码失败 */
+        wasmDecodeError = 'wasmDecodeError',
+    }
+
+    interface Config {
+        /**
+         * 播放器容器
+         * *  若为 string ,则底层调用的是 document.getElementById('id')
+         * */
+        container: HTMLElement | string;
+        /**
+         * 设置最大缓冲时长,单位秒,播放器会自动消除延迟
+         */
+        videoBuffer?: number;
+        /**
+         * worker地址
+         * *  默认引用的是根目录下面的decoder.js文件 ,decoder.js 与 decoder.wasm文件必须是放在同一个目录下面。 */
+        decoder?: string;
+        /**
+         * 是否不使用离屏模式(提升渲染能力)
+         */
+        forceNoOffscreen?: boolean;
+        /**
+         * 是否开启当页面的'visibilityState'变为'hidden'的时候,自动暂停播放。
+         */
+        hiddenAutoPause?: boolean;
+        /**
+         * 是否有音频,如果设置`false`,则不对音频数据解码,提升性能。
+         */
+        hasAudio?: boolean;
+        /**
+         * 设置旋转角度,只支持,0(默认),180,270 三个值
+         */
+        rotate?: boolean;
+        /**
+         * 1. 当为`true`的时候:视频画面做等比缩放后,高或宽对齐canvas区域,画面不被拉伸,但有黑边。 等同于 `setScaleMode(1)`
+         * 2. 当为`false`的时候:视频画面完全填充canvas区域,画面会被拉伸。等同于 `setScaleMode(0)`
+         */
+        isResize?: boolean;
+        /**
+         * 1. 当为`true`的时候:视频画面做等比缩放后,完全填充canvas区域,画面不被拉伸,没有黑边,但画面显示不全。等同于 `setScaleMode(2)`
+         */
+        isFullResize?: boolean;
+        /**
+         * 1. 当为`true`的时候:ws协议不检验是否以.flv为依据,进行协议解析。
+         */
+        isFlv?: boolean;
+        /**
+         * 是否开启控制台调试打印
+         */
+        debug?: boolean;
+        /**
+         * 1. 设置超时时长, 单位秒
+         * 2. 在连接成功之前(loading)和播放中途(heart),如果超过设定时长无数据返回,则回调timeout事件
+         */
+        timeout?: number;
+        /**
+         * 1. 设置超时时长, 单位秒
+         * 2. 在连接成功之前,如果超过设定时长无数据返回,则回调timeout事件
+         */
+        heartTimeout?: number;
+        /**
+         * 1. 设置超时时长, 单位秒
+         * 2. 在连接成功之前,如果超过设定时长无数据返回,则回调timeout事件
+         */
+        loadingTimeout?: number;
+        /**
+         * 是否支持屏幕的双击事件,触发全屏,取消全屏事件
+         */
+        supportDblclickFullscreen?: boolean;
+        /**
+         * 是否显示网速
+         */
+        showBandwidth?: boolean;
+        /**
+         * 配置操作按钮
+         */
+        operateBtns?: {
+            /** 是否显示全屏按钮 */
+            fullscreen?: boolean;
+            /** 是否显示截图按钮 */
+            screenshot?: boolean;
+            /** 是否显示播放暂停按钮 */
+            play?: boolean;
+            /** 是否显示声音按钮 */
+            audio?: boolean;
+            /** 是否显示录制按钮 */
+            record?: boolean;
+        };
+        /**
+         * 开启屏幕常亮,在手机浏览器上, canvas标签渲染视频并不会像video标签那样保持屏幕常亮
+         */
+        keepScreenOn?: boolean;
+        /**
+         * 是否开启声音,默认是关闭声音播放的
+         */
+        isNotMute?: boolean;
+        /**
+         * 加载过程中文案
+         */
+        loadingText?: string;
+        /**
+         * 背景图片
+         */
+        background?: string;
+        /**
+         * 是否开启MediaSource硬解码
+         * * 视频编码只支持H.264视频(Safari on iOS不支持)
+         * * 不支持 forceNoOffscreen 为 false (开启离屏渲染)
+         */
+        useMSE?: boolean;
+        /**
+         * 是否开启Webcodecs硬解码
+         * *  视频编码只支持H.264视频 (需在chrome 94版本以上,需要https或者localhost环境)
+         * *  支持 forceNoOffscreen 为 false (开启离屏渲染)
+         * */
+        useWCS?: boolean;
+        /**
+         * 是否开启键盘快捷键
+         * 目前支持的键盘快捷键有:esc -> 退出全屏;arrowUp -> 声音增加;arrowDown -> 声音减少;
+         */
+        hotKey?: boolean;
+        /**
+         *  在使用MSE或者Webcodecs 播放H265的时候,是否自动降级到wasm模式。
+         *  设置为false 则直接关闭播放,抛出Error 异常,设置为true 则会自动切换成wasm模式播放。
+         */
+        autoWasm?: boolean;
+        /**
+         * heartTimeout 心跳超时之后自动再播放,不再抛出异常,而直接重新播放视频地址。
+         */
+        heartTimeoutReplay?: boolean,
+        /**
+         * heartTimeoutReplay 重试次数,超过之后,不再自动播放
+         */
+        heartTimeoutReplayTimes?: number,
+        /**
+         * loadingTimeout loading之后自动再播放,不再抛出异常,而直接重新播放视频地址。
+         */
+        loadingTimeoutReplay?: boolean,
+        /**
+         * loadingTimeoutReplay 重试次数,超过之后,不再自动播放
+         */
+        loadingTimeoutReplayTimes?: number
+        /**
+         * wasm解码报错之后,不再抛出异常,而是直接重新播放视频地址。
+         */
+        wasmDecodeErrorReplay?: boolean,
+        /**
+         * https://github.com/langhuihui/jessibuca/issues/152 解决方案
+         * 例如:WebGL图像预处理默认每次取4字节的数据,但是540x960分辨率下的U、V分量宽度是540/2=270不能被4整除,导致绿屏。
+         */
+        openWebglAlignment?: boolean,
+
+        /**
+         * webcodecs硬解码是否通过video标签渲染
+         */
+        wcsUseVideoRender?: boolean,
+
+        /**
+         * 底部控制台是否自动隐藏
+         */
+        controlAutoHide?: boolean,
+
+        /**
+         * 录制的视频格式
+         */
+        recordType?: 'webm' | 'mp4',
+
+        /**
+         * 是否使用web全屏(旋转90度)(只会在移动端生效)。
+         */
+        useWebFullScreen?: boolean,
+
+        /**
+         * 是否自动使用系统全屏
+         */
+        autoUseSystemFullScreen?: boolean,
+    }
+}
+
+
+declare class Jessibuca {
+
+    constructor(config?: Jessibuca.Config);
+
+    /**
+     * 是否开启控制台调试打印
+     @example
+     // 开启
+     jessibuca.setDebug(true)
+     // 关闭
+     jessibuca.setDebug(false)
+     */
+    setDebug(flag: boolean): void;
+
+    /**
+     * 静音
+     @example
+     jessibuca.mute()
+     */
+    mute(): void;
+
+    /**
+     * 取消静音
+     @example
+     jessibuca.cancelMute()
+     */
+    cancelMute(): void;
+
+    /**
+     * 留给上层用户操作来触发音频恢复的方法。
+     *
+     * iPhone,chrome等要求自动播放时,音频必须静音,需要由一个真实的用户交互操作来恢复,不能使用代码。
+     *
+     * https://developers.google.com/web/updates/2017/09/autoplay-policy-changes
+     */
+    audioResume(): void;
+
+    /**
+     *
+     * 设置超时时长, 单位秒
+     * 在连接成功之前和播放中途,如果超过设定时长无数据返回,则回调timeout事件
+
+     @example
+     jessibuca.setTimeout(10)
+
+     jessibuca.on('timeout',function(){
+     //
+     });
+     */
+    setTimeout(): void;
+
+    /**
+     * @param mode
+     *      0 视频画面完全填充canvas区域,画面会被拉伸  等同于参数 `isResize` 为false
+     *
+     *      1 视频画面做等比缩放后,高或宽对齐canvas区域,画面不被拉伸,但有黑边 等同于参数 `isResize` 为true
+     *
+     *      2 视频画面做等比缩放后,完全填充canvas区域,画面不被拉伸,没有黑边,但画面显示不全 等同于参数 `isFullResize` 为true
+     @example
+     jessibuca.setScaleMode(0)
+
+     jessibuca.setScaleMode(1)
+
+     jessibuca.setScaleMode(2)
+     */
+    setScaleMode(mode: number): void;
+
+    /**
+     * 暂停播放
+     *
+     * 可以在pause 之后,再调用 `play()`方法就继续播放之前的流。
+     @example
+     jessibuca.pause().then(()=>{
+     console.log('pause success')
+
+     jessibuca.play().then(()=>{
+
+     }).catch((e)=>{
+
+     })
+
+     }).catch((e)=>{
+     console.log('pause error',e);
+     })
+     */
+    pause(): Promise<void>;
+
+    /**
+     * 关闭视频,不释放底层资源
+     @example
+     jessibuca.close();
+     */
+    close(): void;
+
+    /**
+     * 关闭视频,释放底层资源
+     @example
+     jessibuca.destroy()
+     */
+    destroy(): void;
+
+    /**
+     * 清理画布为黑色背景
+     @example
+     jessibuca.clearView()
+     */
+    clearView(): void;
+
+    /**
+     * 播放视频
+     @example
+
+     jessibuca.play('url').then(()=>{
+     console.log('play success')
+     }).catch((e)=>{
+     console.log('play error',e)
+     })
+     // 添加请求头
+     jessibuca.play('url',{headers:{'Authorization':'test111'}}).then(()=>{
+     console.log('play success')
+     }).catch((e)=>{
+     console.log('play error',e)
+     })
+     */
+    play(url?: string, options?: {
+        headers: Object
+    }): Promise<void>;
+
+    /**
+     * 重新调整视图大小
+     */
+    resize(): void;
+
+    /**
+     * 设置最大缓冲时长,单位秒,播放器会自动消除延迟。
+     *
+     * 等同于 `videoBuffer` 参数。
+     *
+     @example
+     // 设置 200ms 缓冲
+     jessibuca.setBufferTime(0.2)
+     */
+    setBufferTime(time: number): void;
+
+    /**
+     * 设置旋转角度,只支持,0(默认) ,180,270 三个值。
+     *
+     * > 可用于实现监控画面小窗和全屏效果,由于iOS没有全屏API,此方法可以模拟页面内全屏效果而且多端效果一致。   *
+     @example
+     jessibuca.setRotate(0)
+
+     jessibuca.setRotate(90)
+
+     jessibuca.setRotate(270)
+     */
+    setRotate(deg: number): void;
+
+    /**
+     *
+     * 设置音量大小,取值0 — 1
+     *
+     * > 区别于 mute 和 cancelMute 方法,虽然设置setVolume(0) 也能达到 mute方法,但是mute 方法是不调用底层播放音频的,能提高性能。而setVolume(0)只是把声音设置为0 ,以达到效果。
+     * @param volume 当为0时,完全无声;当为1时,最大音量,默认值
+     @example
+     jessibuca.setVolume(0.2)
+
+     jessibuca.setVolume(0)
+
+     jessibuca.setVolume(1)
+     */
+    setVolume(volume: number): void;
+
+    /**
+     * 返回是否加载完毕
+     @example
+     var result = jessibuca.hasLoaded()
+     console.log(result) // true
+     */
+    hasLoaded(): boolean;
+
+    /**
+     * 开启屏幕常亮,在手机浏览器上, canvas标签渲染视频并不会像video标签那样保持屏幕常亮。
+     * H5目前在chrome\edge 84, android chrome 84及以上有原生亮屏API, 需要是https页面
+     * 其余平台为模拟实现,此时为兼容实现,并不保证所有浏览器都支持
+     @example
+     jessibuca.setKeepScreenOn()
+     */
+    setKeepScreenOn(): boolean;
+
+    /**
+     * 全屏(取消全屏)播放视频
+     @example
+     jessibuca.setFullscreen(true)
+     //
+     jessibuca.setFullscreen(false)
+     */
+    setFullscreen(flag: boolean): void;
+
+    /**
+     *
+     * 截图,调用后弹出下载框保存截图
+     * @param filename 可选参数, 保存的文件名, 默认 `时间戳`
+     * @param format   可选参数, 截图的格式,可选png或jpeg或者webp ,默认 `png`
+     * @param quality  可选参数, 当格式是jpeg或者webp时,压缩质量,取值0 ~ 1 ,默认 `0.92`
+     * @param type 可选参数, 可选download或者base64或者blob,默认`download`
+
+     @example
+
+     jessibuca.screenshot("test","png",0.5)
+
+     const base64 = jessibuca.screenshot("test","png",0.5,'base64')
+
+     const fileBlob = jessibuca.screenshot("test",'blob')
+     */
+    screenshot(filename?: string, format?: string, quality?: number, type?: string): void;
+
+    /**
+     * 开始录制。
+     * @param fileName 可选,默认时间戳
+     * @param fileType 可选,默认webm,支持webm 和mp4 格式
+
+     @example
+     jessibuca.startRecord('xxx','webm')
+     */
+    startRecord(fileName: string, fileType: string): void;
+
+    /**
+     * 暂停录制并下载。
+     @example
+     jessibuca.stopRecordAndSave()
+     */
+    stopRecordAndSave(): void;
+
+    /**
+     * 返回是否正在播放中状态。
+     @example
+     var result = jessibuca.isPlaying()
+     console.log(result) // true
+     */
+    isPlaying(): boolean;
+
+    /**
+     *   返回是否静音。
+     @example
+     var result = jessibuca.isMute()
+     console.log(result) // true
+     */
+    isMute(): boolean;
+
+    /**
+     * 返回是否正在录制。
+     @example
+     var result = jessibuca.isRecording()
+     console.log(result) // true
+     */
+    isRecording(): boolean;
+
+    /**
+     * 切换底部控制条 隐藏/显示
+     * @param isShow
+     *
+     * @example
+     * jessibuca.toggleControlBar(true) // 显示
+     * jessibuca.toggleControlBar(false)  // 隐藏
+     * jessibuca.toggleControlBar() // 切换 隐藏/显示
+     */
+    toggleControlBar(isShow:boolean): void;
+
+    /**
+     * 获取底部控制条是否显示
+     */
+    getControlBarShow(): boolean;
+
+    /**
+     * 监听 jessibuca 初始化事件
+     * @example
+     * jessibuca.on("load",function(){console.log('load')})
+     */
+    on(event: 'load', callback: () => void): void;
+
+    /**
+     * 视频播放持续时间,单位ms
+     * @example
+     * jessibuca.on('timeUpdate',function (ts) {console.log('timeUpdate',ts);})
+     */
+    on(event: 'timeUpdate', callback: () => void): void;
+
+    /**
+     * Fired once the video stream info has been parsed.
+     * @example
+     * jessibuca.on("videoInfo",function(data){console.log('width:',data.width,'height:',data.height)})
+     */
+    on(event: 'videoInfo', callback: (data: {
+        /** video width in pixels */
+        width: number;
+        /** video height in pixels */
+        height: number;
+    }) => void): void;
+
+    /**
+     * 当解析出音频信息时回调,2个回调参数
+     * @example
+     * jessibuca.on("audioInfo",function(data){console.log('numOfChannels:',data.numOfChannels,'sampleRate',data.sampleRate)})
+     */
+    on(event: 'audioInfo', callback: (data: {
+        /** 声频通道 */
+        numOfChannels: number;
+        /** 采样率 */
+        sampleRate: number;
+    }) => void): void;
+
+    /**
+     * Fired with log output, including error messages.
+     * The example passes the log payload to the callback; its exact shape is not
+     * shown here, so it is typed as `any` — narrow once confirmed against the player.
+     * @example
+     * jessibuca.on("log",function(data){console.log('data:',data)})
+     */
+    on(event: 'log', callback: (data: any) => void): void;
+
+    /**
+     * 错误信息
+     * @example
+     * jessibuca.on("error",function(error){
+     if(error === Jessibuca.ERROR.fetchError){
+     //
+     }
+     else if(error === Jessibuca.ERROR.webcodecsH265NotSupport){
+     //
+     }
+     console.log('error:',error)
+     })
+     */
+    on(event: 'error', callback: (err: Jessibuca.ERROR) => void): void;
+
+    /**
+     * 当前网速, 单位KB 每秒1次,
+     * @example
+     * jessibuca.on("kBps",function(data){console.log('kBps:',data)})
+     */
+    on(event: 'kBps', callback: (value: number) => void): void;
+
+    /**
+     * 渲染开始
+     * @example
+     * jessibuca.on("start",function(){console.log('start render')})
+     */
+    on(event: 'start', callback: () => void): void;
+
+    /**
+     * 当设定的超时时间内无数据返回,则回调
+     * @example
+     * jessibuca.on("timeout",function(error){console.log('timeout:',error)})
+     */
+    on(event: 'timeout', callback: (error: Jessibuca.TIMEOUT) => void): void;
+
+    /**
+     * 当play()的时候,如果没有数据返回,则回调
+     * @example
+     * jessibuca.on("loadingTimeout",function(){console.log('timeout')})
+     */
+    on(event: 'loadingTimeout', callback: () => void): void;
+
+    /**
+     * 当播放过程中,如果超过timeout之后没有数据渲染,则抛出异常。
+     * @example
+     * jessibuca.on("delayTimeout",function(){console.log('timeout')})
+     */
+    on(event: 'delayTimeout', callback: () => void): void;
+
+    /**
+     * Fired when the fullscreen state changes; the flag reports whether the
+     * player is currently fullscreen (per the original doc and example).
+     * @example
+     * jessibuca.on("fullscreen",function(flag){console.log('is fullscreen',flag)})
+     */
+    on(event: 'fullscreen', callback: (flag: boolean) => void): void;
+
+    /**
+     * 触发播放事件
+     * @example
+     * jessibuca.on("play",function(flag){console.log('play')})
+     */
+    on(event: 'play', callback: () => void): void;
+
+    /**
+     * 触发暂停事件
+     * @example
+     * jessibuca.on("pause",function(flag){console.log('pause')})
+     */
+    on(event: 'pause', callback: () => void): void;
+
+    /**
+     * Fired when the mute state changes; the flag reports whether the player
+     * is currently muted (the original doc states a boolean is delivered).
+     * @example
+     * jessibuca.on("mute",function(flag){console.log('is mute',flag)})
+     */
+    on(event: 'mute', callback: (flag: boolean) => void): void;
+
+    /**
+     * 流状态统计,流开始播放后回调,每秒1次。
+     * @example
+     * jessibuca.on("stats",function(s){console.log("stats is",s)})
+     */
+    on(event: 'stats', callback: (stats: {
+        /** 当前缓冲区时长,单位毫秒 */
+        buf: number;
+        /** 当前视频帧率 */
+        fps: number;
+        /** 当前音频码率,单位byte */
+        abps: number;
+        /** 当前视频码率,单位byte */
+        vbps: number;
+        /** 当前视频帧pts,单位毫秒 */
+        ts: number;
+    }) => void): void;
+
+    /**
+     * 渲染性能统计,流开始播放后回调,每秒1次。
+     * @param performance 0: 表示卡顿,1: 表示流畅,2: 表示非常流程
+     * @example
+     * jessibuca.on("performance",function(performance){console.log("performance is",performance)})
+     */
+    on(event: 'performance', callback: (performance: 0 | 1 | 2) => void): void;
+
+    /**
+     * 录制开始的事件
+
+     * @example
+     * jessibuca.on("recordStart",function(){console.log("record start")})
+     */
+    on(event: 'recordStart', callback: () => void): void;
+
+    /**
+     * 录制结束的事件
+
+     * @example
+     * jessibuca.on("recordEnd",function(){console.log("record end")})
+     */
+    on(event: 'recordEnd', callback: () => void): void;
+
+    /**
+     * 录制的时候,返回的录制时长,1s一次
+
+     * @example
+     * jessibuca.on("recordingTimestamp",function(timestamp){console.log("recordingTimestamp is",timestamp)})
+     */
+    on(event: 'recordingTimestamp', callback: (timestamp: number) => void): void;
+
+    /**
+     * 监听调用play方法 经过 初始化-> 网络请求-> 解封装 -> 解码 -> 渲染 一系列过程的时间消耗
+     * @param event
+     * @param callback
+     */
+    on(event: 'playToRenderTimes', callback: (times: {
+        playInitStart: number, // 1 初始化
+        playStart: number, // 2 初始化
+        streamStart: number, // 3 网络请求
+        streamResponse: number, // 4 网络请求
+        demuxStart: number, // 5 解封装
+        decodeStart: number, // 6 解码
+        videoStart: number, // 7 渲染
+        playTimestamp: number,// playStart- playInitStart
+        streamTimestamp: number,// streamStart - playStart
+        streamResponseTimestamp: number,// streamResponse - streamStart
+        demuxTimestamp: number, // demuxStart - streamResponse
+        decodeTimestamp: number, // decodeStart - demuxStart
+        videoTimestamp: number,// videoStart - decodeStart
+        allTimestamp: number // videoStart - playInitStart
+    }) => void): void
+
+    /**
+     * 监听方法
+     *
+     @example
+
+     jessibuca.on("load",function(){console.log('load')})
+     */
+    on(event: string, callback: Function): void;
+
+}
+
+export default Jessibuca;

File diff suppressed because it is too large
+ 0 - 0
src/main/resources/web/jessibuca/jessibuca.js


File diff suppressed because it is too large
+ 0 - 0
src/main/resources/web/mpegts.js


File diff suppressed because it is too large
+ 0 - 0
src/main/resources/web/mpegts.js.map


+ 172 - 0
src/main/resources/web/offline_player.html

@@ -0,0 +1,172 @@
+<!DOCTYPE html>
+<html lang="zh-CN">
+<head>
+    <meta charset="UTF-8" />
+    <title>离线 WebSocket 播放器</title>
+    <style>
+        body { font-family: Arial, sans-serif; margin: 20px; }
+        h1 { margin-bottom: 10px; }
+        .section { margin-top: 16px; }
+        .row { margin: 8px 0; }
+        .controls { margin: 10px 0; display: flex; gap: 12px; align-items: center; flex-wrap: wrap; }
+        .status { margin-top: 10px; }
+        .status span { display: inline-block; min-width: 120px; }
+        #video { width: 100%; max-width: 900px; background: #000; }
+        button { padding: 6px 12px; }
+        input[type="text"] { padding: 6px; width: 420px; }
+        input[type="range"] { width: 180px; }
+        small.hint { color: #666; }
+    </style>
+</head>
+<body>
+    <h1>离线 WebSocket 播放器</h1>
+
+    <div class="section">
+        <div class="row">
+            <label for="wsUrl">WebSocket 地址(例如 ws://127.0.0.1:18090/realtime/42011878285-5):</label>
+            <input id="wsUrl" type="text" placeholder="请输入完整的 ws/wss 地址" />
+            <button id="btnConnect">连接</button>
+            <button id="btnDisconnect" disabled>断开</button>
+            <div class="row"><small class="hint">提示:本页面为离线文件,直接双击打开即可使用;所有资源均为本地引用。</small></div>
+        </div>
+        <div class="row">
+            <video id="video" controls muted></video>
+        </div>
+        <div class="controls">
+            <button id="btnPlay" disabled>播放</button>
+            <button id="btnPause" disabled>暂停</button>
+            <label>音量:<input id="volume" type="range" min="0" max="1" step="0.01" value="0.8" /></label>
+        </div>
+        <div class="status">
+            <span>连接状态:<strong id="connStatus">未连接</strong></span>
+            <span>播放状态:<strong id="playStatus">未播放</strong></span>
+        </div>
+    </div>
+
+    <!-- 本地引用 mpegts.js(确保同目录下存在 mpegts.js 文件) -->
+    <script src="./mpegts.js"></script>
+    <script>
+        const urlParams = new URLSearchParams(window.location.search);
+        const noAudio = urlParams.get('noAudio') === 'true';
+        let player = null;
+        const video = document.getElementById('video');
+        const btnConnect = document.getElementById('btnConnect');
+        const btnDisconnect = document.getElementById('btnDisconnect');
+        const btnPlay = document.getElementById('btnPlay');
+        const btnPause = document.getElementById('btnPause');
+        const volume = document.getElementById('volume');
+        const wsUrlInput = document.getElementById('wsUrl');
+        const connStatus = document.getElementById('connStatus');
+        const playStatus = document.getElementById('playStatus');
+
+        function setConnStatus(text) { connStatus.textContent = text; }
+        function setPlayStatus(text) { playStatus.textContent = text; }
+
+        function cleanupPlayer() {
+            try {
+                if (player) {
+                    player.unload();
+                    player.detachMediaElement();
+                    player.destroy();
+                }
+            } catch (e) {
+                console.warn('清理播放器时发生异常:', e);
+            } finally {
+                player = null;
+                btnDisconnect.disabled = true;
+                btnPlay.disabled = true;
+                btnPause.disabled = true;
+                setConnStatus('未连接');
+                setPlayStatus('未播放');
+            }
+        }
+
+        btnConnect.addEventListener('click', async () => {
+            const url = wsUrlInput.value.trim();
+            if (!url) {
+                alert('请先输入 WebSocket 地址');
+                return;
+            }
+            if (!window.mpegts || !window.mpegts.isSupported()) {
+                alert('当前浏览器不支持 MSE 或 mpegts.js');
+                return;
+            }
+
+            cleanupPlayer();
+            setConnStatus('连接中...');
+
+            try {
+                player = mpegts.createPlayer({
+                    type: 'flv',
+                    isLive: true,
+                    hasAudio: !noAudio,
+                    url: url
+                }, {
+                    enableStashBuffer: false,
+                    stashInitialSize: 128,
+                    autoCleanupSourceBuffer: true,
+                    lazyLoad: false,
+                    liveBufferLatencyChasing: true
+                });
+
+                player.on(mpegts.Events.ERROR, (e) => {
+                    console.error('播放器错误:', e);
+                    setConnStatus('错误');
+                });
+                player.on(mpegts.Events.LOADING_COMPLETE, () => {
+                    setConnStatus('已连接');
+                });
+                player.on(mpegts.Events.RECOVERED_EARLY_EOF, () => {
+                    console.warn('早期EOF已恢复');
+                });
+
+                player.attachMediaElement(video);
+                player.load();
+
+                btnDisconnect.disabled = false;
+                btnPlay.disabled = false;
+                btnPause.disabled = false;
+                setConnStatus('已连接');
+
+                try {
+                    await video.play();
+                    setPlayStatus('播放中');
+                } catch (err) {
+                    console.warn('自动播放被阻止:', err);
+                    setPlayStatus('已加载,待播放');
+                }
+            } catch (err) {
+                console.error('创建播放器失败:', err);
+                setConnStatus('连接失败');
+                cleanupPlayer();
+            }
+        });
+
+        btnDisconnect.addEventListener('click', () => {
+            cleanupPlayer();
+        });
+
+        btnPlay.addEventListener('click', async () => {
+            try {
+                await video.play();
+                setPlayStatus('播放中');
+            } catch (err) {
+                console.warn('播放失败:', err);
+            }
+        });
+        btnPause.addEventListener('click', () => {
+            try {
+                video.pause();
+                setPlayStatus('已暂停');
+            } catch (err) {
+                console.warn('暂停失败:', err);
+            }
+        });
+        volume.addEventListener('input', () => {
+            video.volume = parseFloat(volume.value);
+        });
+
+        window.addEventListener('beforeunload', cleanupPlayer);
+    </script>
+</body>
+</html>

+ 338 - 0
src/main/resources/web/player.html

@@ -0,0 +1,338 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="UTF-8">
+    <title>WebSocket 播放器测试</title>
+    <style>
+        body { font-family: Arial, sans-serif; margin: 20px; }
+        .controls { margin: 10px 0; display: flex; gap: 12px; align-items: center; flex-wrap: wrap; }
+        .status { margin-top: 10px; }
+        .status span { display: inline-block; min-width: 120px; }
+        .section { margin-top: 16px; }
+        .row { margin: 8px 0; }
+        #video { width: 100%; max-width: 900px; background: #000; }
+        button { padding: 6px 12px; }
+        input[type="text"] { padding: 6px; width: 280px; }
+        input[type="range"] { width: 180px; }
+    </style>
+</head>
+<body>
+    <h1>WebSocket 播放器测试页面</h1>
+
+    <div class="section">
+        <div class="row">
+            <label for="streamId">流ID(SIM卡号+逻辑通道号,例如 42011878285-5):</label>
+            <input id="streamId" type="text" placeholder="请输入流ID,例如 42011878285-5" />
+            <button id="btnConnect">连接</button>
+            <button id="btnDisconnect" disabled>断开</button>
+        </div>
+        <div class="row">
+            <video id="video" controls muted></video>
+        </div>
+        <div class="controls">
+            <button id="btnPlay" disabled>播放</button>
+            <button id="btnPause" disabled>暂停</button>
+            <label>音量:<input id="volume" type="range" min="0" max="1" step="0.01" value="0.8" /></label>
+            <label>忽略音频超时(ms):<input id="audioTimeout" type="number" min="1000" step="500" value="2000" /></label>
+            <label><input id="ignoreAudioOnTimeout" type="checkbox" /> 启用音频缺失自动忽略</label>
+        </div>
+        <div class="status">
+            <span>连接状态:<strong id="connStatus">未连接</strong></span>
+            <span>播放状态:<strong id="playStatus">未播放</strong></span>
+        </div>
+    </div>
+
+    <!-- 引用已下载的最新版 mpegts.js(位于 resources/web/ 目录) -->
+    <script src="/mpegts.js"></script>
+    <script>
+        let player = null;
+        const video = document.getElementById('video');
+        const btnConnect = document.getElementById('btnConnect');
+        const btnDisconnect = document.getElementById('btnDisconnect');
+        const btnPlay = document.getElementById('btnPlay');
+        const btnPause = document.getElementById('btnPause');
+        const volume = document.getElementById('volume');
+        const streamIdInput = document.getElementById('streamId');
+        const connStatus = document.getElementById('connStatus');
+        const playStatus = document.getElementById('playStatus');
+
+        const audioTimeoutInput = document.getElementById('audioTimeout');
+        const ignoreAudioToggle = document.getElementById('ignoreAudioOnTimeout');
+
+        let currentStreamUrl = null;
+        let audioWatchTimer = null;
+        let audioLastBytes = 0;
+        let audioLastIncreaseAt = 0;
+        let audioIgnored = false;
+
+        function setConnStatus(text) { connStatus.textContent = text; }
+        function setPlayStatus(text) { playStatus.textContent = text; }
+
+        function buildWsUrl(streamId) {
+            const host = window.location.hostname;
+            // WebSocketServer 默认端口 18090,路径 /realtime/{streamId}
+            const protocol = window.location.protocol === 'https:' ? 'wss' : 'ws';
+            return `${protocol}://${host}:18090/realtime/${encodeURIComponent(streamId)}`;
+        }
+
+        // 获取URL参数
+        function getUrlParams() {
+            const params = {};
+            const queryString = window.location.search.substring(1);
+            const pairs = queryString.split('&');
+            for (const pair of pairs) {
+                const [key, value] = pair.split('=').map(decodeURIComponent);
+                if (key) params[key] = value;
+            }
+            return params;
+        }
+
+        // 自动连接并播放
+        function autoConnectAndPlay() {
+            const params = getUrlParams();
+            if (params.sim && params.channel !== undefined) {
+                // 构建流ID:SIM卡号+逻辑通道号
+                const streamId = `${params.sim}-${params.channel}`;
+                streamIdInput.value = streamId;
+                
+                // 延迟执行连接,确保页面元素完全加载
+                setTimeout(() => {
+                    btnConnect.click();
+                }, 500);
+            }
+        }
+
+        // 获取已解码的音频字节数
+        function getDecodedAudioBytes(el) {
+            try {
+                if (typeof el.webkitAudioDecodedByteCount === 'number') {
+                    return el.webkitAudioDecodedByteCount;
+                }
+                if (typeof el.mozAudioDecodedFrames === 'number') {
+                    return el.mozAudioDecodedFrames;
+                }
+            } catch (_) {}
+            return 0;
+        }
+
+        // Watch decoded-audio progress; when no audio data arrives within the
+        // configured timeout, fall back to video-only playback via switchToVideoOnly().
+        function startAudioWatch() {
+            // NOTE(review): btnConnect.disabled is never set to true anywhere in
+            // this page, so this auto-play branch looks like dead code — confirm
+            // whether it was meant to check btnDisconnect.disabled instead.
+            if (btnConnect.disabled && btnPlay.disabled === false) {
+                setTimeout(() => {
+                    btnPlay.click();
+                }, 300);
+            }
+            // Skip when the auto-ignore feature is off or audio was already dropped.
+            if (!ignoreAudioToggle.checked || audioIgnored) return;
+            audioLastBytes = getDecodedAudioBytes(video);
+            audioLastIncreaseAt = performance.now();
+            // Restart the watcher: check once per second whether the decoded
+            // audio byte counter has grown since the last tick.
+            stopAudioWatch();
+            audioWatchTimer = setInterval(async () => {
+                if (!player || audioIgnored) {
+                    stopAudioWatch();
+                    return;
+                }
+                const now = performance.now();
+                const timeoutMs = parseInt(audioTimeoutInput.value, 10) || 2000;
+                const curBytes = getDecodedAudioBytes(video);
+                if (curBytes > audioLastBytes) {
+                    audioLastBytes = curBytes;
+                    audioLastIncreaseAt = now;
+                    return; // audio frames are flowing normally, keep observing
+                }
+                // Counter did not grow — check whether the silence exceeded the threshold.
+                if ((now - audioLastIncreaseAt) >= timeoutMs) {
+                    console.warn(`在 ${timeoutMs}ms 内未接收到音频单元,自动切换为视频播放(忽略音频)`);
+                    await switchToVideoOnly();
+                }
+            }, 1000);
+        }
+
+        function stopAudioWatch() {
+            if (audioWatchTimer) {
+                clearInterval(audioWatchTimer);
+                audioWatchTimer = null;
+            }
+        }
+
+        async function switchToVideoOnly() {
+            if (audioIgnored) return;
+            audioIgnored = true;
+            stopAudioWatch();
+            try {
+                if (player) {
+                    player.unload();
+                    player.detachMediaElement();
+                    player.destroy();
+                    player = null;
+                }
+            } catch (e) {
+                console.warn('切换到视频模式前释放播放器异常:', e);
+            }
+            try {
+                // 重新创建为仅视频模式
+                player = mpegts.createPlayer({
+                    type: 'flv',
+                    isLive: true,
+                    url: currentStreamUrl,
+                    hasAudio: false,
+                    hasVideo: true
+                }, {
+                    enableStashBuffer: false,
+                    stashInitialSize: 128,
+                    autoCleanupSourceBuffer: true,
+                    lazyLoad: false,
+                    liveBufferLatencyChasing: true
+                });
+
+                player.on(mpegts.Events.ERROR, (e) => {
+                    console.error('播放器错误:', e);
+                    setConnStatus('错误');
+                });
+                player.on(mpegts.Events.LOADING_COMPLETE, () => {
+                    setConnStatus('已连接');
+                });
+                player.on(mpegts.Events.RECOVERED_EARLY_EOF, () => {
+                    console.warn('早期EOF已恢复');
+                });
+
+                player.attachMediaElement(video);
+                player.load();
+                setConnStatus('已连接(视频)');
+                try {
+                    await video.play();
+                    setPlayStatus('播放中(视频)');
+                } catch (err) {
+                    console.warn('自动播放被阻止(视频):', err);
+                    setPlayStatus('已加载(视频),待播放');
+                }
+            } catch (err) {
+                console.error('切换为视频模式失败:', err);
+                setConnStatus('切换失败');
+            }
+        }
+
+        function cleanupPlayer() {
+            try {
+                stopAudioWatch();
+                audioIgnored = false;
+                if (player) {
+                    player.unload();
+                    player.detachMediaElement();
+                    player.destroy();
+                }
+            } catch (e) {
+                console.warn('清理播放器时发生异常:', e);
+            } finally {
+                player = null;
+                currentStreamUrl = null;
+                btnDisconnect.disabled = true;
+                btnPlay.disabled = true;
+                btnPause.disabled = true;
+                setConnStatus('未连接');
+                setPlayStatus('未播放');
+            }
+        }
+
+        btnConnect.addEventListener('click', async () => {
+            const streamId = streamIdInput.value.trim();
+            if (!streamId) {
+                alert('请先输入流ID');
+                return;
+            }
+            if (!window.mpegts || !window.mpegts.isSupported()) {
+                alert('当前浏览器不支持 MSE 或 mpegts.js');
+                return;
+            }
+
+            cleanupPlayer();
+            const url = buildWsUrl(streamId);
+            currentStreamUrl = url;
+            setConnStatus('连接中...');
+
+            try {
+                player = mpegts.createPlayer({
+                    type: 'flv',
+                    isLive: true,
+                    url: url
+                }, {
+                    enableStashBuffer: false,
+                    stashInitialSize: 128,
+                    autoCleanupSourceBuffer: true,
+                    lazyLoad: false,
+                    liveBufferLatencyChasing: true
+                });
+
+                player.on(mpegts.Events.ERROR, (e) => {
+                    console.error('播放器错误:', e);
+                    setConnStatus('错误');
+                });
+                player.on(mpegts.Events.LOADING_COMPLETE, () => {
+                    setConnStatus('已连接');
+                });
+                player.on(mpegts.Events.RECOVERED_EARLY_EOF, () => {
+                    console.warn('早期EOF已恢复');
+                });
+
+                player.attachMediaElement(video);
+                player.load();
+                btnDisconnect.disabled = false;
+                btnPlay.disabled = false;
+                btnPause.disabled = false;
+                setConnStatus('已连接');
+
+                // 启动音频缺失监控
+                audioIgnored = false;
+                startAudioWatch();
+
+                // 自动播放(浏览器可能因策略拒绝,故捕获)
+                try {
+                    await video.play();
+                    setPlayStatus('播放中');
+                } catch (err) {
+                    console.warn('自动播放被阻止:', err);
+                    setPlayStatus('已加载,待播放');
+                }
+            } catch (err) {
+                console.error('创建播放器失败:', err);
+                setConnStatus('连接失败');
+                cleanupPlayer();
+            }
+        });
+
+        btnDisconnect.addEventListener('click', () => {
+            cleanupPlayer();
+        });
+
+        btnPlay.addEventListener('click', async () => {
+            try {
+                await video.play();
+                setPlayStatus('播放中');
+            } catch (err) {
+                console.warn('播放失败:', err);
+            }
+        });
+        btnPause.addEventListener('click', () => {
+            try {
+                video.pause();
+                setPlayStatus('已暂停');
+            } catch (err) {
+                console.warn('暂停失败:', err);
+            }
+        });
+        volume.addEventListener('input', () => {
+            video.volume = parseFloat(volume.value);
+        });
+
+        // 页面卸载时清理资源
+        window.addEventListener('beforeunload', cleanupPlayer);
+
+        // 页面加载完成后尝试自动连接并播放
+        window.addEventListener('DOMContentLoaded', () => {
+            autoConnectAndPlay();
+        });
+    </script>
+</body>
+</html>

+ 232 - 0
src/main/resources/web/playerWhthoutAudio.html

@@ -0,0 +1,232 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="UTF-8">
+    <title>WebSocket 播放器(无音频)测试</title>
+    <style>
+        body { font-family: Arial, sans-serif; margin: 20px; }
+        .controls { margin: 10px 0; display: flex; gap: 12px; align-items: center; flex-wrap: wrap; }
+        .status { margin-top: 10px; }
+        .status span { display: inline-block; min-width: 120px; }
+        .section { margin-top: 16px; }
+        .row { margin: 8px 0; }
+        #video { width: 100%; max-width: 900px; background: #000; }
+        button { padding: 6px 12px; }
+        input[type="text"] { padding: 6px; width: 280px; }
+    </style>
+</head>
+<body>
+    <h1>WebSocket 播放器(仅视频,无音频)</h1>
+
+    <div class="section">
+        <div class="row">
+            <label for="streamId">流ID(SIM卡号+逻辑通道号,例如 42011878285-5):</label>
+            <input id="streamId" type="text" placeholder="请输入流ID,例如 42011878285-5" />
+            <button id="btnConnect">连接</button>
+            <button id="btnDisconnect" disabled>断开</button>
+        </div>
+        <div class="row">
+            <video id="video" controls></video>
+        </div>
+        <div class="controls">
+            <button id="btnPlay" disabled>播放</button>
+            <button id="btnPause" disabled>暂停</button>
+        </div>
+        <div class="status">
+            <span>连接状态:<strong id="connStatus">未连接</strong></span>
+            <span>播放状态:<strong id="playStatus">未播放</strong></span>
+        </div>
+    </div>
+
+    <!-- 引用 mpegts.js(位于 resources/web/ 目录) -->
+    <script src="/mpegts.js"></script>
+    <script>
+        let player = null;
+        const video = document.getElementById('video');
+        const btnConnect = document.getElementById('btnConnect');
+        const btnDisconnect = document.getElementById('btnDisconnect');
+        const btnPlay = document.getElementById('btnPlay');
+        const btnPause = document.getElementById('btnPause');
+        const streamIdInput = document.getElementById('streamId');
+        const connStatus = document.getElementById('connStatus');
+        const playStatus = document.getElementById('playStatus');
+
+        let currentStreamUrl = null;
+
+        function setConnStatus(text) { connStatus.textContent = text; }
+        function setPlayStatus(text) { playStatus.textContent = text; }
+
+        function buildWsUrl(streamId) {
+            const host = window.location.hostname;
+            const protocol = window.location.protocol === 'https:' ? 'wss' : 'ws';
+            return `${protocol}://${host}:18090/realtime/${encodeURIComponent(streamId)}`;
+        }
+
+        // 获取URL参数
+        function getUrlParams() {
+            const params = {};
+            const queryString = window.location.search.substring(1);
+            const pairs = queryString.split('&');
+            for (const pair of pairs) {
+                const [key, value] = pair.split('=').map(decodeURIComponent);
+                if (key) params[key] = value;
+            }
+            return params;
+        }
+
+        // 自动连接并播放
+        function autoConnectAndPlay() {
+            const params = getUrlParams();
+            if (params.sim && params.channel !== undefined) {
+                // 构建流ID:SIM卡号+逻辑通道号
+                const streamId = `${params.sim}-${params.channel}`;
+                streamIdInput.value = streamId;
+                
+                // 延迟执行连接,确保页面元素完全加载
+                setTimeout(() => {
+                    btnConnect.click();
+                }, 500);
+            }
+        }
+
+        function cleanupPlayer() {
+            try {
+                if (player) {
+                    player.unload();
+                    player.detachMediaElement();
+                    player.destroy();
+                }
+            } catch (e) {
+                console.warn('清理播放器时发生异常:', e);
+            } finally {
+                player = null;
+                currentStreamUrl = null;
+                btnDisconnect.disabled = true;
+                btnPlay.disabled = true;
+                btnPause.disabled = true;
+                setConnStatus('未连接');
+                setPlayStatus('未播放');
+            }
+        }
+
+        btnConnect.addEventListener('click', async () => {
+            // 设置视频元素属性,增强自动播放成功率
+            video.autoplay = true;
+            video.muted = true; // 无音频版本也设置muted以提高自动播放成功率
+            video.playsInline = true; // 移动端支持
+            const streamId = streamIdInput.value.trim();
+            if (!streamId) {
+                alert('请先输入流ID');
+                return;
+            }
+            if (!window.mpegts || !window.mpegts.isSupported()) {
+                alert('当前浏览器不支持 MSE 或 mpegts.js');
+                return;
+            }
+
+            cleanupPlayer();
+            const url = buildWsUrl(streamId);
+            currentStreamUrl = url;
+            setConnStatus('连接中...');
+
+            try {
+                player = mpegts.createPlayer({
+                    type: 'flv',
+                    isLive: true,
+                    url: url,
+                    hasAudio: false,
+                    hasVideo: true
+                }, {
+                    enableStashBuffer: false,
+                    stashInitialSize: 128,
+                    autoCleanupSourceBuffer: true,
+                    lazyLoad: false,
+                    liveBufferLatencyChasing: true
+                });
+
+                player.on(mpegts.Events.ERROR, (e) => {
+                    console.error('播放器错误:', e);
+                    setConnStatus('错误');
+                });
+                player.on(mpegts.Events.LOADING_COMPLETE, () => {
+                    setConnStatus('已连接(视频)');
+                });
+                player.on(mpegts.Events.RECOVERED_EARLY_EOF, () => {
+                    console.warn('早期EOF已恢复');
+                });
+
+                player.attachMediaElement(video);
+                player.load();
+                btnDisconnect.disabled = false;
+                btnPlay.disabled = false;
+                btnPause.disabled = false;
+                setConnStatus('已连接(视频)');
+
+                // 播放器初始化后立即执行播放操作,使用mpegts.js播放器的play方法
+                console.log('尝试自动播放视频');
+                try {
+                    // 直接调用播放器的play方法
+                    player.play();
+                    setPlayStatus('播放中(视频)');
+                } catch (err) {
+                    console.warn('播放器自动播放失败:', err);
+                    setPlayStatus('已加载(视频),待播放');
+                    // 多阶段尝试自动播放策略
+                    const playAttempts = [300, 600, 1000]; // 不同时间点的尝试
+                    playAttempts.forEach((delay, index) => {
+                        setTimeout(() => {
+                            try {
+                                console.log(`第${index + 1}次尝试自动播放`);
+                                player.play();
+                                setPlayStatus('播放中(视频)');
+                            } catch (retryErr) {
+                                console.warn(`第${index + 1}次自动播放尝试失败`, retryErr);
+                                // 最后一次尝试失败后,尝试直接调用视频元素的播放
+                                if (index === playAttempts.length - 1) {
+                                    try {
+                                        video.play();
+                                        setPlayStatus('播放中(视频)');
+                                    } catch (finalErr) {
+                                        console.warn('所有自动播放尝试失败,需要用户交互');
+                                    }
+                                }
+                            }
+                        }, delay);
+                    });
+                }
+            } catch (err) {
+                console.error('创建播放器失败:', err);
+                setConnStatus('连接失败');
+                cleanupPlayer();
+            }
+        });
+
+        btnDisconnect.addEventListener('click', () => {
+            cleanupPlayer();
+        });
+
+        btnPlay.addEventListener('click', async () => {
+            try {
+                await video.play();
+                setPlayStatus('播放中(视频)');
+            } catch (err) {
+                console.warn('播放失败:', err);
+            }
+        });
+        btnPause.addEventListener('click', () => {
+            try {
+                video.pause();
+                setPlayStatus('已暂停');
+            } catch (err) {
+                console.warn('暂停失败:', err);
+            }
+        });
+
+        window.addEventListener('beforeunload', cleanupPlayer);
+        // 页面加载完成后尝试自动连接并播放
+        window.addEventListener('DOMContentLoaded', () => {
+            autoConnectAndPlay();
+        });
+    </script>
+</body>
+</html>

+ 150 - 0
src/native/CMakeLists.txt

@@ -0,0 +1,150 @@
+# CMake project for the JNI wrapper library (aac_jni) around FDK-AAC.
+cmake_minimum_required(VERSION 3.20)
+project(aac_jni LANGUAGES C)
+
+add_library(aac_jni SHARED aac_jni.c)
+
+# Resolve JAVA_HOME automatically (cross-platform), in priority order:
+# 1) -DJAVA_HOME passed on the command line wins
+# 2) then the JAVA_HOME environment variable
+# 3) finally fall back to find_package(Java) auto-detection
+set(JAVA_HOME "" CACHE PATH "Path to JDK home")
+if(NOT JAVA_HOME AND DEFINED ENV{JAVA_HOME})
+  set(JAVA_HOME "$ENV{JAVA_HOME}")
+endif()
+if(NOT JAVA_HOME)
+  find_package(Java COMPONENTS Development)
+  # Works with multiple JDK versions; to enforce a minimum version, enable the check below:
+  # if(DEFINED Java_VERSION_STRING AND Java_VERSION_STRING VERSION_LESS 17)
+  #   message(FATAL_ERROR "需要 JDK 17 及以上,当前: ${Java_VERSION_STRING}")
+  # endif()
+  if(Java_FOUND AND Java_JAVAC_EXECUTABLE)
+    get_filename_component(_javac_dir "${Java_JAVAC_EXECUTABLE}" DIRECTORY)
+    get_filename_component(JAVA_HOME "${_javac_dir}/.." ABSOLUTE)
+    set(JAVA_HOME "${JAVA_HOME}" CACHE PATH "Path to JDK home" FORCE)
+  endif()
+endif()
+if(NOT JAVA_HOME)
+  message(FATAL_ERROR "JAVA_HOME 未设置。请通过 -DJAVA_HOME=... 或设置环境变量 JAVA_HOME,或确保 CMake 能自动探测到 JDK。")
+endif()
+message(STATUS "Using JAVA_HOME: ${JAVA_HOME}")
+
+# Platform-specific configuration
+if(WIN32)
+    # Windows platform configuration
+    set(PLATFORM_DIR "win32")
+    set(NATIVE_PLATFORM_DIR "windows/x64")
+    set(FDK_AAC_LIB "${CMAKE_SOURCE_DIR}/../../native/windows/x64/Release/fdk-aac.lib")
+    
+    # Copy the JDK headers into the build tree so the project does not
+    # reference the JDK install by absolute path.
+    set(JDK_LOCAL_INCLUDE_DIR "${CMAKE_BINARY_DIR}/jdk/include")
+    set(JDK_LOCAL_PLATFORM_INCLUDE_DIR "${JDK_LOCAL_INCLUDE_DIR}/${PLATFORM_DIR}")
+    file(MAKE_DIRECTORY "${JDK_LOCAL_INCLUDE_DIR}")
+    file(MAKE_DIRECTORY "${JDK_LOCAL_PLATFORM_INCLUDE_DIR}")
+    file(COPY "${JAVA_HOME}/include/" DESTINATION "${JDK_LOCAL_INCLUDE_DIR}")
+    file(COPY "${JAVA_HOME}/include/${PLATFORM_DIR}/" DESTINATION "${JDK_LOCAL_PLATFORM_INCLUDE_DIR}")
+    
+    # Windows include directories
+    target_include_directories(aac_jni PRIVATE
+        "${JDK_LOCAL_INCLUDE_DIR}"
+        "${JDK_LOCAL_PLATFORM_INCLUDE_DIR}"
+        "${CMAKE_SOURCE_DIR}/../../native/include/fdk-aac"
+    )
+    
+    # Windows link libraries
+    target_link_libraries(aac_jni PRIVATE
+        "${FDK_AAC_LIB}"
+    )
+    
+    # Windows output directory
+    set_target_properties(aac_jni PROPERTIES
+        OUTPUT_NAME "aac_jni"
+        RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}/../../native/windows/x64/"
+    )
+    
+    # Compile with UTF-8 under MSVC to avoid C4819 warnings on non-ASCII comments
+    target_compile_options(aac_jni PRIVATE "$<$<C_COMPILER_ID:MSVC>:/utf-8>")
+elseif(UNIX AND NOT APPLE)
+    # Linux platform configuration
+    set(PLATFORM_DIR "linux")
+    set(NATIVE_PLATFORM_DIR "linux")
+    
+    # Locate the JDK header directories
+    set(JDK_INCLUDE_DIR "${JAVA_HOME}/include")
+    set(JDK_PLATFORM_INCLUDE_DIR "${JDK_INCLUDE_DIR}/${PLATFORM_DIR}")
+    
+    # Verify the JNI headers actually exist at the resolved location
+    if(NOT EXISTS "${JDK_INCLUDE_DIR}/jni.h")
+        message(WARNING "JNI 头文件 jni.h 未找到在 ${JDK_INCLUDE_DIR}")
+        # Try common Linux JDK install locations as a fallback
+        set(ALTERNATIVE_JDK_PATHS
+            "/usr/lib/jvm/default/include"
+            "/usr/lib/jvm/java-11-openjdk-amd64/include"
+            "/usr/lib/jvm/java-17-openjdk-amd64/include"
+        )
+        foreach(ALT_PATH IN LISTS ALTERNATIVE_JDK_PATHS)
+            if(EXISTS "${ALT_PATH}/jni.h")
+                set(JDK_INCLUDE_DIR "${ALT_PATH}")
+                set(JDK_PLATFORM_INCLUDE_DIR "${JDK_INCLUDE_DIR}/${PLATFORM_DIR}")
+                message(STATUS "找到替代 JDK 头文件路径: ${JDK_INCLUDE_DIR}")
+                break()
+            endif()
+        endforeach()
+    endif()
+    
+    # Linux include directories
+    target_include_directories(aac_jni PRIVATE
+        "${JDK_INCLUDE_DIR}"
+        "${JDK_PLATFORM_INCLUDE_DIR}"
+        "${CMAKE_SOURCE_DIR}/../../native/include/fdk-aac"
+    )
+    
+    # Linux link libraries - probe several candidate fdk-aac library paths
+    set(FDK_AAC_LIB_PATHS
+        "${CMAKE_SOURCE_DIR}/../../native/linux/libfdk-aac.so"
+        "/usr/lib/x86_64-linux-gnu/libfdk-aac.so"
+        "/usr/lib/libfdk-aac.so"
+    )
+    
+    # Pick the first fdk-aac library that exists
+    foreach(LIB_PATH IN LISTS FDK_AAC_LIB_PATHS)
+        if(EXISTS "${LIB_PATH}")
+            set(FDK_AAC_LIB "${LIB_PATH}")
+            message(STATUS "找到 fdk-aac 库: ${FDK_AAC_LIB}")
+            break()
+        endif()
+    endforeach()
+    
+    # Fall back to a system-installed library if no prebuilt one was found
+    if(NOT DEFINED FDK_AAC_LIB)
+        message(STATUS "未找到预编译的 fdk-aac 库,尝试使用系统库")
+        find_library(FDK_AAC_LIB NAMES fdk-aac)
+        if(FDK_AAC_LIB)
+            message(STATUS "找到系统 fdk-aac 库: ${FDK_AAC_LIB}")
+        else()
+            message(FATAL_ERROR "未找到 fdk-aac 库,请确保已安装或放置在正确位置")
+        endif()
+    endif()
+    
+    # Link against fdk-aac
+    target_link_libraries(aac_jni PRIVATE
+        "${FDK_AAC_LIB}"
+    )
+    
+    # Linux output directory
+    set_target_properties(aac_jni PROPERTIES
+        OUTPUT_NAME "aac_jni"
+        LIBRARY_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}/../../native/linux/"
+    )
+    
+    # Linux compile options
+    target_compile_options(aac_jni PRIVATE
+        -fPIC
+        -Wall
+        -Wextra
+        -Wpedantic
+        -std=c99  # C99 standard so size_t and friends are defined correctly
+    )
+else()
+    message(FATAL_ERROR "当前平台不支持。支持的平台:Windows 和 Linux。")
+endif()

+ 98 - 0
src/native/aac_jni.c

@@ -0,0 +1,98 @@
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jni.h"
+#include "aacenc_lib.h"
+#include "com_jttserver_codec_nativeaac_AacEncoderNative.h"
+
+/*
+ * JNI bridge layer for the FDK-AAC encoder:
+ * - initialize the encoder and configure its parameters
+ * - encode one frame of interleaved PCM (S16LE)
+ * - close the encoder and release resources
+ */
+typedef struct {
+    HANDLE_AACENCODER enc;   /* FDK-AAC encoder handle */
+    int channels;            /* number of interleaved channels (1 or 2) */
+    int samplerate;          /* input sample rate in Hz */
+    int frameSize; // 1024 for AAC-LC (samples per channel per frame)
+    int useAdts;             /* non-zero -> output ADTS frames, else raw AAC */
+} AacCtx;
+
+/*
+ * Create and configure an FDK-AAC encoder.
+ *
+ * Params: sampleRate in Hz, channels (1=mono, 2=stereo), bitrate in bit/s,
+ *         aot (Audio Object Type, e.g. 2 = AAC-LC), useAdts (ADTS vs raw output).
+ * Returns: opaque AacCtx pointer as jlong, or 0 on any failure (never throws).
+ */
+JNIEXPORT jlong JNICALL Java_com_jttserver_codec_nativeaac_AacEncoderNative_initEncoder
+  (JNIEnv* env, jclass cls, jint sampleRate, jint channels, jint bitrate, jint aot, jboolean useAdts) {
+    (void)env; (void)cls; /* unused JNI parameters */
+    AacCtx* ctx = (AacCtx*)calloc(1, sizeof(AacCtx));
+    if (!ctx) return 0;
+
+    if (aacEncOpen(&ctx->enc, 0, channels) != AACENC_OK) { free(ctx); return 0; }
+
+    if (aacEncoder_SetParam(ctx->enc, AACENC_AOT, aot) != AACENC_OK) { aacEncClose(&ctx->enc); free(ctx); return 0; }
+    if (aacEncoder_SetParam(ctx->enc, AACENC_SAMPLERATE, sampleRate) != AACENC_OK) { aacEncClose(&ctx->enc); free(ctx); return 0; }
+    if (aacEncoder_SetParam(ctx->enc, AACENC_CHANNELMODE, channels == 1 ? MODE_1 : MODE_2) != AACENC_OK) { aacEncClose(&ctx->enc); free(ctx); return 0; }
+    if (aacEncoder_SetParam(ctx->enc, AACENC_BITRATE, bitrate) != AACENC_OK) { aacEncClose(&ctx->enc); free(ctx); return 0; }
+    if (aacEncoder_SetParam(ctx->enc, AACENC_AFTERBURNER, 1) != AACENC_OK) { aacEncClose(&ctx->enc); free(ctx); return 0; }
+    if (aacEncoder_SetParam(ctx->enc, AACENC_TRANSMUX, useAdts ? TT_MP4_ADTS : TT_MP4_RAW) != AACENC_OK) { aacEncClose(&ctx->enc); free(ctx); return 0; }
+
+    /* FDK-AAC requires one aacEncEncode() call with all-NULL arguments to
+     * commit the configured parameters and initialize the encoder; without it
+     * the first real encode call fails with AACENC_INIT_ERROR. */
+    if (aacEncEncode(ctx->enc, NULL, NULL, NULL, NULL) != AACENC_OK) { aacEncClose(&ctx->enc); free(ctx); return 0; }
+
+    ctx->channels = channels;
+    ctx->samplerate = sampleRate;
+    /* Query the actual frame length instead of assuming 1024: AAC-LC uses
+     * 1024 samples/channel, but e.g. HE-AAC variants use 2048. */
+    {
+        AACENC_InfoStruct info;
+        memset(&info, 0, sizeof(info));
+        ctx->frameSize = (aacEncInfo(ctx->enc, &info) == AACENC_OK) ? (int)info.frameLength : 1024;
+    }
+    ctx->useAdts = useAdts ? 1 : 0;
+    return (jlong)(intptr_t)ctx;
+}
+
+/*
+ * Encode one frame of interleaved S16LE PCM.
+ *
+ * Params: handle from initEncoder; pcmInterleaved holds samples*channels shorts.
+ * Returns: a byte[] with the encoded ADTS/raw AAC frame (may be empty while the
+ *          encoder is still buffering input), or NULL on error.
+ */
+JNIEXPORT jbyteArray JNICALL Java_com_jttserver_codec_nativeaac_AacEncoderNative_encodeFrame
+  (JNIEnv* env, jclass cls, jlong handle, jshortArray pcmInterleaved) {
+    (void)cls; /* unused */
+    AacCtx* ctx = (AacCtx*)(intptr_t)handle;
+    if (!ctx || !pcmInterleaved) return NULL;
+
+    jsize samples = (*env)->GetArrayLength(env, pcmInterleaved);
+    jshort* pcm = (*env)->GetShortArrayElements(env, pcmInterleaved, NULL);
+    if (!pcm) return NULL; /* out of memory; a pending OutOfMemoryError is set */
+
+    AACENC_BufDesc inDesc = {0}, outDesc = {0};
+    AACENC_InArgs inArgs = {0};
+    AACENC_OutArgs outArgs = {0};
+
+    int inId = IN_AUDIO_DATA;
+    int inElemSize = sizeof(short);
+    int inSizeBytes = samples * sizeof(short);
+    void* inBuf = (void*)pcm;
+
+    unsigned char outBuf[8192]; /* output buffer, large enough for one ADTS/raw frame */
+    int outId = OUT_BITSTREAM_DATA;
+    int outElemSize = 1;
+    int outSizeBytes = sizeof(outBuf);
+    void* outPtr = (void*)outBuf;
+
+    inDesc.numBufs = 1;
+    inDesc.bufs = &inBuf;
+    inDesc.bufferIdentifiers = &inId;
+    inDesc.bufSizes = &inSizeBytes;
+    inDesc.bufElSizes = &inElemSize;
+
+    outDesc.numBufs = 1;
+    outDesc.bufs = &outPtr;
+    outDesc.bufferIdentifiers = &outId;
+    outDesc.bufSizes = &outSizeBytes;
+    outDesc.bufElSizes = &outElemSize;
+
+    /* FDK-AAC expects numInSamples to be the TOTAL number of input samples
+     * across all channels for interleaved input per aacEncEncode docs —
+     * NOTE(review): the original divided by channel count; confirm against
+     * aacenc_lib.h before changing, kept as-is here. */
+    inArgs.numInSamples = samples / ctx->channels;
+
+    AACENC_ERROR err = aacEncEncode(ctx->enc, &inDesc, &outDesc, &inArgs, &outArgs);
+    /* JNI_ABORT: the PCM buffer was only read, never written, so skip the
+     * (potentially costly) copy-back that mode 0 would perform. */
+    (*env)->ReleaseShortArrayElements(env, pcmInterleaved, pcm, JNI_ABORT);
+    if (err != AACENC_OK) return NULL;
+
+    /* numOutBytes may be 0 while the encoder is still buffering input;
+     * callers receive an empty (but non-NULL) array in that case. */
+    int n = outArgs.numOutBytes;
+    jbyteArray out = (*env)->NewByteArray(env, n);
+    if (!out) return NULL;
+    (*env)->SetByteArrayRegion(env, out, 0, n, (jbyte*)outBuf);
+    return out;
+}
+
+/*
+ * Close the encoder and free the context.
+ * Safe to call with a 0/NULL handle; must not be called twice for one handle
+ * (the Java wrapper is responsible for clearing its stored handle).
+ */
+JNIEXPORT void JNICALL Java_com_jttserver_codec_nativeaac_AacEncoderNative_close
+  (JNIEnv* env, jclass cls, jlong handle) {
+    AacCtx* ctx = (AacCtx*)(intptr_t)handle;
+    if (!ctx) return;
+    if (ctx->enc) aacEncClose(&ctx->enc);
+    free(ctx);
+}

+ 681 - 0
src/test/java/com/jttserver/codec/FlvPacketizerTest.java

@@ -0,0 +1,681 @@
+package com.jttserver.codec;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import com.jttserver.protocol.Jtt1078NaluPacket;
+import com.jttserver.protocol.Jtt1078PacketParams;
+import com.jttserver.utils.CommonUtils;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+public class FlvPacketizerTest {
+    private FlvPacketizer flvPacketizer;
+
+    
+    // A fresh packetizer per test so per-channel state never leaks between cases.
+    @BeforeEach
+    void setUp() {
+        flvPacketizer = new FlvPacketizer();
+    }
+
+    /**
+     * Tests the FLV header creation method.
+     * Verifies that the generated header conforms to the FLV file format specification.
+     */
+    @Test
+    void testCreateFlvHeader() {
+        // Build the FLV file header
+        byte[] header = flvPacketizer.createFlvHeader();
+        
+        // The FLV header is 9 bytes plus the 4-byte PreviousTagSize0 = 13 bytes
+        assertEquals(13, header.length);
+        
+        // Check the FLV signature (ASCII: F=0x46, L=0x4C, V=0x56)
+        assertEquals(0x46, header[0]); // F
+        assertEquals(0x4C, header[1]); // L
+        assertEquals(0x56, header[2]); // V
+        
+        // Check the version (must be 1)
+        assertEquals(0x01, header[3]);
+        
+        // Check the type flags (video + audio)
+        assertEquals(0x05, header[4]);
+        
+        // Check the header length field
+        assertEquals(0x00, header[5]);
+        assertEquals(0x00, header[6]);
+        assertEquals(0x00, header[7]);
+        assertEquals(0x09, header[8]);
+        
+        // Check PreviousTagSize0
+        assertEquals(0x00, header[9]);
+        assertEquals(0x00, header[10]);
+        assertEquals(0x00, header[11]);
+        assertEquals(0x00, header[12]);
+    }
+
+    
+    /**
+     * Tests handling of H.264 SPS and PPS parameter sets.
+     * Verifies the flow where the SPS is received before the PPS in an H.264 stream.
+     */
+    @Test
+    void testProcessH264SpsThenPps() {
+        // Channel ID used by this test case
+        String channelId = "test_channel_1";
+        
+        // SPS data (simplified)
+        // The SPS (Sequence Parameter Set) carries sequence-level parameters of an H.264 stream
+        byte[] spsData = new byte[]{0x67, 0x42, 0x00, 0x0A, (byte)0xF8, 0x41, (byte)0xA2};
+        
+        // PPS data (simplified)
+        // The PPS (Picture Parameter Set) carries picture-level parameters of an H.264 stream
+        byte[] ppsData = new byte[]{0x68, (byte)0xCB, (byte)0x8C, (byte)0xB2};
+        
+        // Parameter object configuring how the packet is processed
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.payloadType = 98; // H.264 video payload type
+        
+        // Feed the SPS first
+        // By design an SPS alone returns an empty array; the packetizer waits for the PPS
+        byte[] result1 = flvPacketizer.processVideoNalu(channelId, spsData, params, 0);
+        // SPS alone should yield an empty array while waiting for the PPS
+        assertEquals(0, result1.length);
+        
+        // Now feed the PPS
+        byte[] result2 = flvPacketizer.processVideoNalu(channelId, ppsData, params, 0);
+        // The PPS should produce the sequence header
+        assertTrue(result2.length > 0);
+        // The result is the FLV file header plus the sequence header
+        int baseIndex = 13;
+
+        assertTrue(result2.length >14, "不够flv头长度");
+        // Check it is a video tag (tag type = 0x09)
+        assertEquals(0x09, result2[baseIndex + 0]);
+        
+        assertTrue(result2.length >14 + 11 + 1, "不够flv头长度+AVC序列头长度");
+        // Check the AVC sequence header marker (AVCPacketType = 0x00)
+        // Tag header: 1-byte TagType + 3-byte DataSize + 3-byte Timestamp + 1-byte TimestampExtended + 3-byte StreamID = 11 bytes
+        assertEquals(0x00, result2[baseIndex+ 11 + 1]); // AVCPacketType
+    }
+
+    
+    /**
+     * Tests handling of H.264 PPS followed by SPS.
+     * Verifies the flow where the PPS parameter set arrives before the SPS.
+     */
+    @Test
+    void testProcessH264PpsThenSps() {
+        String channelId = "test_channel_2"; // Test channel ID identifying this video stream
+        
+        // SPS data (simplified)
+        // The SPS (Sequence Parameter Set) holds basic encoding parameters of an H.264 stream,
+        // such as picture size, frame rate, and the encoding profile
+        byte[] spsData = new byte[]{0x67, 0x42, 0x00, 0x0A, (byte)0xF8, 0x41, (byte)0xA2};
+        
+        // PPS data (simplified)
+        // The PPS (Picture Parameter Set) holds picture-specific encoding parameters,
+        // such as the entropy coding mode and block sizes
+        byte[] ppsData = new byte[]{0x68, (byte)0xCB, (byte)0x8C, (byte)0xB2};
+        
+        // Create the parameter object
+        // Jtt1078PacketParams wraps the configuration used for FLV packaging
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.payloadType = 98; // H.264 payload type as defined by the JTT1078 standard
+        
+        // Feed the PPS first
+        // Expected: processing the PPS alone returns an empty array, since the SPS is
+        // required before a complete video sequence header can be built
+        byte[] result1 = flvPacketizer.processVideoNalu(channelId, ppsData, params, 0);
+        // PPS alone should yield an empty array while waiting for the SPS
+        assertEquals(0, result1.length);
+        
+        // Now feed the SPS
+        byte[] result2 = flvPacketizer.processVideoNalu(channelId, spsData, params, 0);
+        // The SPS should produce the sequence header
+        assertTrue(result2.length > 0);
+        
+        // The result is the FLV file header plus the sequence header
+        int baseIndex = 13;
+
+        assertTrue(result2.length >14, "不够flv头长度");
+        // Check it is a video tag (tag type = 0x09)
+        assertEquals(0x09, result2[baseIndex + 0]);
+        
+        assertTrue(result2.length >14 + 11 + 1, "不够flv头长度+AVC序列头长度");
+        // Check the AVC sequence header marker (AVCPacketType = 0x00)
+        // Tag header: 1-byte TagType + 3-byte DataSize + 3-byte Timestamp + 1-byte TimestampExtended + 3-byte StreamID = 11 bytes
+        assertEquals(0x00, result2[baseIndex + 11 + 1]); // AVCPacketType
+    }
+
+    
+    /**
+     * Tests handling of an H.264 video frame after the sequence header.
+     * Verifies that an IDR frame is processed correctly once SPS and PPS are set up.
+     */
+    @Test
+    void testProcessH264VideoFrameAfterSequenceHeader() {
+        String channelId = "test_channel_3"; // Test channel ID
+        
+        // SPS data (simplified)
+        // The SPS (Sequence Parameter Set) carries H.264 sequence parameters
+        byte[] spsData = new byte[]{0x67, 0x42, 0x00, 0x0A, (byte)0xF8, 0x41, (byte)0xA2};
+        
+        // PPS data (simplified)
+        // The PPS (Picture Parameter Set) carries H.264 picture parameters
+        byte[] ppsData = new byte[]{0x68, (byte)0xCB, (byte)0x8C, (byte)0xB2};
+        
+        // IDR frame data
+        // An IDR frame is a key frame that contains a complete picture
+        byte[] idrData = new byte[]{0x65, (byte)0x88, (byte)0x84, 0x00, 0x00, 0x03, 0x00};
+        
+        // Create the parameter object
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.payloadType = 98; // H.264
+        
+        // Feed SPS and PPS first to establish the sequence header
+        flvPacketizer.processVideoNalu(channelId, spsData, params, 0);
+        flvPacketizer.processVideoNalu(channelId, ppsData, params, 0);
+        
+        // Process the IDR frame
+        byte[] result = flvPacketizer.processVideoNalu(channelId, idrData, params, 1000);
+        
+        // The result must not be empty
+        assertTrue(result.length > 0);
+        
+        // Check it is a video tag (tag type = 0x09)
+        assertEquals(0x09, result[0]);
+        
+        // Check the frame type (an IDR frame should be 0x17)
+        // Tag header: 1-byte TagType + 3-byte DataSize + 3-byte Timestamp + 1-byte TimestampExtended + 3-byte StreamID = 11 bytes
+        assertEquals(0x17, result[11]); // FrameType + CodecID
+    }
+
+    
+    /**
+     * Tests processing of H.265 VPS, SPS and PPS in order.
+     * Verifies that the FLV packetizer handles the H.265 parameter sets correctly
+     * and produces a valid sequence header.
+     */
+    @Test
+    void testProcessH265VpsSpsPpsInOrder() {
+        String channelId = "test_channel_4"; // Test channel ID
+        
+        // VPS data
+        byte[] vpsData = new byte[]{0x40, 0x01, 0x0C, 0x01, (byte)0xFF, (byte)0xFF, 0x01, 0x40, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, (byte)0x7B, (byte)0xAC, 0x09};
+        
+        // SPS data
+        byte[] spsData = new byte[]{0x42, 0x01, 0x01, 0x01, 0x40, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, (byte)0x7B, (byte)0xA0, 0x03, (byte)0xC0, (byte)0x80, 0x32, 0x16, (byte)0x5A, (byte)0xC0, 0x50, 0x04, 0x04, 0x04, 0x04, (byte)0x80};
+        
+        // PPS data
+        byte[] ppsData = new byte[]{0x44, 0x01, (byte)0xC1, 0x72, (byte)0xB4, 0x62, 0x40};
+        
+        // Create the parameter object
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.payloadType = 99; // H.265
+        
+        // Process the VPS
+        byte[] result1 = flvPacketizer.processVideoNalu(channelId, vpsData, params, 0);
+        assertEquals(0, result1.length);
+        
+        // Process the SPS
+        byte[] result2 = flvPacketizer.processVideoNalu(channelId, spsData, params, 0);
+        assertEquals(0, result2.length);
+        
+        // Process the PPS
+        byte[] result3 = flvPacketizer.processVideoNalu(channelId, ppsData, params, 0);
+        // The PPS should produce the sequence header
+        assertTrue(result3.length > 0);
+        
+        // The result is the FLV file header plus the sequence header
+        int baseIndex = 13;
+
+        assertTrue(result3.length >14, "不够flv头长度");
+
+        // Check it is a video tag (tag type = 0x09)
+        assertEquals(0x09, result3[baseIndex + 0]);
+        
+        assertTrue(result3.length >14 + 11 + 1, "不够flv头长度+HEVC序列头长度");
+        // Check the HEVC sequence header marker (AVCPacketType = 0x00)
+        // Tag header: 1-byte TagType + 3-byte DataSize + 3-byte Timestamp + 1-byte TimestampExtended + 3-byte StreamID = 11 bytes
+        assertEquals(0x00, result3[baseIndex + 11 + 1]); // AVCPacketType
+    }
+
+    
+    /**
+     * Tests processing of H.265 VPS, SPS and PPS arriving out of order.
+     * Verifies that the FLV packetizer still handles the parameter sets correctly
+     * and produces the expected sequence header when they do not arrive in order.
+     */
+    @Test
+    void testProcessH265VpsSpsPpsOutOfOrder() {
+        // Test channel ID
+        String channelId = "test_channel_5";
+        
+        // VPS data
+        byte[] vpsData = new byte[]{0x40, 0x01, 0x0C, 0x01, (byte)0xFF, (byte)0xFF, 0x01, 0x40, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, (byte)0x7B, (byte)0xAC, 0x09};
+        
+        // SPS data
+        byte[] spsData = new byte[]{0x42, 0x01, 0x01, 0x01, 0x40, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, (byte)0x7B, (byte)0xA0, 0x03, (byte)0xC0, (byte)0x80, 0x32, 0x16, (byte)0x5A, (byte)0xC0, 0x50, 0x04, 0x04, 0x04, 0x04, (byte)0x80};
+        
+        // PPS data
+        byte[] ppsData = new byte[]{0x44, 0x01, (byte)0xC1, 0x72, (byte)0xB4, 0x62, 0x40};
+        
+        // Create the parameter object
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.payloadType = 99; // H.265
+        
+        // Process the PPS
+        byte[] result1 = flvPacketizer.processVideoNalu(channelId, ppsData, params, 0);
+        assertEquals(0, result1.length);
+        
+        // Process the VPS
+        byte[] result2 = flvPacketizer.processVideoNalu(channelId, vpsData, params, 0);
+        assertEquals(0, result2.length);
+        
+        // Process the SPS
+        byte[] result3 = flvPacketizer.processVideoNalu(channelId, spsData, params, 0);
+        // The SPS should produce the sequence header
+        assertTrue(result3.length > 0);
+        
+        // The result is the FLV file header plus the sequence header
+        int baseIndex = 13;
+
+        assertTrue(result3.length >14, "不够flv头长度");
+
+
+        // Check it is a video tag (tag type = 0x09)
+        assertEquals(0x09, result3[baseIndex + 0]);
+        
+        assertTrue(result3.length >14 + 11 + 1, "不够flv头长度+HEVC序列头长度");
+        // Check the HEVC sequence header marker (AVCPacketType = 0x00)
+        // Tag header: 1-byte TagType + 3-byte DataSize + 3-byte Timestamp + 1-byte TimestampExtended + 3-byte StreamID = 11 bytes
+        assertEquals(0x00, result3[baseIndex + 11 + 1]); // AVCPacketType
+    }
+
+    
+    /**
+     * Tests the channel clearing functionality.
+     * Verifies that after clearing a channel, re-processing data for the same channel works again.
+     */
+    @Test
+    void testClearChannel() {
+        // Channel ID used by this test
+        String channelId = "test_channel_6";
+        
+        // SPS data (simplified)
+        byte[] spsData = new byte[]{0x67, 0x42, 0x00, 0x0A, (byte)0xF8, 0x41, (byte)0xA2};
+        
+        // PPS data (simplified)
+        byte[] ppsData = new byte[]{0x68, (byte)0xCB, (byte)0x8C, (byte)0xB2};
+        
+        // Create the parameter object
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.payloadType = 98; // H.264
+        
+        // Process SPS and PPS
+        flvPacketizer.processVideoNalu(channelId, spsData, params, 0);
+        flvPacketizer.processVideoNalu(channelId, ppsData, params, 0);
+        
+        // Clear the channel state
+        flvPacketizer.clearChannel(channelId);
+        
+        // Re-processing data for the same channel should behave as if it were the first time
+        byte[] result1 = flvPacketizer.processVideoNalu(channelId, spsData, params, 0);
+        assertEquals(0, result1.length);
+        
+        byte[] result2 = flvPacketizer.processVideoNalu(channelId, ppsData, params, 0);
+        assertTrue(result2.length > 0); // The sequence header should be generated again
+    }
+
+    
+    /**
+     * Tests packaging of an H.264 IDR (key) frame.
+     * Verifies that, once SPS/PPS are processed, an IDR frame is correctly
+     * wrapped into an FLV video tag.
+     */
+    @Test
+    void testProcessH264IdrFrame() {
+        String channelId = "test_channel_7";
+        
+        // SPS data
+        byte[] spsData = new byte[]{0x67, 0x42, 0x00, 0x0A, (byte)0xF8, 0x41, (byte)0xA2};
+        
+        // PPS data
+        byte[] ppsData = new byte[]{0x68, (byte)0xCB, (byte)0x8C, (byte)0xB2};
+        
+        // IDR frame data (0x65 marks an IDR frame)
+        byte[] idrFrameData = new byte[]{0x65, (byte)0x88, (byte)0x84, 0x00, 0x00, 0x03, 0x00};
+        
+        // Create the parameter object
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.payloadType = 98; // H.264
+        
+        // Feed SPS and PPS first to establish the sequence header
+        flvPacketizer.processVideoNalu(channelId, spsData, params, 0);
+        flvPacketizer.processVideoNalu(channelId, ppsData, params, 0);
+        
+        // Process the IDR frame
+        byte[] result = flvPacketizer.processVideoNalu(channelId, idrFrameData, params, 1000);
+        
+        // Verify the result
+        assertTrue(result.length > 0, "IDR帧处理结果不应为空");
+        assertEquals(0x09, result[0], "FLV标签类型应为视频标签(0x09)");
+        
+        // Tag header: 1-byte TagType + 3-byte DataSize + 3-byte Timestamp + 1-byte TimestampExtended + 3-byte StreamID = 11 bytes
+        assertEquals(0x17, result[11], "IDR帧的FrameType和CodecID应为0x17");
+        assertEquals(0x01, result[12], "AVCPacketType应为0x01(NALU单元)");
+    }
+
+    /**
+     * Tests packaging of an H.264 P (predicted) frame.
+     * Verifies that, once SPS/PPS are processed, a P frame is correctly
+     * wrapped into an FLV video tag.
+     */
+    @Test
+    void testProcessH264PFrame() {
+        String channelId = "test_channel_8";
+        
+        // SPS data
+        byte[] spsData = new byte[]{0x67, 0x42, 0x00, 0x0A, (byte)0xF8, 0x41, (byte)0xA2};
+        
+        // PPS data
+        byte[] ppsData = new byte[]{0x68, (byte)0xCB, (byte)0x8C, (byte)0xB2};
+        
+        // P-frame data (0x41 marks a P frame)
+        byte[] pFrameData = new byte[]{0x41, (byte)0x9A, (byte)0x84, 0x00, 0x00, 0x03, 0x00};
+        
+        // Create the parameter object
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.payloadType = 98; // H.264
+        
+        // Feed SPS and PPS first to establish the sequence header
+        flvPacketizer.processVideoNalu(channelId, spsData, params, 0);
+        flvPacketizer.processVideoNalu(channelId, ppsData, params, 0);
+        
+        // Process the P frame
+        byte[] result = flvPacketizer.processVideoNalu(channelId, pFrameData, params, 2000);
+        
+        // Verify the result
+        assertTrue(result.length > 0, "P帧处理结果不应为空");
+        assertEquals(0x09, result[0], "FLV标签类型应为视频标签(0x09)");
+        
+        // Tag header: 1-byte TagType + 3-byte DataSize + 3-byte Timestamp + 1-byte TimestampExtended + 3-byte StreamID = 11 bytes
+        assertEquals(0x27, result[11], "P帧的FrameType和CodecID应为0x27");
+        assertEquals(0x01, result[12], "AVCPacketType应为0x01(NALU单元)");
+    }
+
+    /**
+     * Tests packaging of an H.265 IDR frame.
+     * Verifies that, once VPS/SPS/PPS are processed, an H.265 IDR frame is
+     * correctly wrapped into an FLV video tag.
+     */
+    @Test
+    void testProcessH265IdrFrame() {
+        String channelId = "test_channel_9";
+        
+        // VPS data
+        byte[] vpsData = new byte[]{0x40, 0x01, 0x0C, 0x01, (byte)0xFF, (byte)0xFF, 0x01, 0x40, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, (byte)0x7B, (byte)0xAC, 0x09};
+        
+        // SPS data
+        byte[] spsData = new byte[]{0x42, 0x01, 0x01, 0x01, 0x40, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, (byte)0x7B, (byte)0xA0, 0x03, (byte)0xC0, (byte)0x80, 0x32, 0x16, (byte)0x5A, (byte)0xC0, 0x50, 0x04, 0x04, 0x04, 0x04, (byte)0x80};
+        
+        // PPS data
+        byte[] ppsData = new byte[]{0x44, 0x01, (byte)0xC1, 0x72, (byte)0xB4, 0x62, 0x40};
+        
+        // H.265 IDR frame data (0x26 marks an IDR frame)
+        byte[] idrFrameData = new byte[]{0x26, 0x01, (byte)0x88, (byte)0x84, 0x00, 0x00, 0x03, 0x00};
+        
+        // Create the parameter object
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.payloadType = 99; // H.265
+        
+        // Feed VPS, SPS and PPS first to establish the sequence header
+        flvPacketizer.processVideoNalu(channelId, vpsData, params, 0);
+        flvPacketizer.processVideoNalu(channelId, spsData, params, 0);
+        flvPacketizer.processVideoNalu(channelId, ppsData, params, 0);
+        
+        // Process the IDR frame
+        byte[] result = flvPacketizer.processVideoNalu(channelId, idrFrameData, params, 1000);
+        
+        // Verify the result
+        assertTrue(result.length > 0, "H.265 IDR帧处理结果不应为空");
+        assertEquals(0x09, result[0], "FLV标签类型应为视频标签(0x09)");
+        
+        // Tag header: 1-byte TagType + 3-byte DataSize + 3-byte Timestamp + 1-byte TimestampExtended + 3-byte StreamID = 11 bytes
+        assertEquals(0x1C, result[11], "H.265 IDR帧的FrameType和CodecID应为0x1C");
+        assertEquals(0x01, result[12], "AVCPacketType应为0x01(NALU单元)");
+    }
+
+    /**
+     * Tests packaging of an H.265 P frame.
+     * Verifies that, once VPS/SPS/PPS are processed, an H.265 P frame is
+     * correctly wrapped into an FLV video tag.
+     */
+    @Test
+    void testProcessH265PFrame() {
+        String channelId = "test_channel_10";
+        
+        // VPS data
+        byte[] vpsData = new byte[]{0x40, 0x01, 0x0C, 0x01, (byte)0xFF, (byte)0xFF, 0x01, 0x40, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, (byte)0x7B, (byte)0xAC, 0x09};
+        
+        // SPS data
+        byte[] spsData = new byte[]{0x42, 0x01, 0x01, 0x01, 0x40, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, 0x00, 0x03, 0x00, (byte)0x7B, (byte)0xA0, 0x03, (byte)0xC0, (byte)0x80, 0x32, 0x16, (byte)0x5A, (byte)0xC0, 0x50, 0x04, 0x04, 0x04, 0x04, (byte)0x80};
+        
+        // PPS data
+        byte[] ppsData = new byte[]{0x44, 0x01, (byte)0xC1, 0x72, (byte)0xB4, 0x62, 0x40};
+        
+        // H.265 P-frame data (0x02 marks a P frame)
+        byte[] pFrameData = new byte[]{0x02, 0x01, (byte)0x9A, (byte)0x84, 0x00, 0x00, 0x03, 0x00};
+        
+        // Create the parameter object
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.payloadType = 99; // H.265
+        
+        // Feed VPS, SPS and PPS first to establish the sequence header
+        flvPacketizer.processVideoNalu(channelId, vpsData, params, 0);
+        flvPacketizer.processVideoNalu(channelId, spsData, params, 0);
+        flvPacketizer.processVideoNalu(channelId, ppsData, params, 0);
+        
+        // Process the P frame
+        byte[] result = flvPacketizer.processVideoNalu(channelId, pFrameData, params, 2000);
+        
+        // Verify the result
+        assertTrue(result.length > 0, "H.265 P帧处理结果不应为空");
+        assertEquals(0x09, result[0], "FLV标签类型应为视频标签(0x09)");
+        
+        // Tag header: 1-byte TagType + 3-byte DataSize + 3-byte Timestamp + 1-byte TimestampExtended + 3-byte StreamID = 11 bytes
+        assertEquals(0x2C, result[11], "H.265 P帧的FrameType和CodecID应为0x2C");
+        assertEquals(0x01, result[12], "AVCPacketType应为0x01(NALU单元)");
+    }
+
+    /**
+     * Tests processing of empty NALU data.
+     * Verifies that an empty NALU input is handled gracefully without throwing.
+     */
+    @Test
+    void testProcessEmptyNaluData() {
+        String channelId = "test_channel_11";
+        
+        // Create the parameter object
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.payloadType = 98; // H.264
+        
+        // Process empty NALU data
+        byte[] result = flvPacketizer.processVideoNalu(channelId, new byte[0], params, 0);
+        
+        // Verify the result
+        assertEquals(0, result.length, "处理空NALU数据应返回空数组");
+    }
+
+    /**
+     * Tests processing of null NALU data.
+     * Verifies that a null NALU input is handled gracefully without throwing.
+     */
+    @Test
+    void testProcessNullNaluData() {
+        String channelId = "test_channel_12";
+        
+        // Create the parameter object
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.payloadType = 98; // H.264
+        
+        // Process null NALU data
+        byte[] result = flvPacketizer.processVideoNalu(channelId, null, params, 0);
+        
+        // Verify the result
+        assertEquals(0, result.length, "处理null NALU数据应返回空数组");
+    }
+
+    /**
+     * Tests the audio tag creation method.
+     * Verifies that the generated audio tag conforms to the FLV file format specification.
+     */
+    @Test
+    void testCreateAudioTag() {
+        // Test payload
+        byte[] audioData = new byte[]{0x12, 0x10, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05};
+        
+        try {
+            // AAC sequence-header packet type
+            byte[] sequenceHeaderResult = flvPacketizer.createAudioTag((byte) 0x00, audioData, 1000L);
+
+            assertTrue(sequenceHeaderResult.length > 0, "音频标签结果不应为空");
+            assertEquals(0x08, sequenceHeaderResult[0], "FLV标签类型应为音频标签(0x08)");
+            
+            // Verify the timestamp
+            // The timestamp occupies 4 bytes in an FLV tag (3 base bytes + 1 extended byte)
+            assertEquals(0x00, sequenceHeaderResult[4], "时间戳高位应为0x00");
+            assertEquals(0x03, sequenceHeaderResult[5], "时间戳中位应为0x03");
+            assertEquals((byte) 0xE8, sequenceHeaderResult[6], "时间戳低位应为0xE8");
+            
+            // AAC raw-data packet type
+            byte[] rawDataResult = flvPacketizer.createAudioTag((byte) 0x01, audioData, 2000L);
+            
+            assertTrue(rawDataResult.length > 0, "音频标签结果不应为空");
+            assertEquals(0x08, rawDataResult[0], "FLV标签类型应为音频标签(0x08)");
+            
+            // Verify the audio header byte
+            // Tag header: 1-byte TagType + 3-byte DataSize + 3-byte Timestamp + 1-byte TimestampExtended + 3-byte StreamID = 11 bytes
+            // The audio header byte is therefore the 12th byte (index 11)
+            assertEquals((byte) 0xAF, rawDataResult[11], "音频头信息应为0xAF (AAC, 44kHz, 16-bit, Stereo)");
+            assertEquals(0x01, rawDataResult[12], "AAC包类型应为0x01 (raw data)");
+            
+        } catch (Exception e) {
+            fail("测试音频标签创建方法时发生异常: " + e.getMessage());
+        }
+    }
+
+    /**
+     * Tests the video tag creation method.
+     * Verifies that the generated video tag conforms to the FLV file format specification.
+     */
+    @Test
+    void testCreateVideoTag() {
+        // Test payload
+        byte[] videoData = new byte[]{0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x00, 0x0A, (byte)0xF8, 0x41, (byte)0xA2};
+        
+        try {
+            // Create a video tag
+            byte[] result = flvPacketizer.createVideoTag((byte) 0x17, (byte) 0x01, 0, videoData, 1000L);
+            
+            assertTrue(result.length > 0, "视频标签结果不应为空");
+            assertEquals(0x09, result[0], "FLV标签类型应为视频标签(0x09)");
+            
+            // Verify the timestamp
+            // The timestamp occupies 4 bytes in an FLV tag (3 base bytes + 1 extended byte)
+            assertEquals(0x00, result[4], "时间戳高位应为0x00");
+            assertEquals(0x03, result[5], "时间戳中位应为0x03");
+            assertEquals((byte) 0xE8, result[6], "时间戳低位应为0xE8");
+            
+            // Verify the video header bytes
+            // Tag header: 1-byte TagType + 3-byte DataSize + 3-byte Timestamp + 1-byte TimestampExtended + 3-byte StreamID = 11 bytes
+            assertEquals(0x17, result[11], "帧类型和编解码器标识应为0x17");
+            assertEquals(0x01, result[12], "AVC包类型应为0x01");
+            
+        } catch (Exception e) {
+            fail("测试视频标签创建方法时发生异常: " + e.getMessage());
+        }
+    }
+
+    
+    // Byte-level structural check of a raw AAC audio tag.
+    @Test
+    void testCreateAudioTagStructure() {
+        byte[] data = new byte[] { 0x01, 0x02, 0x03 };
+        long timestamp = 0x01020304L; // 16909060
+        byte aacPacketType = 1;
+
+        byte[] tag = flvPacketizer.createAudioTag(aacPacketType, data, timestamp);
+        assertNotNull(tag);
+        assertTrue(tag.length > 0);
+
+        // Leading byte: TagType
+        assertEquals(0x08, tag[0] & 0xFF, "TagType 应为音频 0x08");
+
+        // DataLength (UI24) = 2 (audio info) + data.length
+        int dataLength = ((tag[1] & 0xFF) << 16) | ((tag[2] & 0xFF) << 8) | (tag[3] & 0xFF);
+        assertEquals(2 + data.length, dataLength, "DataLength 应为 2 + 数据长度");
+
+        // Timestamp: 24-bit base + 8-bit extension; the high byte goes into the extension field
+        // Expected: 0x01020304 -> [0x02,0x03,0x04] + [0x01]
+        assertEquals(0x02, tag[4] & 0xFF);
+        assertEquals(0x03, tag[5] & 0xFF);
+        assertEquals(0x04, tag[6] & 0xFF);
+        assertEquals(0x01, tag[7] & 0xFF);
+
+        // StreamID (3 bytes, all zero)
+        assertEquals(0x00, tag[8] & 0xFF);
+        assertEquals(0x00, tag[9] & 0xFF);
+        assertEquals(0x00, tag[10] & 0xFF);
+
+        // SoundFormat/Rate/Size/Type is always written as 0xAF
+        assertEquals(0xAF, tag[11] & 0xFF);
+
+        // AACPacketType
+        assertEquals(aacPacketType, tag[12]);
+
+        // Payload bytes
+        assertEquals(data[0], tag[13]);
+        assertEquals(data[1], tag[14]);
+        assertEquals(data[2], tag[15]);
+
+        // PreviousTagSize equals the whole tag length (excluding its own 4 bytes)
+        int previousTagSize = ((tag[tag.length - 4] & 0xFF) << 24)
+                | ((tag[tag.length - 3] & 0xFF) << 16)
+                | ((tag[tag.length - 2] & 0xFF) << 8)
+                | (tag[tag.length - 1] & 0xFF);
+        assertEquals(tag.length - 4, previousTagSize, "PreviousTagSize 应等于总长度减4");
+    }
+
+    // Byte-level check of the AAC sequence header (AudioSpecificConfig) tag.
+    @Test
+    void testCreateAacSequenceHeaderTag() {
+        int sampleRate = 8000; // frequency index should be 11
+        int channels = 1;
+        long timestamp = 0x00000010L;
+
+        byte[] tag = flvPacketizer.createAacSequenceHeader(sampleRate, channels, timestamp);
+        assertNotNull(tag);
+        assertTrue(tag.length > 0);
+
+        // Basic header checks
+        assertEquals(0x08, tag[0] & 0xFF, "TagType 应为音频 0x08");
+        int dataLength = ((tag[1] & 0xFF) << 16) | ((tag[2] & 0xFF) << 8) | (tag[3] & 0xFF);
+        assertEquals(2 + 2, dataLength, "序列头数据长度应为2字节ASC + 2字节音频信息");
+
+        // AACPacketType=0
+        assertEquals(0xAF, tag[11] & 0xFF);
+        assertEquals(0x00, tag[12] & 0xFF, "AACPacketType 应为 0 (sequence header)");
+
+        // AudioSpecificConfig check: profile=2 (AAC-LC), freqIndex=11 (8000Hz), channelConfig=1
+        byte asc0 = tag[13];
+        byte asc1 = tag[14];
+        // asc0 = (profile << 3) | (freqIndex >> 1) => (2<<3)=16, (11>>1)=5 -> 21 (0x15)
+        assertEquals(0x15, asc0 & 0xFF, "ASC[0] 应为 0x15 对应 AAC-LC@8000Hz");
+        // asc1 = ((freqIndex & 1) << 7) | (channels << 3) => 1<<7=128, 1<<3=8 -> 136 (0x88)
+        assertEquals(0x88, asc1 & 0xFF, "ASC[1] 应为 0x88 对应 1声道");
+
+        // PreviousTagSize check
+        int previousTagSize = ((tag[tag.length - 4] & 0xFF) << 24)
+                | ((tag[tag.length - 3] & 0xFF) << 16)
+                | ((tag[tag.length - 2] & 0xFF) << 8)
+                | (tag[tag.length - 1] & 0xFF);
+        assertEquals(tag.length - 4, previousTagSize, "PreviousTagSize 应等于总长度减4");
+        // Print the tag bytes
+        System.out.println("FLV AAC Sequence Header tag (" + tag.length + " bytes): " + CommonUtils.bytesToHex(tag));
+    }
+
+}

+ 205 - 0
src/test/java/com/jttserver/codec/Jtt1078MessageDecoderTest.java

@@ -0,0 +1,205 @@
+package com.jttserver.codec;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.embedded.EmbeddedChannel;
+import org.junit.jupiter.api.Test;
+
+
+import static org.junit.jupiter.api.Assertions.*;
+
+/**
+ * Tests for {@link Jtt1078MessageDecoder}: a single complete packet,
+ * sticky packets (two packets arriving in one buffer) and fragmented
+ * packets (one packet split across several inbound buffers).
+ */
+public class Jtt1078MessageDecoderTest {
+    @Test
+    void testSingleCompletePacket() {
+        // One complete packet delivered in a single inbound buffer.
+
+        // Embedded channel hosting the decoder under test
+        EmbeddedChannel channel = new EmbeddedChannel(new Jtt1078MessageDecoder());
+
+        // Build a complete JTT1078 packet:
+        // - magic 0x30316364 (4 bytes)
+        // - header is 18 bytes in total
+        // - the 2-byte body-length field occupies the last 2 header bytes, set to 5
+        // - body: 5 bytes
+        byte[] packet = new byte[] {
+                0x30, 0x31, 0x63, 0x64, // magic 0x30316364 (4 bytes)
+                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, // filler, header bytes 5-15
+                (byte) 0x40, // high nibble 0100 -> 18-byte header length
+                0x00, 0x05, // body length = 5 (big-endian, last 2 header bytes)
+                0x11, 0x22, 0x33, 0x44, 0x55 // 5 bytes of payload
+        };
+
+        // Feed the complete packet
+        channel.writeInbound(Unpooled.copiedBuffer(packet));
+
+        // Read the decoded frame
+        Object result = channel.readInbound();
+
+        // The decoder should emit the packet unchanged as a byte[]
+        assertNotNull(result, "Result should not be null");
+        assertTrue(result instanceof byte[], "Result should be byte[]");
+        byte[] decodedPacket = (byte[]) result;
+        assertEquals(packet.length, decodedPacket.length);
+        assertArrayEquals(packet, decodedPacket);
+
+        // No further output expected
+        assertNull(channel.readInbound());
+        assertFalse(channel.finish()); // channel holds no leftover data, so finish() returns false
+    }
+
+    @Test
+    void testStickyPackets() {
+        // Sticky-packet case: two complete packets concatenated in one buffer
+        EmbeddedChannel channel = new EmbeddedChannel(new Jtt1078MessageDecoder());
+
+        // Build two packets (18-byte header each)
+        byte[] packet1 = new byte[] {
+                0x30, 0x31, 0x63, 0x64, // magic (4 bytes)
+                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, // filler, header bytes 5-15
+                (byte) 0x40, // high nibble 0100 -> 18-byte header length
+                0x00, 0x03, // body length = 3 (big-endian, last 2 header bytes)
+                0x11, 0x22, 0x33 // payload (3 bytes)
+        };
+
+        byte[] packet2 = new byte[] {
+                0x30, 0x31, 0x63, 0x64, // magic (4 bytes)
+                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,  // filler, header bytes 5-15
+                (byte) 0x40, // high nibble 0100 -> 18-byte header length
+                0x00, 0x02, // body length = 2 (big-endian, last 2 header bytes)
+                0x44, 0x55 // payload (2 bytes)
+        };
+
+        // Concatenate the two packets into a single buffer
+        byte[] stickyPacket = new byte[packet1.length + packet2.length];
+        System.arraycopy(packet1, 0, stickyPacket, 0, packet1.length);
+        System.arraycopy(packet2, 0, stickyPacket, packet1.length, packet2.length);
+
+        // Feed the combined buffer
+        channel.writeInbound(Unpooled.copiedBuffer(stickyPacket));
+
+        // Debug output of channel state
+        System.out.println("Sticky packet test:");
+        System.out.println("Has inbound messages: " + channel.inboundMessages().size());
+
+        // Both packets should be decoded separately
+        Object result1 = channel.readInbound();
+        Object result2 = channel.readInbound();
+
+        System.out.println("Result1: " + result1);
+        System.out.println("Result2: " + result2);
+
+        // Verify the first packet
+        assertNotNull(result1, "First packet should not be null");
+        assertTrue(result1 instanceof byte[], "First packet should be byte[]");
+        byte[] decodedPacket1 = (byte[]) result1;
+        assertEquals(packet1.length, decodedPacket1.length);
+        assertArrayEquals(packet1, decodedPacket1);
+
+        // Verify the second packet
+        assertNotNull(result2, "Second packet should not be null");
+        assertTrue(result2 instanceof byte[], "Second packet should be byte[]");
+        byte[] decodedPacket2 = (byte[]) result2;
+        assertEquals(packet2.length, decodedPacket2.length);
+        assertArrayEquals(packet2, decodedPacket2);
+
+        // No further output expected
+        assertNull(channel.readInbound());
+        assertFalse(channel.finish()); // channel holds no leftover data, so finish() returns false
+    }
+
+    @Test
+    void testFragmentedPacket() {
+        // Fragmentation case: one packet arrives split across two writes
+        EmbeddedChannel channel = new EmbeddedChannel(new Jtt1078MessageDecoder());
+
+        // Build one complete packet
+        byte[] fullPacket = new byte[] {
+                0x30, 0x31, 0x63, 0x64, // magic (4 bytes)
+                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, // filler, header bytes 5-15
+                (byte) 0x40, // high nibble 0100 -> 18-byte header length
+                0x00, 0x04, // body length = 4 (big-endian, last 2 header bytes)
+                0x11, 0x22, 0x33, 0x44 // payload (4 bytes)
+        };
+
+        // First write delivers only the first 10 bytes
+        ByteBuf firstPart = Unpooled.copiedBuffer(fullPacket, 0, 10);
+        channel.writeInbound(firstPart); // must not produce output yet
+
+        // Nothing should have been decoded yet
+        assertNull(channel.readInbound(), "Should not have decoded data after first part");
+
+        // Second write delivers the remainder
+        ByteBuf secondPart = Unpooled.copiedBuffer(fullPacket, 10, fullPacket.length - 10);
+        channel.writeInbound(secondPart);
+
+        // The complete packet should now be decoded
+        Object result = channel.readInbound();
+        assertNotNull(result, "Result should not be null after second write");
+        assertTrue(result instanceof byte[], "Result should be byte[]");
+        byte[] decodedPacket = (byte[]) result;
+        assertEquals(fullPacket.length, decodedPacket.length);
+        assertArrayEquals(fullPacket, decodedPacket);
+
+        // No further output expected
+        assertNull(channel.readInbound());
+        assertFalse(channel.finish()); // channel holds no leftover data, so finish() returns false
+    }
+
+    @Test
+    void testMultipleFragmentedPackets() {
+        // Mixed case: first packet arrives in two chunks, second packet complete
+        EmbeddedChannel channel = new EmbeddedChannel(new Jtt1078MessageDecoder());
+
+        // Build two packets
+        byte[] packet1 = new byte[] {
+                0x30, 0x31, 0x63, 0x64, // magic (4 bytes)
+                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, // filler, header bytes 5-15
+                (byte) 0x40, // high nibble 0100 -> 18-byte header length
+                0x00, 0x02, // body length = 2 (big-endian, last 2 header bytes)
+                0x11, 0x22 // payload (2 bytes)
+        };
+
+        byte[] packet2 = new byte[] {
+                0x30, 0x31, 0x63, 0x64, // magic (4 bytes)
+                0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,  // filler, header bytes 5-15
+                (byte) 0x40, // high nibble 0100 -> 18-byte header length
+                0x00, 0x03, // body length = 3 (big-endian, last 2 header bytes)
+                0x33, 0x44, 0x55 // payload (3 bytes)
+        };
+
+        // First write delivers the first 15 bytes of packet1
+        byte[] firstChunk = new byte[15];
+        System.arraycopy(packet1, 0, firstChunk, 0, 15); // first 15 bytes of packet1
+
+        channel.writeInbound(Unpooled.copiedBuffer(firstChunk));
+
+        // Second write delivers the rest of packet1 plus the whole of packet2
+        byte[] secondChunk = new byte[packet1.length - 15 + packet2.length];
+        System.arraycopy(packet1, 15, secondChunk, 0, packet1.length - 15); // remainder of packet1
+        System.arraycopy(packet2, 0, secondChunk, packet1.length - 15, packet2.length); // complete packet2
+
+        channel.writeInbound(Unpooled.copiedBuffer(secondChunk));
+
+        // Both packets should be decoded
+        Object result1 = channel.readInbound();
+        Object result2 = channel.readInbound();
+
+        // Verify the first packet
+        assertNotNull(result1, "First packet should not be null");
+        assertTrue(result1 instanceof byte[], "First packet should be byte[]");
+        byte[] decodedPacket1 = (byte[]) result1;
+        assertEquals(packet1.length, decodedPacket1.length);
+        assertArrayEquals(packet1, decodedPacket1);
+
+        // Verify the second packet
+        assertNotNull(result2, "Second packet should not be null");
+        assertTrue(result2 instanceof byte[], "Second packet should be byte[]");
+        byte[] decodedPacket2 = (byte[]) result2;
+        assertEquals(packet2.length, decodedPacket2.length);
+        assertArrayEquals(packet2, decodedPacket2);
+
+        // No further output expected
+        assertNull(channel.readInbound());
+        assertFalse(channel.finish()); // channel holds no leftover data, so finish() returns false
+    }
+}

+ 36 - 0
src/test/java/com/jttserver/codec/nativeaac/AacSmokeTest.java

@@ -0,0 +1,36 @@
+package com.jttserver.codec.nativeaac;
+
+import java.io.FileOutputStream;
+// Manual smoke test for the basic functionality of AacEncoderNative; it is not executed by "mvn test" and must be run by hand.
+
+import com.jttserver.codec.nativeaac.AacEncoderNative;
+
+@SuppressWarnings("unused")
+public class AacSmokeTest {
+
+    /**
+     * Manual smoke test for {@code AacEncoderNative}: encodes 100 frames of
+     * silent PCM into an .aac file. There are no {@code @Test} methods, so
+     * "mvn test" skips this class — run {@code main} by hand and open the
+     * output file in VLC to validate.
+     */
+    public static void main(String[] args) throws Exception {
+        int sampleRate = 8000; // sample rate in Hz
+        int channels = 1; // channel count (mono)
+        int bitrate = 16000; // target bitrate in bit/s
+        int aot = 2; // audio object type: 2 = AAC-LC
+        boolean useAdts = true; // wrap each encoded frame in an ADTS header
+
+        // Initialise the native encoder; handle identifies the encoder instance
+        long handle = AacEncoderNative.initEncoder(sampleRate, channels, bitrate, aot, useAdts);
+
+        String testFileName = "test_" + System.currentTimeMillis() + ".aac";
+
+        try (FileOutputStream fos = new FileOutputStream(testFileName)) {
+            // One frame = 1024 samples per channel of silence (all-zero PCM)
+            short[] pcmFrame = new short[1024 * channels];
+            for (int i = 0; i < 100; i++) {
+                byte[] adts = AacEncoderNative.encodeFrame(handle, pcmFrame);
+                if (adts != null && adts.length > 0) {
+                    fos.write(adts);
+                }
+            }
+        }
+
+        AacEncoderNative.close(handle);
+        System.out.println("Wrote " + testFileName + ". Open it with VLC to validate.");
+    }
+}

+ 64 - 0
src/test/java/com/jttserver/config/ConfigManagerTest.java

@@ -0,0 +1,64 @@
+package com.jttserver.config;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.*;
+
+/**
+ * Tests for {@link ConfigManager}'s device-management switch: default
+ * state, enabling, disabling, and idempotence of repeated calls.
+ */
+public class ConfigManagerTest {
+
+    @BeforeEach
+    public void setUp() {
+        // Reset device management to its default (disabled) state before each test
+        ConfigManager.disableDeviceManagement();
+    }
+
+    @Test
+    public void testDefaultState() {
+        // Device management should be disabled by default
+        assertFalse(ConfigManager.isDeviceManagementEnabled(),
+                "设备管理功能默认应该处于禁用状态");
+    }
+
+    @Test
+    public void testEnableDeviceManagement() {
+        // Enabling should be reflected by the query method
+        ConfigManager.enableDeviceManagement();
+        assertTrue(ConfigManager.isDeviceManagementEnabled(),
+                "启用设备管理功能后应该返回true");
+    }
+
+    @Test
+    public void testDisableDeviceManagement() {
+        // Enable first
+        ConfigManager.enableDeviceManagement();
+        assertTrue(ConfigManager.isDeviceManagementEnabled(),
+                "启用设备管理功能后应该返回true");
+
+        // Then disable again
+        ConfigManager.disableDeviceManagement();
+        assertFalse(ConfigManager.isDeviceManagementEnabled(),
+                "禁用设备管理功能后应该返回false");
+    }
+
+    @Test
+    public void testMultipleEnableCalls() {
+        // Repeated enable calls must be idempotent
+        ConfigManager.enableDeviceManagement();
+        ConfigManager.enableDeviceManagement();
+        ConfigManager.enableDeviceManagement();
+
+        assertTrue(ConfigManager.isDeviceManagementEnabled(),
+                "多次启用设备管理功能后应该仍然处于启用状态");
+    }
+
+    @Test
+    public void testMultipleDisableCalls() {
+        // Repeated disable calls must be idempotent
+        ConfigManager.disableDeviceManagement();
+        ConfigManager.disableDeviceManagement();
+        ConfigManager.disableDeviceManagement();
+
+        assertFalse(ConfigManager.isDeviceManagementEnabled(),
+                "多次禁用设备管理功能后应该仍然处于禁用状态");
+    }
+}

+ 206 - 0
src/test/java/com/jttserver/device/DeviceManagerTest.java

@@ -0,0 +1,206 @@
+package com.jttserver.device;
+
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import com.jttserver.config.ConfigManager;
+
+import java.util.Collection;
+import java.util.Map;
+
+
+
+import static org.junit.jupiter.api.Assertions.*;
+
+/**
+ * Tests for {@link DeviceManager}: registration/unregistration, field
+ * updates, behaviour while device management is disabled, and the
+ * {@code DeviceInfo} accessors. The static device map is cleared via
+ * reflection before each test so tests are independent.
+ */
+public class DeviceManagerTest {
+
+    @BeforeEach
+    public void setUp() {
+        // Ensure device management is enabled before each test
+        ConfigManager.enableDeviceManagement();
+
+        // Clear the private static device-info map via reflection so every
+        // test starts from an empty registry
+        try {
+            java.lang.reflect.Field deviceInfoMapField = DeviceManager.class.getDeclaredField("deviceInfoMap");
+            deviceInfoMapField.setAccessible(true);
+            ((Map<?, ?>) deviceInfoMapField.get(null)).clear();
+        } catch (Exception e) {
+            fail("无法清理设备信息映射表: " + e.getMessage());
+        }
+    }
+
+    @AfterEach
+    public void tearDown() {
+        // Restore the default (disabled) state after each test
+        ConfigManager.disableDeviceManagement();
+    }
+
+    @Test
+    public void testRegisterDevice() {
+        // Registering a device makes it visible in the connected-device list
+        String channelId = "test-channel-1";
+        DeviceManager.DeviceInfo deviceInfo = new DeviceManager.DeviceInfo(channelId, "127.0.0.1:8080");
+
+        DeviceManager.registerDevice(channelId, deviceInfo);
+
+        Collection<DeviceManager.DeviceInfo> devices = DeviceManager.getConnectedDevices();
+        assertEquals(1, devices.size());
+        assertTrue(devices.stream().anyMatch(d -> d.getChannelId().equals(channelId)));
+    }
+
+    @Test
+    public void testUnregisterDevice() {
+        // Unregistering removes the device from the registry
+        String channelId = "test-channel-1";
+        DeviceManager.DeviceInfo deviceInfo = new DeviceManager.DeviceInfo(channelId, "127.0.0.1:8080");
+
+        DeviceManager.registerDevice(channelId, deviceInfo);
+        assertEquals(1, DeviceManager.getConnectedDevices().size());
+
+        DeviceManager.unregisterDevice(channelId);
+        assertEquals(0, DeviceManager.getConnectedDevices().size());
+    }
+
+    @Test
+    public void testUpdateDeviceActiveTime() {
+        // updateDeviceActiveTime must advance the device's last-active timestamp
+        String channelId = "test-channel-1";
+        DeviceManager.DeviceInfo deviceInfo = new DeviceManager.DeviceInfo(channelId, "127.0.0.1:8080");
+        long initialActiveTime = deviceInfo.getLastActiveTime();
+
+        DeviceManager.registerDevice(channelId, deviceInfo);
+
+        // Short pause so the clock advances measurably
+        try {
+            Thread.sleep(10);
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+        }
+
+        DeviceManager.updateDeviceActiveTime(channelId);
+
+        DeviceManager.DeviceInfo updatedDeviceInfo = DeviceManager.getConnectedDevices().iterator().next();
+        assertTrue(updatedDeviceInfo.getLastActiveTime() > initialActiveTime);
+    }
+
+    @Test
+    public void testUpdateDeviceSimCardNumber() {
+        // updateDeviceSimCardNumber must store the new SIM card number
+        String channelId = "test-channel-1";
+        DeviceManager.DeviceInfo deviceInfo = new DeviceManager.DeviceInfo(channelId, "127.0.0.1:8080");
+        String simCardNumber = "123456789012345";
+
+        DeviceManager.registerDevice(channelId, deviceInfo);
+        DeviceManager.updateDeviceSimCardNumber(channelId, simCardNumber);
+
+        DeviceManager.DeviceInfo updatedDeviceInfo = DeviceManager.getConnectedDevices().iterator().next();
+        assertEquals(simCardNumber, updatedDeviceInfo.getSimCardNumber());
+    }
+
+    @Test
+    public void testUpdateDeviceLogicChannelNumber() {
+        // updateDeviceLogicChannelNumber must store the new logical channel number
+        String channelId = "test-channel-1";
+        DeviceManager.DeviceInfo deviceInfo = new DeviceManager.DeviceInfo(channelId, "127.0.0.1:8080");
+        byte logicChannelNumber = 3;
+
+        DeviceManager.registerDevice(channelId, deviceInfo);
+        DeviceManager.updateDeviceLogicChannelNumber(channelId, logicChannelNumber);
+
+        DeviceManager.DeviceInfo updatedDeviceInfo = DeviceManager.getConnectedDevices().iterator().next();
+        assertEquals(logicChannelNumber, updatedDeviceInfo.getLogicChannelNumber());
+    }
+
+    @Test
+    public void testGetConnectedDevices() {
+        // getConnectedDevices must report every registered device
+        String channelId1 = "test-channel-1";
+        String channelId2 = "test-channel-2";
+        DeviceManager.DeviceInfo deviceInfo1 = new DeviceManager.DeviceInfo(channelId1, "127.0.0.1:8080");
+        DeviceManager.DeviceInfo deviceInfo2 = new DeviceManager.DeviceInfo(channelId2, "127.0.0.1:8081");
+
+        DeviceManager.registerDevice(channelId1, deviceInfo1);
+        DeviceManager.registerDevice(channelId2, deviceInfo2);
+
+        Collection<DeviceManager.DeviceInfo> devices = DeviceManager.getConnectedDevices();
+        assertEquals(2, devices.size());
+        assertTrue(devices.stream().anyMatch(d -> d.getChannelId().equals(channelId1)));
+        assertTrue(devices.stream().anyMatch(d -> d.getChannelId().equals(channelId2)));
+    }
+
+    @Test
+    public void testDeviceManagementDisabled() {
+        // With device management disabled, all operations become no-ops
+        ConfigManager.disableDeviceManagement();
+
+        String channelId = "test-channel-1";
+        DeviceManager.DeviceInfo deviceInfo = new DeviceManager.DeviceInfo(channelId, "127.0.0.1:8080");
+
+        DeviceManager.registerDevice(channelId, deviceInfo);
+        Collection<DeviceManager.DeviceInfo> devices = DeviceManager.getConnectedDevices();
+        assertTrue(devices.isEmpty());
+
+        // Even after registering, nothing is retrievable while disabled
+        DeviceManager.unregisterDevice(channelId);
+        DeviceManager.updateDeviceActiveTime(channelId);
+        DeviceManager.updateDeviceSimCardNumber(channelId, "123456789012345");
+        DeviceManager.updateDeviceLogicChannelNumber(channelId, (byte) 1);
+
+        // None of the operations may throw while disabled
+        assertDoesNotThrow(() -> {
+            DeviceManager.registerDevice(channelId, deviceInfo);
+            DeviceManager.unregisterDevice(channelId);
+            DeviceManager.updateDeviceActiveTime(channelId);
+            DeviceManager.updateDeviceSimCardNumber(channelId, "123456789012345");
+            DeviceManager.updateDeviceLogicChannelNumber(channelId, (byte) 1);
+        });
+    }
+
+    @Test
+    public void testDeviceInfoConstructor() {
+        // The constructor must record ids/addresses and initialise both timestamps
+        String channelId = "test-channel-1";
+        String remoteAddress = "127.0.0.1:8080";
+        DeviceManager.DeviceInfo deviceInfo = new DeviceManager.DeviceInfo(channelId, remoteAddress);
+
+        assertEquals(channelId, deviceInfo.getChannelId());
+        assertEquals(remoteAddress, deviceInfo.getRemoteAddress());
+        assertTrue(deviceInfo.getConnectTime() > 0);
+        assertTrue(deviceInfo.getLastActiveTime() > 0);
+        assertEquals(deviceInfo.getConnectTime(), deviceInfo.getLastActiveTime());
+    }
+
+    @Test
+    public void testDeviceInfoSettersAndGetters() {
+        // Round-trip DeviceInfo's setters and getters
+        DeviceManager.DeviceInfo deviceInfo = new DeviceManager.DeviceInfo("test-channel", "127.0.0.1:8080");
+
+        // SIM card number
+        String simCardNumber = "987654321012345";
+        deviceInfo.setSimCardNumber(simCardNumber);
+        assertEquals(simCardNumber, deviceInfo.getSimCardNumber());
+
+        // Logical channel number
+        byte logicChannelNumber = 5;
+        deviceInfo.setLogicChannelNumber(logicChannelNumber);
+        assertEquals(logicChannelNumber, deviceInfo.getLogicChannelNumber());
+
+        // Last-active timestamp update
+        long oldActiveTime = deviceInfo.getLastActiveTime();
+
+        // Short pause so the clock advances measurably
+        try {
+            Thread.sleep(10);
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+        }
+
+        deviceInfo.updateLastActiveTime();
+        assertTrue(deviceInfo.getLastActiveTime() > oldActiveTime);
+    }
+}

+ 228 - 0
src/test/java/com/jttserver/protocol/Jtt1078NaluPacketTest.java

@@ -0,0 +1,228 @@
+package com.jttserver.protocol;
+
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.BeforeEach;
+import static org.junit.jupiter.api.Assertions.*;
+
+/**
+ * Tests the logic that reassembles one NALU from multiple JTT1078
+ * sub-packets: atomic packets, first/middle/last fragment sequences,
+ * discarding of orphan fragments, and sequence-number continuity.
+ */
+public class Jtt1078NaluPacketTest {
+    private Jtt1078NaluPacket naluPacket;
+
+    @BeforeEach
+    void setUp() {
+        naluPacket = new Jtt1078NaluPacket();
+    }
+
+    /**
+     * Builds a JTT1078 packet for testing.
+     *
+     * @param mFlag                M (frame-boundary) flag
+     * @param subpackageFlag       fragmentation marker (0=atomic, 1=first, 2=last, 3=middle)
+     * @param packetSequenceNumber packet sequence number
+     * @param naluData             NALU payload; {@code null} is treated as empty
+     * @return populated Jtt1078Packet
+     */
+    private Jtt1078PacketParser.Jtt1078Packet createTestPacket(byte mFlag, byte subpackageFlag,
+            int packetSequenceNumber, byte[] naluData) {
+        // Normalise the payload FIRST: the original code read naluData.length
+        // before its own null check, so a null argument would have thrown an
+        // NPE despite the guard.
+        byte[] body = (naluData != null) ? naluData : new byte[0];
+        Jtt1078PacketParser.Jtt1078Packet packet = new Jtt1078PacketParser.Jtt1078Packet();
+        packet.mFlag = mFlag;
+        packet.payloadType = 98; // H.264
+        packet.packetSequenceNumber = packetSequenceNumber;
+        packet.simCardNumber = new byte[] { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
+        packet.logicChannelNumber = 1;
+        packet.dataType = 0x00; // I-frame
+        packet.subpackageFlag = subpackageFlag;
+        packet.timestamp = new byte[] { 0, 0, 0, 0, 0, 0, 0, 1 };
+        packet.lastIFrameInterval = 0;
+        packet.lastFrameInterval = 0;
+        packet.dataBodyLength = body.length;
+        packet.naluData = body;
+        return packet;
+    }
+
+    /**
+     * A single atomic packet carries a complete NALU on its own.
+     */
+    @Test
+    void testAddSinglePacket() {
+        // Atomic packet (subpackageFlag=0)
+        Jtt1078PacketParser.Jtt1078Packet packet = createTestPacket((byte) 0, (byte) 0, 1, new byte[] { 1, 2, 3, 4 });
+
+        // Add the packet and check completion
+        boolean isComplete = naluPacket.addPacket(packet);
+
+        // An atomic packet completes immediately
+        assertTrue(isComplete, "原子包应该立即完成");
+
+        // Payload must be returned unchanged
+        byte[] completeData = naluPacket.getCompleteNaluData();
+        assertArrayEquals(new byte[] { 1, 2, 3, 4 }, completeData, "数据应该匹配");
+    }
+
+    /**
+     * A NALU assembled from a first fragment plus a last fragment.
+     */
+    @Test
+    void testAddFirstAndLastPackets() {
+        // First fragment (subpackageFlag=1)
+        Jtt1078PacketParser.Jtt1078Packet firstPacket = createTestPacket((byte) 0, (byte) 1, 1, new byte[] { 1, 2 });
+
+        // Add the first fragment
+        boolean isFirstComplete = naluPacket.addPacket(firstPacket);
+        assertFalse(isFirstComplete, "首包不应该完成NALU单元");
+
+        // Last fragment (subpackageFlag=2) carrying the M flag
+        Jtt1078PacketParser.Jtt1078Packet lastPacket = createTestPacket((byte) 1, (byte) 2, 2, new byte[] { 3, 4 });
+
+        // Add the last fragment
+        boolean isLastComplete = naluPacket.addPacket(lastPacket);
+        assertTrue(isLastComplete, "带M标志的末包应该完成NALU单元");
+
+        // Verify the concatenated payload
+        byte[] completeData = naluPacket.getCompleteNaluData();
+        assertArrayEquals(new byte[] { 1, 2, 3, 4 }, completeData, "组合数据应该匹配");
+    }
+
+    /**
+     * A NALU assembled from first, middle and last fragments.
+     */
+    @Test
+    void testAddFirstMiddleAndLastPackets() {
+        // First fragment (subpackageFlag=1)
+        Jtt1078PacketParser.Jtt1078Packet firstPacket = createTestPacket((byte) 0, (byte) 1, 1, new byte[] { 1, 2 });
+        naluPacket.addPacket(firstPacket);
+
+        // Middle fragment (subpackageFlag=3)
+        Jtt1078PacketParser.Jtt1078Packet middlePacket = createTestPacket((byte) 0, (byte) 3, 2, new byte[] { 3, 4 });
+        boolean isMiddleComplete = naluPacket.addPacket(middlePacket);
+        assertFalse(isMiddleComplete, "中间包不应该完成NALU单元");
+
+        // Last fragment (subpackageFlag=2) carrying the M flag
+        Jtt1078PacketParser.Jtt1078Packet lastPacket = createTestPacket((byte) 1, (byte) 2, 3, new byte[] { 5, 6 });
+        boolean isLastComplete = naluPacket.addPacket(lastPacket);
+        assertTrue(isLastComplete, "带M标志的末包应该完成NALU单元");
+
+        // Verify the concatenated payload
+        byte[] completeData = naluPacket.getCompleteNaluData();
+        assertArrayEquals(new byte[] { 1, 2, 3, 4, 5, 6 }, completeData, "组合数据应该匹配");
+    }
+
+    /**
+     * A new first fragment discards any previously buffered fragments.
+     */
+    @Test
+    void testDiscardPreviousPacketsWhenNewFirstPacketArrives() {
+
+        // Buffer an unfinished first fragment
+        Jtt1078PacketParser.Jtt1078Packet firstPacket = createTestPacket((byte) 0, (byte) 1, 1, new byte[] { 1, 2 });
+        naluPacket.addPacket(firstPacket);
+
+        // The fragment must be buffered
+        assertFalse(naluPacket.isEmpty(), "应该有包数据");
+
+        // A second first-fragment should discard the earlier one
+        Jtt1078PacketParser.Jtt1078Packet newFirstPacket = createTestPacket((byte) 0, (byte) 1, 2, new byte[] { 3, 4 });
+        naluPacket.addPacket(newFirstPacket);
+
+        // Complete the new sequence with a last fragment
+        Jtt1078PacketParser.Jtt1078Packet lastPacket = createTestPacket((byte) 1, (byte) 2, 3, new byte[] { 5, 6 });
+        boolean isComplete = naluPacket.addPacket(lastPacket);
+
+        // Only the new sequence's data may remain
+        byte[] completeData = naluPacket.getCompleteNaluData();
+        assertArrayEquals(new byte[] { 3, 4, 5, 6 }, completeData, "应该只包含新序列的数据");
+        assertTrue(isComplete, "新序列应该完成");
+    }
+
+    /**
+     * A middle fragment with no preceding first fragment is discarded.
+     */
+    @Test
+    void testDiscardMiddlePacketWithoutPreviousPackets() {
+        // Middle fragment arriving with nothing buffered
+        Jtt1078PacketParser.Jtt1078Packet middlePacket = createTestPacket((byte) 0, (byte) 3, 1, new byte[] { 1, 2 });
+        boolean isComplete = naluPacket.addPacket(middlePacket);
+
+        assertFalse(isComplete, "没有起始包的中间包应该被丢弃");
+        assertTrue(naluPacket.isEmpty(), "不应该有任何包数据");
+    }
+
+    /**
+     * A last fragment with no preceding first fragment is discarded.
+     */
+    @Test
+    void testDiscardLastPacketWithoutPreviousPackets() {
+        // Last fragment arriving with nothing buffered
+        Jtt1078PacketParser.Jtt1078Packet lastPacket = createTestPacket((byte) 1, (byte) 2, 1, new byte[] { 1, 2 });
+        boolean isComplete = naluPacket.addPacket(lastPacket);
+
+        assertFalse(isComplete, "没有起始包的末包应该被丢弃");
+        assertTrue(naluPacket.isEmpty(), "不应该有任何包数据");
+    }
+
+    /**
+     * Packet sequence-number continuity: per the assertions below, a gap in
+     * sequence numbers causes the whole pending sequence to be dropped.
+     * (The original comment claimed a gap only logged a warning, which
+     * contradicted these assertions.)
+     */
+    @Test
+    void testPacketSequenceNumberContinuity() {
+        // First fragment
+        Jtt1078PacketParser.Jtt1078Packet firstPacket = createTestPacket((byte) 0, (byte) 1, 1, new byte[] { 1 });
+        naluPacket.addPacket(firstPacket);
+
+        // Fragment with a sequence-number gap (1 -> 3)
+        Jtt1078PacketParser.Jtt1078Packet jumpPacket = createTestPacket((byte) 0, (byte) 3, 3, new byte[] { 2 });
+        // Log output cannot be asserted directly; we only drive the handling logic
+        naluPacket.addPacket(jumpPacket);
+
+        // Last fragment ending the sequence
+        Jtt1078PacketParser.Jtt1078Packet lastPacket = createTestPacket((byte) 1, (byte) 2, 4, new byte[] { 3 });
+        boolean isComplete = naluPacket.addPacket(lastPacket);
+
+        assertFalse(isComplete, "包序号不连续应该被丢弃");
+        assertTrue(naluPacket.isEmpty(), "不应该有任何包数据,之前的包也应该被丢弃");
+    }
+
+    /**
+     * clear() empties any buffered fragments.
+     */
+    @Test
+    void testClearFunction() {
+        // Buffer some data
+        Jtt1078PacketParser.Jtt1078Packet packet = createTestPacket((byte) 0, (byte) 1, 1, new byte[] { 1, 2 });
+        naluPacket.addPacket(packet);
+
+        // Data must be present
+        assertFalse(naluPacket.isEmpty(), "应该有包数据");
+
+        // Clear the buffer
+        naluPacket.clear();
+
+        // Buffer must be empty afterwards
+        assertTrue(naluPacket.isEmpty(), "清空后应该没有包数据");
+    }
+
+    /**
+     * getLatestParams() reflects the most recently added packet.
+     */
+    @Test
+    void testGetLatestParams() {
+        // First fragment
+        Jtt1078PacketParser.Jtt1078Packet firstPacket = createTestPacket((byte) 0, (byte) 1, 1, new byte[] { 1, 2 });
+        naluPacket.addPacket(firstPacket);
+
+        // Last fragment
+        Jtt1078PacketParser.Jtt1078Packet lastPacket = createTestPacket((byte) 1, (byte) 2, 2, new byte[] { 3, 4 });
+        naluPacket.addPacket(lastPacket);
+
+        // Fetch the latest parameters
+        Jtt1078PacketParams params = naluPacket.getLatestParams();
+
+        assertNotNull(params, "应该能获取到参数");
+        assertEquals(1, params.mFlag, "M标志应该匹配");
+        assertEquals(2, params.subpackageFlag, "分包标记应该匹配");
+        assertEquals(2, params.packetSequenceNumber, "包序号应该匹配");
+    }
+
+}

+ 247 - 0
src/test/java/com/jttserver/protocol/Jtt1078PacketParserTest.java

@@ -0,0 +1,247 @@
+package com.jttserver.protocol;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+/**
+ * Unit tests for {@link Jtt1078PacketParser}, covering raw bit-field parsing,
+ * real captured video/audio packets, and a hand-built transparent-data packet.
+ */
+class Jtt1078PacketParserTest {
+
+
+    /**
+     * Converts a hexadecimal string into a byte array (two hex chars per byte).
+     * NOTE(review): assumes an even-length string of valid hex digits; invalid
+     * characters would silently yield garbage bytes — confirm inputs are trusted.
+     *
+     * @param hexString hexadecimal string
+     * @return decoded byte array
+     */
+    private byte[] hexStringToByteArray(String hexString) {
+        int len = hexString.length();
+        byte[] data = new byte[len / 2];
+        for (int i = 0; i < len; i += 2) {
+            data[i / 2] = (byte) ((Character.digit(hexString.charAt(i), 16) << 4)
+                    + Character.digit(hexString.charAt(i+1), 16));
+        }
+        return data;
+    }
+
+
+    @Test
+    void testByteParsingLogic() {
+        // Exercises the same bit-shifting the parser uses, in isolation.
+
+        // 5th header byte: top bit = M flag, low 7 bits = payload type.
+        byte fifthByte = (byte) 0x81; // 10000001
+        byte mFlag = (byte) ((fifthByte >> 7) & 0x01); // expected 1
+        byte payloadType = (byte) (fifthByte & 0x7F); // expected 1
+        
+        assertEquals(1, mFlag);
+        assertEquals(1, payloadType);
+        
+        // 15th header byte: high nibble = data type, low nibble = subpackage flag.
+        byte fifteenthByte = 0x02; // 00000010
+        byte dataType = (byte) ((fifteenthByte >> 4) & 0x0F); // expected 0
+        byte subpackageFlag = (byte) (fifteenthByte & 0x0F); // expected 2
+        
+        assertEquals(0, dataType);
+        assertEquals(2, subpackageFlag);
+    }
+
+
+    @Test
+    void testParseWithHexData() {
+        // Raw JTT/T 1078 video packet captured from a device, hex-encoded.
+        String hexData = "303163648162000001380013899902010000016BB392CA7C0000000003b600000001674d00149db85825a100000300010000030008840000000168ee3c800000000106e501ab800000000165b800000303fabc11ff64c622eedfedd87fc1c413b3d5a4b602df6282226efe3cb96e93820d78e7193c68ebf22f7fb182b5f32a0e35d236ea45a8fb487769b0354aac0498ccc58e2dd3af04d28c354d608c9b0b794c10e13aa5c450731378135aac4ac2780de4ba66b6108f1eb68f5f256b461c3727c995c219b128b8f03c1ebcf9260fe614fe5910c753cd6b93bea25ac1a849a3f3fed54897b7d9da7c2ba45916379764211bee142306893874834835334ee8cef218450716764d5d43561dd3a71f2bc464f20b71ef1a679ed170422279cf68b192d7411725f3a256334cc03e52c0398dc1a5477c2359fcfad05bef252a8f2a64b47c00e4260fc7f53fb4dce7975cc56cb742259897e1bbb294bc20530e6d83a77efa229aee56a7e55d537044a12a1747b7bae6919175b0cfa3d4d2cebac33da7b87f46a7b1fa693b99d9d6224457accddc0560a6358c2b1214d7b24e67b289876655c98f4c79a5cbe948c563f8ba913bff3ffead4cd04927709e82f836267c18346285eba3731c75324e38c53ba3856cf08f3d2bf47b17e7d1d2c4762a45fc2dd09d1cafb7e7c982c0ce8b08ab4339cc0f50cdd56b1b40e6e82401671925b383047358431d392cf666a3da83887976def40cfb6f517af9cbd7a9708d9d0156aa9c9d3aded45b60f03d97c456fdccf0c3a7c8c2443fccce0dcbf348ebec0975482089ff4f5dc2db25dc25c1325b45f179e02ae71249a377bd4662d2baa93e8792e6391471e2afced5ae5c92681af1015b98b6a93f03b0bb56d194e1684fcd29b4e691f0844d13b9787a62f3591cf819da316c0c62607efd2c108b91291c7b93e78b09fab10c4d42d6c119614ee0d2b2f91ffb349522d30b336fe3c3ec683b1748541fc502905dc1c40822c76b618b6f6654909bfcc4d24b8652aee4cea8b2707400ced930aba5e57483882cf16972404d3daee6a2fd7cd524a9b8012585600f661311ad77806ee56c882ffa3c1792c16f7775026067f394cc889a09cad85f4c1a6fbad42535cd30f8ee267638629043950a31623dd969c3fa9d6fb2138d37593cf68812167c82886815442d9fbadeaf0ac1cecfffe7c87b95e663ede4f4a56d6be924fe8dc8119b3f2d5a16ef44bf07c934138e9cb4a7a07ee6c9f8dd6aae40c6161c7762920167fe754063d6fc9a2ecf6d12fe3523d57a4efa8fc9ddeb98206e23d43e2868abe68c97b9d53b96d63c849de85c7b8cfef54d0fe7843564c24fe0e2874c899e8316da92f8c56a3ffcb75c911fe31c";
+
+        // Decode the hex string into on-wire bytes.
+        byte[] data = hexStringToByteArray(hexData);
+
+        // Wrap in a Netty ByteBuf, as the parser expects.
+        ByteBuf buf = Unpooled.wrappedBuffer(data);
+
+        // Parse the packet.
+        Jtt1078PacketParser.Jtt1078Packet packet = Jtt1078PacketParser.parse(buf);
+
+        // Print the parse result for manual inspection.
+        System.out.println("Parsed packet: " + packet.toString());
+
+        // Verify the parsed header fields.
+        assertNotNull(packet);
+        assertEquals(0, packet.mFlag); // M frame-boundary flag (top bit of payload byte)
+        assertEquals(98, packet.payloadType); // payload type (low 7 bits; 98 = H.264 video)
+        assertEquals(0, packet.packetSequenceNumber); // packet sequence number
+        assertArrayEquals(new byte[] { 0x01, 0x38, 0x00, 0x13, (byte) 0x89, (byte) 0x99 }, packet.simCardNumber); // SIM card number (BCD)
+        assertEquals(2, packet.logicChannelNumber); // logical channel number
+        assertEquals(0, packet.dataType); // data type (high nibble of byte 15; 0 = video I-frame)
+        assertEquals(1, packet.subpackageFlag); // subpackage flag (low nibble of byte 15)
+
+        // Timestamp must be present for audio/video packets.
+        assertNotNull(packet.timestamp);
+        assertEquals(8, packet.timestamp.length);
+        assertArrayEquals(
+                new byte[] { 0x00, 0x00, 0x01, (byte) 0x6B, (byte) 0xB3, (byte) 0x92, (byte) 0xCA, (byte) 0x7C },
+                packet.timestamp);
+
+        // Frame-interval fields must be present (this is a video frame).
+        assertEquals(0, packet.lastIFrameInterval);
+        assertEquals(0, packet.lastFrameInterval);
+
+        // Verify body length and that the NALU payload matches it.
+        assertEquals(950, packet.dataBodyLength);
+        assertNotNull(packet.naluData);
+        assertEquals(packet.dataBodyLength, packet.naluData.length);
+
+    }
+
+    @Test
+    void testParseWithHexData2() {
+        // Second captured video packet (from the "1078测试数据HEX.TXT" sample set).
+        String hexData = "30316364816200d4042011878285051300000000000d6b1700c8005003b62cb9efd6c824894b43c1465d04889a73ff95c73b0b1dd776f4c59cfad6e1bd6a99a4e640acab79d8f9f2544aa25e64d876ea9e0f6d01b4618cd132527b422596182bb1461f8a1be7c8048510b9139e4fe2d8a59ff5f216e4e5349eeef94a5e344281fcdec687548d9c7c1472908858b0f4a7a2cf9a73649b270fe4c29ba14860832e96d95099dc7a8460faac6c74471a8a98015aee5915cd1f8cab4ea35343a3e702760ffb903a5dc502b7835fb629833df18d4e45eda90c49c669341f21c78780e055d93feb340461ce851fa62c9cae8a7bf7b8b641c5318684776fa012066e6b8dfa16a98d9ecd59e6a43ee2f5397fd5943d83db56d8914aff2c757efb9eba7bb101bcb9a7d4c4351558dabe0ac9247542821d11b5c7781f99404c51a183cac4468360fc89cae75bbe3aebc5a2b41883ba0fd6867e364e72818215b751ccb4198fb43f9648b5bfc5d566e31328ebe45ec8f909a936629d71579cee9f53304323210aaa3cb2521b1ef4f85a29a7687446f2b862619aa8c513c644a10e2f1fc3945630d6fffeeede1e00ae19ba2c4c5822989da1ebfd7f034a17799d1ea71b63691cc5ea597fbfe2c4b4c3341b9d3a97fd43b710980e25f345bb8f549bcd761951b00d140dc899bcf7fa9ebce34872c44dc087912cb9a57ba11de3791cd93acd03dcbb3781d6749d9986f728aee8e13bd602d71199e2649c016212a4b4ef928b55b1e87f09a961b33307ac5ae5bab5a8e8c26bc0282cb8b03b6986c6faca8d8ee13ecaf857f531bcc8ed373640c0ab113a65c253ffd63a92fbbc4147cb75e58170bd1ac5890220e8d96dcce234ab9d992a912138a3354e9712582aea7f110f332498ae5e5bc16fe479ba335ca38f65b7e36cc2f8d1635045c675df76d2641bf1a262edbece95f8b8bd354126d9050e97c4e18134e3968fc17c641f5292d48c76bb02cec6b459a4836f20ef258de52d186698d819d9a2c5a61cf6af399e6496b97c6413fe87b9818c59b9e48181d60e9ad4518c2b5e77de597acc53873ddfebc947dd1400b22842337cc87be44e355a726f13283b235c442dd262ed48fee7ca916fed51689bf6cad8bd832d1b25cb5759a3feaf5be3bd07a7c9c20cea2e461849dbbf55bb4b6edc8125df0efbd52d97247791e3c9a38d9fdc227de4befa0099106eafa5453d5160957d755d6db46bfead069e38bcab24869a085c49714f8e238177fcb4c07e9dc0eaeb37a898ca47ebb42f08a44f042a13cf3c7968fe3563b74c61b9005fe0598720a2955f38f1d02b4a2ab0b97b399fcab7cf09f2ded39bbfb49f549bd41b5ca6fca5dc3807f175f8f7bd518571732e";
+
+        // Decode the hex string into on-wire bytes.
+        byte[] data = hexStringToByteArray(hexData);
+
+        // Wrap in a Netty ByteBuf.
+        ByteBuf buf = Unpooled.wrappedBuffer(data);
+
+        // Parse the packet.
+        Jtt1078PacketParser.Jtt1078Packet packet = Jtt1078PacketParser.parse(buf);
+
+        // Print the parse result for manual inspection.
+        System.out.println("Parsed packet: " + packet.toString());
+
+        // Verify the parsed header fields.
+        assertNotNull(packet);
+        assertEquals(0, packet.mFlag); // M frame-boundary flag (top bit of payload byte)
+        assertEquals(98, packet.payloadType); // payload type (low 7 bits; 98 = H.264 video)
+        assertEquals(212, packet.packetSequenceNumber); // packet sequence number (0x00D4)
+        assertArrayEquals(new byte[] { 0x04, (byte) 0x20, 0x11, (byte) 0x87, (byte) 0x82, (byte) 0x85 },
+                packet.simCardNumber); // SIM card number (BCD)
+        assertEquals(5, packet.logicChannelNumber); // logical channel number
+        assertEquals(1, packet.dataType); // data type (high nibble of byte 15; 1 = video P-frame — TODO confirm against spec)
+        assertEquals(3, packet.subpackageFlag); // subpackage flag (low nibble of byte 15)
+
+        // Timestamp must be present for audio/video packets.
+        assertNotNull(packet.timestamp);
+        assertEquals(8, packet.timestamp.length);
+        assertArrayEquals(new byte[] { 0x00, 0x00, 0x00, 0x00, 0x00, 0x0D, (byte) 0x6B, (byte) 0x17 },
+                packet.timestamp);
+
+        // Frame-interval fields must be present (this is a video frame).
+        assertEquals(200, packet.lastIFrameInterval);
+        assertEquals(80, packet.lastFrameInterval);
+
+        // Verify body length and that the NALU payload matches it.
+        assertEquals(950, packet.dataBodyLength);
+        assertNotNull(packet.naluData);
+        assertEquals(packet.dataBodyLength, packet.naluData.length);
+
+    }
+
+    @Test
+    void testParseTransparentDataPacket() {
+        // Builds a complete JTT1078 transparent-data packet (data type 0100) by hand.
+
+        // Fixed 4-byte frame-header magic 0x30316364, followed by the RTP-style header.
+        byte[] packetHeader = new byte[] {
+            0x30, 0x31, 0x63, 0x64,  // frame-header magic
+            (byte) 0x81,                    // version byte
+            (byte) 0xDB,             // M flag (1) + payload type (91)
+            0x00, 0x01,              // packet sequence number
+            0x01, 0x34, 0x02, 0x48, 0x03, (byte) 0x91, // SIM card number, BCD
+            0x03,                    // logical channel number
+            0x40,                    // data type (0100 = transparent data) + subpackage flag (0000 = atomic)
+            0x00, 0x10               // data body length (16 bytes)
+        };
+        
+        // Data body: 16 bytes of transparent payload (0..15).
+        byte[] dataBody = new byte[16];
+        for (int i = 0; i < dataBody.length; i++) {
+            dataBody[i] = (byte) (i & 0xFF);
+        }
+        
+        // Full packet = header + body (the parser receives the complete packet).
+        byte[] fullPacket = new byte[packetHeader.length + dataBody.length];
+        System.arraycopy(packetHeader, 0, fullPacket, 0, packetHeader.length);
+        System.arraycopy(dataBody, 0, fullPacket, packetHeader.length, dataBody.length);
+        
+        // Wrap in a Netty ByteBuf.
+        ByteBuf buf = Unpooled.wrappedBuffer(fullPacket);
+        
+        // Parse the packet.
+        Jtt1078PacketParser.Jtt1078Packet packet = Jtt1078PacketParser.parse(buf);
+        
+        // Verify the parsed header fields.
+        assertNotNull(packet);
+        assertEquals(1, packet.mFlag); // M frame-boundary flag
+        assertEquals(91, packet.payloadType); // payload type for transparent data
+        assertEquals(1, packet.packetSequenceNumber); // packet sequence number
+        assertArrayEquals(new byte[]{0x01, 0x34, 0x02, 0x48, 0x03, (byte)0x91}, packet.simCardNumber); // SIM card number
+        assertEquals(3, packet.logicChannelNumber); // logical channel number
+        assertEquals(4, packet.dataType); // data type (0100 = transparent data)
+        assertEquals(0, packet.subpackageFlag); // subpackage flag (0000 = atomic packet)
+        
+        // Transparent data carries no timestamp: empty (zero-length) array.
+        assertNotNull(packet.timestamp);
+        assertEquals(0, packet.timestamp.length);
+        
+        // Transparent data carries no frame-interval fields (-1 = absent).
+        assertEquals(-1, packet.lastIFrameInterval);
+        assertEquals(-1, packet.lastFrameInterval);
+        
+        // Verify body length and that the payload matches it.
+        assertEquals(16, packet.dataBodyLength);
+        assertNotNull(packet.naluData);
+        assertEquals(packet.dataBodyLength, packet.naluData.length);
+        
+        // Print the parse result for manual inspection.
+        System.out.println("Parsed transparent data packet: " + packet.toString());
+    }
+    
+    @Test
+    void testParseAudioData() {
+        // Captured JTT/T 1078 audio packet, hex-encoded.
+        String hexData = "30316364819A01B8042011878285053000000000000D220900A80001520034FE0A009896A092A1C480A2B71D8821CB9813D0891200B210AF3815A90A78993030D10A309C2985AA3A2D087918B93108913B8B7AAA16D10008891B6A02A1369A091B023F90A519B491920AB506C012A80A51C3A9A00BB79130C2B0939D2810BF20281B81239F2A0A80798A02018ABB3412197B308B1B215A49B1B7191E928992B39019F5900993C183C1A689039A6E93981A08008107822E8200090A889B38371E21A3";
+
+        // Decode the hex string into on-wire bytes.
+        byte[] data = hexStringToByteArray(hexData);
+
+        // Wrap in a Netty ByteBuf.
+        ByteBuf buf = Unpooled.wrappedBuffer(data);
+
+        // Parse the packet.
+        Jtt1078PacketParser.Jtt1078Packet packet = Jtt1078PacketParser.parse(buf);
+
+        // Verify the parsed header fields.
+        assertNotNull(packet);
+        assertEquals(1, packet.mFlag); // M frame-boundary flag (top bit of payload byte)
+        assertEquals(26, packet.payloadType); // audio payload type (low 7 bits)
+        assertEquals(440, packet.packetSequenceNumber); // packet sequence number (0x01B8)
+        assertArrayEquals(new byte[] { 0x04, (byte) 0x20, 0x11, (byte) 0x87, (byte) 0x82, (byte) 0x85 },
+                packet.simCardNumber); // SIM card number (BCD)
+        assertEquals(5, packet.logicChannelNumber); // logical channel number
+        assertEquals(3, packet.dataType); // data type (high nibble of byte 15; 3 = audio frame)
+        assertEquals(0, packet.subpackageFlag); // subpackage flag (low nibble of byte 15)
+
+        // Timestamp must be present (audio is not transparent data).
+        assertNotNull(packet.timestamp);
+        assertEquals(8, packet.timestamp.length);
+
+        // Frame-interval fields are absent (-1): audio packets omit them.
+        assertEquals(-1, packet.lastIFrameInterval);
+        assertEquals(-1, packet.lastFrameInterval);
+
+        // Verify body length and that the payload matches it.
+        assertEquals(168, packet.dataBodyLength);
+        assertNotNull(packet.naluData);
+        assertEquals(packet.dataBodyLength, packet.naluData.length);
+
+        // Print the parse result for manual inspection.
+        System.out.println("Parsed audio packet: " + packet.toString());
+    }
+
+
+}

+ 139 - 0
src/test/java/com/jttserver/relay/FlvStreamRelayTest.java

@@ -0,0 +1,139 @@
+package com.jttserver.relay;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import com.jttserver.protocol.Jtt1078NaluPacket;
+import com.jttserver.protocol.Jtt1078PacketParams;
+
+import java.io.ByteArrayOutputStream;
+import static org.junit.jupiter.api.Assertions.*;
+
+
+
+/**
+ * Unit tests for {@link FlvStreamRelay}: verifies that publishing H.264
+ * SPS/PPS NALUs produces a valid FLV init segment (file header + AVC
+ * sequence-header tag), both for per-NALU publishes and for a combined
+ * multi-NALU byte stream.
+ */
+public class FlvStreamRelayTest {
+    private FlvStreamRelay flvStreamRelay;
+
+    @BeforeEach
+    void setUp() {
+        // Fresh relay per test so channel state does not leak between tests.
+        flvStreamRelay = new FlvStreamRelay();
+    }
+
+    @Test
+    void testInitSegmentAndAvcSequenceHeader() {
+        String channelId = "test_channel_avc";
+
+        // Minimal H.264 SPS/PPS, same fixtures used by FlvPacketizerTest.
+        byte[] spsData = new byte[]{0x67, 0x42, 0x00, 0x0A, (byte)0xF8, 0x41, (byte)0xA2};
+        byte[] ppsData = new byte[]{0x68, (byte)0xCB, (byte)0x8C, (byte)0xB2};
+
+        // payloadType 98 = H.264.
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.payloadType = 98;
+
+        // Publish SPS then PPS; receiving both should trigger AVC sequence-header generation.
+        flvStreamRelay.publishVideo(channelId, spsData, params, 0);
+        flvStreamRelay.publishVideo(channelId, ppsData, params, 0);
+
+        // The init segment should contain the FLV header plus the sequence-header tag.
+        byte[] init = flvStreamRelay.getChannelInitVideoSegment(channelId);
+        assertNotNull(init);
+        assertTrue(init.length > 13, "初始化段应包含文件头和至少一个视频Tag");
+
+        // Validate the 13-byte FLV preamble (9-byte header + 4-byte PreviousTagSize0).
+        assertEquals(0x46, init[0]); // 'F'
+        assertEquals(0x4C, init[1]); // 'L'
+        assertEquals(0x56, init[2]); // 'V'
+        assertEquals(0x01, init[3]); // version
+        assertEquals(0x05, init[4]); // flags: audio + video present
+        assertEquals(0x00, init[5]);
+        assertEquals(0x00, init[6]);
+        assertEquals(0x00, init[7]);
+        assertEquals(0x09, init[8]); // header length = 9
+        assertEquals(0x00, init[9]);
+        assertEquals(0x00, init[10]);
+        assertEquals(0x00, init[11]);
+        assertEquals(0x00, init[12]); // PreviousTagSize0
+
+        // The first tag after the preamble must be a video tag (the AVC sequence header).
+        int tagStart = 13; // position right after the FLV preamble
+        assertEquals(0x09, init[tagStart], "首个Tag应为视频类型(0x09)");
+
+        // After the 11-byte tag header: byte 0 = FrameType+CodecID, byte 1 = AVCPacketType.
+        int payloadStart = tagStart + 11;
+        assertTrue(init.length > payloadStart + 2, "初始化段中的视频Tag的payload应存在");
+        assertEquals(0x17, init[payloadStart], "AVC序列头的FrameType+CodecID应为0x17");
+        assertEquals(0x00, init[payloadStart + 1], "AVCPacketType应为0x00(序列头)");
+
+        // Re-fetch the init segment and re-validate the sequence header —
+        // checks the accessor is stable without relying on internal buffers.
+        byte[] initAgain = flvStreamRelay.getChannelInitVideoSegment(channelId);
+        assertNotNull(initAgain);
+        assertTrue(initAgain.length > tagStart + 11 + 2, "初始化段应包含FLV头和至少一个视频序列头Tag");
+        assertEquals(0x09, initAgain[tagStart + 0], "初始化段首个Tag应为视频类型(0x09)");
+        int bufPayloadStart = 11;
+        assertEquals(0x17, initAgain[tagStart + bufPayloadStart], "序列头的FrameType+CodecID应为0x17");
+        assertEquals(0x00, initAgain[tagStart + bufPayloadStart + 1], "AVCPacketType应为0x00(序列头)");
+    }
+
+    // Verifies that publishVideo splits a byte stream containing multiple NALUs
+    // (start-code delimited) and publishes each one individually.
+    @Test
+    void testPublishVideoSplitMultipleNalues_H264CombinedSpsPps() {
+        String channelId = "test_channel_avc_multi";
+
+        byte[] startCode = new byte[]{0x00, 0x00, 0x00, 0x01};
+        byte[] spsData = new byte[]{0x67, 0x42, 0x00, 0x0A, (byte)0xF8, 0x41, (byte)0xA2};
+        byte[] ppsData = new byte[]{0x68, (byte)0xCB, (byte)0x8C, (byte)0xB2};
+
+        // payloadType 98 = H.264.
+        Jtt1078PacketParams params = new Jtt1078PacketParams();
+        params.payloadType = 98;
+
+        // Build a stream with two NALUs: start code + SPS, then start code + PPS.
+        ByteArrayOutputStream multi = new ByteArrayOutputStream();
+        multi.write(startCode, 0, startCode.length);
+        multi.write(spsData, 0, spsData.length);
+        multi.write(startCode, 0, startCode.length);
+        multi.write(ppsData, 0, ppsData.length);
+        byte[] combined = multi.toByteArray();
+
+        // Publish the multi-NALU stream in one call; the relay must split it internally.
+        flvStreamRelay.publishVideo(channelId, combined, params, 0);
+
+        // The init segment should contain the FLV header plus the sequence-header tag.
+        byte[] init = flvStreamRelay.getChannelInitVideoSegment(channelId);
+        assertNotNull(init);
+        assertTrue(init.length > 13, "初始化段应包含文件头和至少一个视频Tag");
+        // Validate the 13-byte FLV preamble (9-byte header + 4-byte PreviousTagSize0).
+        assertEquals(0x46, init[0]); // 'F'
+        assertEquals(0x4C, init[1]); // 'L'
+        assertEquals(0x56, init[2]); // 'V'
+        assertEquals(0x01, init[3]); // version
+        assertEquals(0x05, init[4]); // flags: audio + video present
+        assertEquals(0x00, init[5]);
+        assertEquals(0x00, init[6]);
+        assertEquals(0x00, init[7]);
+        assertEquals(0x09, init[8]); // header length = 9
+        assertEquals(0x00, init[9]);
+        assertEquals(0x00, init[10]);
+        assertEquals(0x00, init[11]);
+        assertEquals(0x00, init[12]); // PreviousTagSize0
+
+        // The first tag after the preamble must be a video tag (the AVC sequence header).
+        int tagStart = 13; // position right after the FLV preamble
+        assertEquals(0x09, init[tagStart], "首个Tag应为视频类型(0x09)");
+        // After the 11-byte tag header: FrameType+CodecID then AVCPacketType.
+        int payloadStart = tagStart + 11;
+        assertTrue(init.length > payloadStart + 2, "初始化段中的视频Tag的payload应存在");
+        assertEquals(0x17, init[payloadStart], "AVC序列头的FrameType+CodecID应为0x17");
+        assertEquals(0x00, init[payloadStart + 1], "AVCPacketType应为0x00(序列头)");
+
+        // Re-fetch the init segment and re-validate the sequence header.
+        byte[] initAgain = flvStreamRelay.getChannelInitVideoSegment(channelId);
+        assertNotNull(initAgain);
+        assertTrue(initAgain.length > tagStart + 11 + 2, "初始化段应包含FLV头和至少一个视频序列头Tag");
+        assertEquals(0x09, initAgain[tagStart + 0], "初始化段首个Tag应为视频类型(0x09)");
+        int bufPayloadStart = 11;
+        assertEquals(0x17, initAgain[tagStart + bufPayloadStart], "序列头的FrameType+CodecID应为0x17");
+        assertEquals(0x00, initAgain[tagStart + bufPayloadStart + 1], "AVCPacketType应为0x00(序列头)");
+    }
+}

Some files were not shown because too many files changed in this diff