|
|||||||||||
PREV CLASS NEXT CLASS | FRAMES NO FRAMES | ||||||||||
SUMMARY: NESTED | FIELD | CONSTR | METHOD | DETAIL: FIELD | CONSTR | METHOD |
java.lang.Object | +--com.speech4j.sound.Util
Utility class
Method Summary | |
static double |
bytes2sec(javax.sound.sampled.AudioFormat format,
long size)
Converts length in bytes to length in seconds given particular AudioFormat |
static java.lang.String |
bytes2String(byte[] ba)
Converts byte array to human readable string by converting each byte to two chars, example 3D |
static byte[] |
convertSound(AudioBuffer buffer,
javax.sound.sampled.AudioFormat targetFormat)
|
static byte[] |
convertSound(byte[] sound,
javax.sound.sampled.AudioFormat sourceFormat,
javax.sound.sampled.AudioFormat targetFormat)
|
static byte[] |
convertSound(byte[] sound,
int offset,
int length,
javax.sound.sampled.AudioFormat sourceFormat,
javax.sound.sampled.AudioFormat targetFormat)
|
static void |
createWavHeader(java.io.DataOutput out,
int dataSize,
javax.sound.sampled.AudioFormat format)
|
static void |
createWavHeader(java.io.RandomAccessFile raf,
javax.sound.sampled.AudioFormat format)
|
static double |
fileLength(java.lang.String fileName)
Calculates duration of audio file |
static AudioBuffer |
merge(javax.sound.sampled.AudioFormat format,
AudioBuffer[] sourceBuffers)
Merges array of audio buffers into one audio buffer. |
static void |
merge(javax.sound.sampled.AudioFormat format,
AudioBuffer[] sourceBuffers,
java.lang.String fileName)
Merges array of AudioBuffers and outputs to file. |
static void |
merge(javax.sound.sampled.AudioFormat format,
java.lang.String[] sourceFiles,
java.lang.String fileName)
Merges array of audio files and outputs to file. |
static void |
mergeAudio(java.lang.String fileName,
javax.sound.sampled.AudioFormat format,
java.lang.String[] sourceFiles)
Merges audio files |
static java.lang.String |
randomString()
Generates pseudo-random string. |
static AudioBuffer |
silence(double sec,
javax.sound.sampled.AudioFormat format)
Generates AudioBuffer of silence. |
static byte[] |
string2Bytes(java.lang.String s)
Converts string to byte array. |
Methods inherited from class java.lang.Object |
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait |
Method Detail |
public static java.lang.String bytes2String(byte[] ba)
ba
- Array of bytes
public static byte[] string2Bytes(java.lang.String s)
s
- String where every byte is represented by 2 chars, example: 3D
public static void mergeAudio(java.lang.String fileName, javax.sound.sampled.AudioFormat format, java.lang.String[] sourceFiles) throws java.io.IOException, javax.sound.sampled.UnsupportedAudioFileException, SoundException
fileName
- Name of the output file.
format
- Output format.
sourceFiles
- Array of source files.
java.io.IOException
javax.sound.sampled.UnsupportedAudioFileException
SoundException
public static java.lang.String randomString()
public static AudioBuffer silence(double sec, javax.sound.sampled.AudioFormat format)
sec
- Duration of silence in seconds
format
- AudioFormat
public static double fileLength(java.lang.String fileName) throws java.io.IOException, javax.sound.sampled.UnsupportedAudioFileException
fileName
- Audio file
java.io.IOException
javax.sound.sampled.UnsupportedAudioFileException
public static double bytes2sec(javax.sound.sampled.AudioFormat format, long size)
format
- AudioFormat
size
- Size in bytes
public static AudioBuffer merge(javax.sound.sampled.AudioFormat format, AudioBuffer[] sourceBuffers) throws java.io.IOException, SoundException
format
parameter is null then format of the resulting
buffer is "maximum" of input buffers, for example if there are two input
buffers, one is 16kHz 16bit Stereo and the other is 44kHz 8bit Mono then the
resulting buffer will be 44kHz 16bit Stereo
format
- Format of output buffer. Can be null.
sourceBuffers
- Array of input audio buffers.
java.io.IOException
SoundException
public static void merge(javax.sound.sampled.AudioFormat format, AudioBuffer[] sourceBuffers, java.lang.String fileName) throws java.io.IOException, SoundException
format
parameter is null then format of the resulting
buffer is "maximum" of input buffers, for example if there are two input
buffers, one is 16kHz 16bit Stereo and the other is 44kHz 8bit Mono then the
resulting buffer will be 44kHz 16bit Stereo
format
- Output format. Can be null.
sourceBuffers
- Input audio buffers.
fileName
- Output file name.
java.io.IOException
SoundException
public static void createWavHeader(java.io.RandomAccessFile raf, javax.sound.sampled.AudioFormat format) throws java.io.IOException
java.io.IOException
public static void createWavHeader(java.io.DataOutput out, int dataSize, javax.sound.sampled.AudioFormat format) throws java.io.IOException
java.io.IOException
public static void merge(javax.sound.sampled.AudioFormat format, java.lang.String[] sourceFiles, java.lang.String fileName) throws java.io.IOException, SoundException, javax.sound.sampled.UnsupportedAudioFileException
format
parameter is null then format of the resulting
buffer is "maximum" of input buffers, for example if there are two input
buffers, one is 16kHz 16bit Stereo and the other is 44kHz 8bit Mono then the
resulting buffer will be 44kHz 16bit Stereo
format
- Output format. Can be null.
fileName
- Output file name.
java.io.IOException
SoundException
javax.sound.sampled.UnsupportedAudioFileException
public static byte[] convertSound(AudioBuffer buffer, javax.sound.sampled.AudioFormat targetFormat) throws SoundException
SoundException
public static byte[] convertSound(byte[] sound, javax.sound.sampled.AudioFormat sourceFormat, javax.sound.sampled.AudioFormat targetFormat) throws SoundException
SoundException
public static byte[] convertSound(byte[] sound, int offset, int length, javax.sound.sampled.AudioFormat sourceFormat, javax.sound.sampled.AudioFormat targetFormat) throws SoundException
SoundException
|
|||||||||||
PREV CLASS NEXT CLASS | FRAMES NO FRAMES | ||||||||||
SUMMARY: NESTED | FIELD | CONSTR | METHOD | DETAIL: FIELD | CONSTR | METHOD |