double SineWave(unsigned long i,unsigned long T,unsigned long sr,double vol)
{
/*Sine curve*/
return sin(2*M_PI*(double)(T * i)/ (double)sr) * vol;
}
I revised it as follows. The time index i is no longer needed; instead, the phase is accumulated in a static variable energy.
double SineWave(unsigned long T,unsigned long sr,double vol)
{
/*Sine curve*/
static double energy;
energy += ((double)T / (double)sr);
energy = (energy - floor(energy));
return sin(2*M_PI*energy) * vol;
}
The two versions return the same values as long as the fundamental frequency stays constant: with a fixed T, after i calls the static variable energy holds the fractional part of T*i/sr, which is exactly the phase the original function fed into sin(). When the fundamental frequency does change, though, the revised function no longer produces abrupt jumps in the waveform, because the accumulated phase stays continuous. In hindsight this is obvious. I then added consonant generation as well; the full program appears after the short check below.
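As a quick, throwaway check of that claim (not part of the program below), the following standalone snippet compiles both versions side by side under the made-up names SineWave_old and SineWave_new, drives them at a constant 130 Hz for one second at 44100 Hz, and prints the largest difference between their outputs; only floating-point rounding error should remain.
#include <stdio.h>
#include <math.h>
/* original version: phase computed from the time index i */
double SineWave_old(unsigned long i,unsigned long T,unsigned long sr,double vol)
{
return sin(2*M_PI*(double)(T * i)/ (double)sr) * vol;
}
/* revised version: phase accumulated in a static variable */
double SineWave_new(unsigned long T,unsigned long sr,double vol)
{
static double energy;
energy += ((double)T / (double)sr);
energy = (energy - floor(energy));
return sin(2*M_PI*energy) * vol;
}
int main(void)
{
unsigned long i,sr = 44100,T = 130;
double d,maxdiff = 0;
for(i=1;i<=sr;i++){
d = fabs(SineWave_old(i,T,sr,1.0) - SineWave_new(T,sr,1.0));
if(d > maxdiff) maxdiff = d;
}
printf("max difference over 1 second: %g\n",maxdiff);
return 0;
}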
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#define PCM 1
#define BUFSIZE (1000)
#ifndef UCHAR_MAX
#define UCHAR_MAX (255)
#endif
#ifndef UCHAR_MIN
#define UCHAR_MIN (0)
#endif
#ifndef SHRT_MAX
#define SHRT_MAX (32767)
#endif
#ifndef SHRT_MIN
#define SHRT_MIN (-32768)
#endif
#ifndef _MAX_PATH
#define _MAX_PATH (255)
#endif
#define FORMANT_LENGTH (5)
#define IIR_LENGTH (3)
/*-------------------------------*/
/* Phoneme-block data structures */
/*-------------------------------*/
typedef struct Voice_datum
{
double ffb[FORMANT_LENGTH];/* formant frequencies (consonant) */
double volb;/* consonant volume */
unsigned long lengthb[3];/* consonant envelope: fade-in, sustain length, fade-out */
unsigned long lengthv[3];/* vowel envelope: leading silence, fade-in, formant-transition zone */
double ffv[FORMANT_LENGTH];/* formant frequencies (vowel) */
double volv;/* vowel volume */
double fc;/* fundamental frequency */
unsigned long length;/* total length of the block */
int conti;/* whether to link into the next block */
}VoiceData;
typedef struct Speak_tuple
{
double fc;
double vol;
unsigned long length;
int vnum;
int cnum;
int conti;
}SpeakData;
/*------------------------*/
/* WAV file error checks  */
/*------------------------*/
void openerror(char *filename)
{
fprintf(stderr, "Can't open file: %s\n", filename);
exit(1);
}
void formaterror(FILE *fp)
{
fprintf(stderr, "File format error : %ld\n", ftell(fp));
exit(1);
}
short datacheck(short data, short max, short min)
{
// print + or - when a sample overflows the allowed range, and clamp it
if (data > max) {
fprintf(stderr, "+");
data = max;
}
if (data < min) {
fprintf(stderr, "-");
data = min;
}
return data;
}
/*----------------------------------------------*/
/* 16-bit data: -32768 to 32767 (silence at 0)  */
/*  8-bit data: 0 to 255 (silence at 128)       */
/*----------------------------------------------*/
void _write_1ch1byte(short *data, short *dummy,FILE *wav, unsigned long count)
{
for(;count>0;(count--,data++))
{
*data = datacheck(*data, UCHAR_MAX, UCHAR_MIN);
fputc(*data, wav);
}
}
void _write_1ch2byte(short *data, short *dummy,FILE *wav, unsigned long count)
{
for(;count>0;(count--,data++))
{
*data = datacheck(*data, SHRT_MAX, SHRT_MIN);
fwrite(data, sizeof(short), 1, wav);
}
}
void _write_2ch1byte(short *left, short *right, FILE *wav, unsigned long count)
{
for(;count>0;(count--,left++,right++))
{
*left = datacheck(*left, UCHAR_MAX, UCHAR_MIN);
*right = datacheck(*right, UCHAR_MAX, UCHAR_MIN);
fputc(*left, wav);
fputc(*right, wav);
}
}
void _write_2ch2byte(short *left, short *right, FILE *wav, unsigned long count)
{
for(;count>0;(count--,left++,right++))
{
*left = datacheck(*left, SHRT_MAX, SHRT_MIN);
*right = datacheck(*right, SHRT_MAX, SHRT_MIN);
fwrite(left, sizeof(short), 1, wav);
fwrite(right, sizeof(short), 1, wav);
}
}
/*-------------------------*/
/* Write out the WAV file  */
/*-------------------------*/
void wavwrite(unsigned long length, short *chL, short *chR, char *filename, unsigned short ch, unsigned short bytes, unsigned long sr)
{
unsigned long file_size, var_long;
unsigned short var_short;
FILE *file_wave;
void (*datawrite[2][2])(short *left, short *right, FILE *wav, unsigned long count)=
{
{_write_1ch1byte,_write_1ch2byte},
{_write_2ch1byte,_write_2ch2byte}
};
file_size = length * ch * bytes;
if ((file_wave = fopen(filename, "wb")) == NULL)
{
openerror(filename);
}
fwrite("RIFF", 1, 4, file_wave);
/* file size (written as 4 bytes; long may be wider than 4 on some platforms) */
var_long = file_size + 36; fwrite(&var_long, 4, 1, file_wave);
/* WAVE header */
fwrite("WAVE", 1, 4, file_wave);
/* chunkID (fmt chunk) */
fwrite("fmt ", 1, 4, file_wave);
/* chunkSize (size of the fmt chunk; 16 for uncompressed WAV) */
var_long = 16; fwrite(&var_long, 4, 1, file_wave);
/* wFormatTag (uncompressed PCM = 1) */
var_short = PCM; fwrite(&var_short, sizeof(short), 1, file_wave);
/* dwChannels (mono = 1, stereo = 2) */
fwrite(&ch, 2, 1, file_wave);
/* dwSamplesPerSec (sampling rate in Hz) */
fwrite(&sr, 4, 1, file_wave);
/* dwAvgBytesPerSec (bytes per second) */
var_long = bytes * ch * sr; fwrite(&var_long, 4, 1, file_wave);
/* wBlockAlign (bytes per sample * channels) */
var_short = bytes * ch; fwrite(&var_short, sizeof(short), 1, file_wave);
/* wBitsPerSample (bits per sample) */
var_short = bytes * 8; fwrite(&var_short, sizeof(short), 1, file_wave);
/* chunkID (data chunk) */
fwrite("data", 1, 4, file_wave);
/* chunkSize (data length in bytes) */
fwrite(&file_size, 4, 1, file_wave);
/* end of the 44-byte header */
if (ftell(file_wave) != 44)
{
fprintf(stderr, "%s : wav header write error\n", filename);
exit(1);
}
/* write the waveformData (sample data) */
datawrite[ch-1][bytes-1](chL,chR,file_wave,length);
fclose(file_wave);
}
/*------------------------------*/
/* Waveform-generation engines  */
/*------------------------------*/
double SineWave(unsigned long T,unsigned long sr,double vol)
{
/*Sine curve*/
static double energy;
energy += ((double)T / (double)sr);
energy = (energy - floor(energy));
return sin(2*M_PI*energy) * vol;
}
double Wave(unsigned long T,unsigned long sr,double vol)
{
/*Outputs a Rosenberg-style glottal pulse*/
static double energy;
double tau = 0.75; /* fraction of the open phase spent widening the glottis */
double tau2= 0.8; /* fraction of one cycle taken up by the open phase */
energy += ((double)T / (double)sr);
energy = (energy - floor(energy));
if(energy <= tau*tau2){
return vol*( (3*energy*energy)/(tau*tau) - (2*energy*energy*energy)/(tau*tau*tau)-0.5);
}else if(energy <tau2){
return vol*(1 - (energy-tau)/((1-tau)*(1-tau))-0.5);
}else{
return vol*(-0.5);
}
}
double GaussNoise(double vol)
{
/* pseudo-noise output; cast to double before adding so rand()+rand() cannot overflow int */
return vol* ((double)rand() + (double)rand()) / (double)(RAND_MAX);
}
/*--------------------------------------------------*/
/* IIR filter design and application                */
/* (second-order resonator via bilinear transform)  */
/*--------------------------------------------------*/
void IIR_resonator(double fc, double Q, double a[], double b[])
{
fc = tan(M_PI * fc) / (2.0 * M_PI);
a[0] = 1.0 + 2.0 * M_PI * fc / Q + 4.0 * M_PI * M_PI * fc * fc;
a[1] = (8.0 * M_PI * M_PI * fc * fc - 2.0) / a[0];
a[2] = (1.0 - 2.0 * M_PI * fc / Q + 4.0 * M_PI * M_PI * fc * fc) / a[0];
b[0] = 2.0 * M_PI * fc / Q / a[0];
b[1] = 0.0;
b[2] = -2.0 * M_PI * fc / Q / a[0];
a[0] = 1.0;
}
void IIR_apply(double a[],double b[],double tap[],double output[],double base)
{
int k;
for(k=IIR_LENGTH-1;k>0;k--)
{
tap[k]= tap[k-1];
output[k]= output[k-1];
}
tap[0]=base;output[0] = 0;
for(k=0;k<IIR_LENGTH;k++)
{
output[0] += (b[k] * tap[k]);
}
for(k=1;k<IIR_LENGTH;k++)
{
output[0] -= a[k] * output[k];
}
}
/*-----------------------------------------------------*/
/* Miscellaneous helper functions                      */
/* (_slide eases from x1 to x2 as a runs from 0 to b)  */
/*-----------------------------------------------------*/
double _slide(unsigned long a,unsigned long b,double x1,double x2)
{
double w;
if((b==0) || (a > b))
{
return x2;
}else{
w = ((double)a)/((double)b);
}
w = (w<0.5)? w*w*2 : -2 * w * (w - 2) -1;
//w = sin(M_PI * (w -0.5)) * 0.5 + 0.5;
return (1-w) * x1 + w * x2;
}
/*----------------------------*/
/* Speech-generation engine   */
/*----------------------------*/
double *Voice_production(unsigned long chunks,double *wave_output,unsigned long sr,VoiceData *Voice1)
{
unsigned long h,i,j;
double av[IIR_LENGTH],bv[IIR_LENGTH],ab[IIR_LENGTH],bb[IIR_LENGTH];
double voicev,tapv[FORMANT_LENGTH][IIR_LENGTH]={0},outputv[FORMANT_LENGTH][IIR_LENGTH]={0},basev;
double voiceb,tapb[FORMANT_LENGTH][IIR_LENGTH]={0},outputb[FORMANT_LENGTH][IIR_LENGTH]={0},baseb;
for(h=0;h<chunks;h++)
{
printf("開始(音声生成%d)\n",h);
for(i=0;i<Voice1[h].length;i++){
wave_output[i] = 0;
/* vowel */
if(i > Voice1[h].lengthv[0]){
if(i < (Voice1[h].lengthv[0] + Voice1[h].lengthv[1])){
basev = Wave(Voice1[h].fc,sr,_slide(i-Voice1[h].lengthv[0],(Voice1[h].lengthv[0] + Voice1[h].lengthv[1]),0,Voice1[h].volv));
}else if(/*(Voice1[h].conti !=0 ) &&*/(h+1<chunks) && (i > (Voice1[h].lengthv[0] + Voice1[h].lengthv[1]+ Voice1[h].lengthv[2]))){
basev = Wave(_slide(i-(Voice1[h].lengthv[0] + Voice1[h].lengthv[1] + Voice1[h].lengthv[2]),Voice1[h].length -(Voice1[h].lengthv[0] + Voice1[h].lengthv[1] + Voice1[h].lengthv[2]),Voice1[h].fc,Voice1[h+1].fc),sr,_slide(i-(Voice1[h].lengthv[0] + Voice1[h].lengthv[1] + Voice1[h].lengthv[2]),Voice1[h].length -(Voice1[h].lengthv[0] + Voice1[h].lengthv[1] + Voice1[h].lengthv[2]),Voice1[h].volv,Voice1[h+1].volv));
}else{
basev = Wave(Voice1[h].fc,sr,Voice1[h].volv);
}
voicev = 0;
for(j=0;j<3;j++){
if((Voice1[h].conti != 0) && (h+1 < chunks) && (i> Voice1[h].lengthv[0] + Voice1[h].lengthv[1] + Voice1[h].lengthv[2])){
IIR_resonator(_slide(i-(Voice1[h].lengthv[0] + Voice1[h].lengthv[1] + Voice1[h].lengthv[2] ),Voice1[h].length - (Voice1[h].lengthv[0] + Voice1[h].lengthv[1] + Voice1[h].lengthv[2]),Voice1[h].ffv[j],Voice1[h+1].ffv[j])/44100,20,av,bv);
}else{
IIR_resonator(Voice1[h].ffv[j]/44100,20,av,bv);
}
IIR_apply(av,bv,tapv[j],outputv[j],basev);
voicev += outputv[j][0];
}
if ((i > Voice1[h].lengthv[0])){
wave_output[i] += _slide(i,Voice1[h].lengthv[1],0,voicev)/3.0;
}else{
wave_output[i] += voicev /3.0;
}
}
/* consonant */
if(i < Voice1[h].lengthb[0] + Voice1[h].lengthb[1] + Voice1[h].lengthb[2]){
if(i< Voice1[h].lengthb[0]){
baseb = GaussNoise(_slide(i,Voice1[h].lengthb[0],0,Voice1[h].volb));
}else if(Voice1[h].lengthb[0] + Voice1[h].lengthb[1] < i){
baseb = GaussNoise(_slide(i - (Voice1[h].lengthb[0] + Voice1[h].lengthb[1]),Voice1[h].length - (Voice1[h].lengthb[0] + Voice1[h].lengthb[1]),Voice1[h].volb,0));
}else{
baseb = GaussNoise(Voice1[h].volb);
}
voiceb = 0;
for(j=0;j<3;j++){
IIR_resonator(Voice1[h].ffb[j]/44100,7,ab,bb);
IIR_apply(ab,bb,tapb[j],outputb[j],baseb);
voiceb += outputb[j][0];
}
wave_output[i] +=voiceb/3.0;
}
}
wave_output+=i;
}
printf("実行終了(音声生成)\n");
return wave_output;
}
/*------------------------------------*/
/* Build the phoneme-block sequence   */
/* from the vowel/consonant sequence  */
/*------------------------------------*/
VoiceData *formants(unsigned long chunks,VoiceData *voices,unsigned long sr,SpeakData *Speak1){
unsigned long h,i;
double ffv[][FORMANT_LENGTH]=
{
//a/0
{800,1300,2500,3500,4500},
//i/1
{250,1900,2600,3500,4500},
//u/2
{400,1400,2200,3500,4500},
//e/3
{450,1800,2400,3500,4500},
//o/4
{450,900,2600,3500,4500},
};
VoiceData consonant[]=
{
///00
{{0},0.0,{0},{0,0,500},{0},1,1,0,1},
//k/01
{{800,1100,1400,3500,4500},0.8,{200,0,2200},{2200,200,0},{0},1,1,0,1},
//kh/02
{{800,3200,3700,3500,4500},0.8,{0,0,2200},{1600,800,0},{0},1,1,0,1},
//k,/03
{{400,2300,4600,3500,4500},0.8,{0,0,2400},{1800,600,0},{0},1,1,0,1},
//s/04
{{250,400,4500,3500,4500},0.5,{500,0,5500},{4000,2000,0},{0},1,1,0,1},
//sh/05
{{500,2100,4300,3500,4500},0.6,{500,0,5500},{4000,2000,0},{0},1,1,0,1},
//t/06
{{500,2100,4300,3500,4500},1.0,{0,200,400},{500,400,0},{0},1,1,0,1},
//ch/07
{{3900,4900,5400,3500,4500},0.6,{0,0,2000},{1500,500,0},{0},1,1,0,1},
//ts/08
{{1500,4000,4700,3500,4500},0.6,{0,100,1000},{900,500,0},{0},1,1,0,1},
//h/09
{{250,10,1400,3500,4500},0.6,{500,1000,3000},{3500,500,0},{0},1,1,0,1},
//h,/10
{{250,400,4500,3500,4500},0.5,{1000,800,3000},{3500,500,0},{0},1,1,0,1},
//y/11
{{0},0,{0},{0,300,0},{200,2100,3100,3500,4500},0.6,1,3000,1},
//m/12
{{0},0,{0},{300,900,300},{200,250,300, 3500, 4500},0.4,1,1500,1},
//w/13
{{0},0,{0},{0,1500,1500},{400,800,2200,3500,4500},1.0,1,3000,1},
//r/14
{{0},0,{0},{0,2000,0},{300,2600,4900,3500,4500},0.8,1,3000,1},
//rj/15
{{0},0,{0},{0,1200,1200},{300,2600,4900,3500,4500},0.8,1,2400,1},
//n/16
{{0},0,{0},{500,1000,0},{250,1400,2500,3500,4500},0.3,1,3000,1},
//b/17
{{200,250,400,3500,4500},0.3,{500,0,1000},{1000,1000,0},{500,1300,2500,3500,4500},0.8,1,2000,1},
//z/18
{{150,250,4900,3500,4500},0.6,{500,0,2000},{1000,1000,0},{500,1300,2500,3500,4500},0.8,1,3000,1},
//dg/19
{{150,3600,4500,3500,4500},0.6,{500,0,2000},{1000,1000,0},{400,2000,2800,3500,4500},0.6,1,2500,1},
};
for(h=0;h<chunks;h++)
{
*voices = consonant[Speak1[h].cnum];
if(Speak1[h].cnum > 10)
{
(*voices).volb *= Speak1[h].vol;
(*voices).volv *= Speak1[h].vol;
(*voices).fc *= Speak1[h].fc;
(*voices).conti = 1;
Speak1[h].length -= (*voices).length;
voices++;
*voices = consonant[0];
}
for(i=0;i < FORMANT_LENGTH;i++)
{
(*voices).ffv[i] = ffv[Speak1[h].vnum][i];
}
(*voices).volb *= Speak1[h].vol;
(*voices).volv *= Speak1[h].vol;
(*voices).fc *= Speak1[h].fc;
(*voices).conti *= Speak1[h].conti;
(*voices).length = Speak1[h].length;
(*voices).lengthv[2]=(Speak1[h].length-(1500+(*voices).lengthv[0] + (*voices).lengthv[1]))*0.5;
voices++;
}
return voices;
}
/*--------------------------------------*/
/* Build the vowel/consonant sequence   */
/* from a string of letters             */
/*--------------------------------------*/
/*
SpeakData *makespeech(SpeakData *speech,char str1[])
{
}
*/
/*-----------------*/
/* main function   */
/*-----------------*/
int main(void)
{
unsigned long i,j,k;
short *chL,*chR = NULL;/* right channel unused (mono output) */
double *chwaveL,*chwaveR;
unsigned long sr = 44100;
unsigned short bytes = 2;
unsigned short ch = 1;
unsigned long points;
VoiceData voices[10],*ptr;
SpeakData speech[10]={
{65*2,18000,8000,0,0,1},
{75*2,26000,8000,3,0,1},
{80*2,22000,8000,1,0,1},
{50*2,20000,8000,2,0,1},
{65*2,18000,8000,3,0,1},
{75*2,26000,8000,4,0,1},
{80*2,22000,8000,0,0,1},
{50*2,20000,8000,4,0,1},
};
ptr = formants(8,voices,sr,speech);
/* sound generation */
points=64000;
chwaveL = (double *)calloc(points,sizeof(double));
chL = (short *)calloc(points,sizeof(short));
printf("実行終了(準備)\n");
Voice_production(8,chwaveL,sr,voices);
for(i=0;i<points;i++){
chL[i] = (short)floor(chwaveL[i]);
}
wavwrite(points,chL,chR ,"output.wav",ch,bytes,sr);
free(chwaveL);
free(chL);
return 0;
}
Compiling and running this code produces a WAV file that says 「あえいうえおあお」 (a-e-i-u-e-o-a-o). Editing the speech array makes it say other things. A detailed explanation will come all at once when the finished version is ready.
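For example, keeping eight blocks of 8000 samples each, so that points = 64000 in main() still covers the whole utterance, a variation along the lines of the untested sketch below should say 「かきくけこかきく」 (ka-ki-ku-ke-ko-ka-ki-ku): cnum = 1 selects the k entry of the consonant table in formants(), and vnum selects the vowel formants as before. The fields are, in order, fc, vol, length, vnum, cnum, conti.
SpeakData speech[10]={
{65*2,18000,8000,0,1,1},/* ka */
{75*2,26000,8000,1,1,1},/* ki */
{80*2,22000,8000,2,1,1},/* ku */
{50*2,20000,8000,3,1,1},/* ke */
{65*2,18000,8000,4,1,1},/* ko */
{75*2,26000,8000,0,1,1},/* ka */
{80*2,22000,8000,1,1,1},/* ki */
{50*2,20000,8000,2,1,1},/* ku */
};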