forked from xiaozhi/xiaozhi-esp32
Compare commits
9 Commits
| Author | SHA1 | Date |
|---|---|---|
| | e1e5387a78 | |
| | dfd3069ee9 | |
| | 1619217bd9 | |
| | 023dd7fb27 | |
| | 3efef0cf20 | |
| | 80e02d7c70 | |
| | 8e2cf90d86 | |
| | 34ab004c38 | |
| | 11c79bf086 | |
@@ -4,7 +4,7 @@
# CMakeLists in this exact order for cmake to work correctly
cmake_minimum_required(VERSION 3.16)

-set(PROJECT_VER "1.5.0")
+set(PROJECT_VER "1.5.1")

# Add this line to disable the specific warning
add_compile_options(-Wno-missing-field-initializers)
@@ -223,14 +223,14 @@ config USE_WECHAT_MESSAGE_STYLE
config USE_AUDIO_PROCESSOR
    bool "Enable audio noise suppression and gain processing"
    default y
-    depends on IDF_TARGET_ESP32S3 && USE_AFE
+    depends on IDF_TARGET_ESP32S3 && SPIRAM
    help
        Requires ESP32-S3 with AFE support

config USE_WAKE_WORD_DETECT
    bool "Enable wake word detection"
    default y
-    depends on IDF_TARGET_ESP32S3 && USE_AFE
+    depends on IDF_TARGET_ESP32S3 && SPIRAM
    help
        Requires ESP32-S3 with AFE support
endmenu
@@ -484,13 +484,9 @@ void Application::Start() {
            });
        });
    });
-#endif
-
-#if CONFIG_USE_WAKE_WORD_DETECT
-    wake_word_detect_.Initialize(codec->input_channels(), codec->input_reference());
-    wake_word_detect_.OnVadStateChange([this](bool speaking) {
-        Schedule([this, speaking]() {
-            if (device_state_ == kDeviceStateListening) {
+    audio_processor_.OnVadStateChange([this](bool speaking) {
+        if (device_state_ == kDeviceStateListening) {
+            Schedule([this, speaking]() {
                if (speaking) {
                    voice_detected_ = true;
                } else {
@@ -498,10 +494,13 @@ void Application::Start() {
                }
                auto led = Board::GetInstance().GetLed();
                led->OnStateChanged();
-            }
-        });
+            });
+        }
    });
+#endif

+#if CONFIG_USE_WAKE_WORD_DETECT
+    wake_word_detect_.Initialize(codec->input_channels(), codec->input_reference());
    wake_word_detect_.OnWakeWordDetected([this](const std::string& wake_word) {
        Schedule([this, &wake_word]() {
            if (device_state_ == kDeviceStateIdle) {
@@ -528,9 +527,6 @@ void Application::Start() {
            } else if (device_state_ == kDeviceStateActivating) {
                SetDeviceState(kDeviceStateIdle);
            }
-
-            // Resume detection
-            wake_word_detect_.StartDetection();
        });
    });
    wake_word_detect_.StartDetection();
@@ -738,6 +734,9 @@ void Application::SetDeviceState(DeviceState state) {
            display->SetEmotion("neutral");
+#if CONFIG_USE_AUDIO_PROCESSOR
+            audio_processor_.Stop();
+#endif
#if CONFIG_USE_WAKE_WORD_DETECT
            wake_word_detect_.StartDetection();
#endif
            break;
        case kDeviceStateConnecting:
@@ -752,6 +751,9 @@ void Application::SetDeviceState(DeviceState state) {
            opus_encoder_->ResetState();
+#if CONFIG_USE_AUDIO_PROCESSOR
+            audio_processor_.Start();
+#endif
#if CONFIG_USE_WAKE_WORD_DETECT
            wake_word_detect_.StopDetection();
#endif
            UpdateIotStates();
            if (previous_state == kDeviceStateSpeaking) {
@@ -765,6 +767,9 @@ void Application::SetDeviceState(DeviceState state) {
            codec->EnableOutput(true);
+#if CONFIG_USE_AUDIO_PROCESSOR
+            audio_processor_.Stop();
+#endif
#if CONFIG_USE_WAKE_WORD_DETECT
            wake_word_detect_.StartDetection();
#endif
            break;
        default:
BIN  main/assets/ja-JP/upgrade.p3 (new file; binary content not shown)
@@ -57,6 +57,10 @@ IRAM_ATTR bool AudioCodec::on_recv(i2s_chan_handle_t handle, i2s_event_data_t *e
void AudioCodec::Start() {
    Settings settings("audio", false);
    output_volume_ = settings.GetInt("output_volume", output_volume_);
+    if (output_volume_ <= 0) {
+        ESP_LOGW(TAG, "Output volume value (%d) is too small, setting to default (10)", output_volume_);
+        output_volume_ = 10;
+    }

    // Register the audio data callbacks
    i2s_event_callbacks_t rx_callbacks = {};
@@ -72,6 +76,7 @@ void AudioCodec::Start() {

    EnableInput(true);
    EnableOutput(true);
+    ESP_LOGI(TAG, "Audio codec started");
}

void AudioCodec::SetOutputVolume(int volume) {
@@ -6,7 +6,7 @@
static const char* TAG = "AudioProcessor";

AudioProcessor::AudioProcessor()
-    : afe_communication_data_(nullptr) {
+    : afe_data_(nullptr) {
    event_group_ = xEventGroupCreate();
}

@@ -15,50 +15,41 @@ void AudioProcessor::Initialize(int channels, bool reference) {
    reference_ = reference;
    int ref_num = reference_ ? 1 : 0;

-    afe_config_t afe_config = {
-        .aec_init = false,
-        .se_init = true,
-        .vad_init = false,
-        .wakenet_init = false,
-        .voice_communication_init = true,
-        .voice_communication_agc_init = true,
-        .voice_communication_agc_gain = 10,
-        .vad_mode = VAD_MODE_3,
-        .wakenet_model_name = NULL,
-        .wakenet_model_name_2 = NULL,
-        .wakenet_mode = DET_MODE_90,
-        .afe_mode = SR_MODE_HIGH_PERF,
-        .afe_perferred_core = 1,
-        .afe_perferred_priority = 1,
-        .afe_ringbuf_size = 50,
-        .memory_alloc_mode = AFE_MEMORY_ALLOC_MORE_PSRAM,
-        .afe_linear_gain = 1.0,
-        .agc_mode = AFE_MN_PEAK_AGC_MODE_2,
-        .pcm_config = {
-            .total_ch_num = channels_,
-            .mic_num = channels_ - ref_num,
-            .ref_num = ref_num,
-            .sample_rate = 16000,
-        },
-        .debug_init = false,
-        .debug_hook = {{ AFE_DEBUG_HOOK_MASE_TASK_IN, NULL }, { AFE_DEBUG_HOOK_FETCH_TASK_IN, NULL }},
-        .afe_ns_mode = NS_MODE_SSP,
-        .afe_ns_model_name = NULL,
-        .fixed_first_channel = true,
-    };
+    std::string input_format;
+    for (int i = 0; i < channels_ - ref_num; i++) {
+        input_format.push_back('M');
+    }
+    for (int i = 0; i < ref_num; i++) {
+        input_format.push_back('R');
+    }

-    afe_communication_data_ = esp_afe_vc_v1.create_from_config(&afe_config);
+    afe_config_t* afe_config = afe_config_init(input_format.c_str(), NULL, AFE_TYPE_VC, AFE_MODE_HIGH_PERF);
+    afe_config->aec_init = false;
+    afe_config->aec_mode = AEC_MODE_VOIP_HIGH_PERF;
+    afe_config->ns_init = true;
+    afe_config->vad_init = true;
+    afe_config->vad_mode = VAD_MODE_0;
+    afe_config->vad_min_noise_ms = 100;
+    afe_config->afe_perferred_core = 1;
+    afe_config->afe_perferred_priority = 1;
+    afe_config->agc_init = true;
+    afe_config->agc_mode = AFE_AGC_MODE_WEBRTC;
+    afe_config->agc_compression_gain_db = 10;
+    afe_config->memory_alloc_mode = AFE_MEMORY_ALLOC_MORE_PSRAM;
+
+    afe_iface_ = esp_afe_handle_from_config(afe_config);
+    afe_data_ = afe_iface_->create_from_config(afe_config);

    xTaskCreate([](void* arg) {
        auto this_ = (AudioProcessor*)arg;
        this_->AudioProcessorTask();
        vTaskDelete(NULL);
-    }, "audio_communication", 4096 * 2, this, 3, NULL);
+    }, "audio_communication", 4096, this, 3, NULL);
}

AudioProcessor::~AudioProcessor() {
-    if (afe_communication_data_ != nullptr) {
-        esp_afe_vc_v1.destroy(afe_communication_data_);
+    if (afe_data_ != nullptr) {
+        afe_iface_->destroy(afe_data_);
    }
    vEventGroupDelete(event_group_);
}
@@ -66,10 +57,10 @@ AudioProcessor::~AudioProcessor() {
void AudioProcessor::Input(const std::vector<int16_t>& data) {
    input_buffer_.insert(input_buffer_.end(), data.begin(), data.end());

-    auto feed_size = esp_afe_vc_v1.get_feed_chunksize(afe_communication_data_) * channels_;
+    auto feed_size = afe_iface_->get_feed_chunksize(afe_data_) * channels_;
    while (input_buffer_.size() >= feed_size) {
        auto chunk = input_buffer_.data();
-        esp_afe_vc_v1.feed(afe_communication_data_, chunk);
+        afe_iface_->feed(afe_data_, chunk);
        input_buffer_.erase(input_buffer_.begin(), input_buffer_.begin() + feed_size);
    }
}
@@ -80,6 +71,7 @@ void AudioProcessor::Start() {

void AudioProcessor::Stop() {
    xEventGroupClearBits(event_group_, PROCESSOR_RUNNING);
+    afe_iface_->reset_buffer(afe_data_);
}

bool AudioProcessor::IsRunning() {
@@ -90,16 +82,20 @@ void AudioProcessor::OnOutput(std::function<void(std::vector<int16_t>&& data)> c
    output_callback_ = callback;
}

+void AudioProcessor::OnVadStateChange(std::function<void(bool speaking)> callback) {
+    vad_state_change_callback_ = callback;
+}
+
void AudioProcessor::AudioProcessorTask() {
-    auto fetch_size = esp_afe_sr_v1.get_fetch_chunksize(afe_communication_data_);
-    auto feed_size = esp_afe_sr_v1.get_feed_chunksize(afe_communication_data_);
+    auto fetch_size = afe_iface_->get_fetch_chunksize(afe_data_);
+    auto feed_size = afe_iface_->get_feed_chunksize(afe_data_);
    ESP_LOGI(TAG, "Audio communication task started, feed size: %d fetch size: %d",
        feed_size, fetch_size);

    while (true) {
        xEventGroupWaitBits(event_group_, PROCESSOR_RUNNING, pdFALSE, pdTRUE, portMAX_DELAY);

-        auto res = esp_afe_vc_v1.fetch(afe_communication_data_);
+        auto res = afe_iface_->fetch_with_delay(afe_data_, portMAX_DELAY);
        if ((xEventGroupGetBits(event_group_) & PROCESSOR_RUNNING) == 0) {
            continue;
        }
@@ -110,6 +106,17 @@ void AudioProcessor::AudioProcessorTask() {
            continue;
        }

+        // VAD state change
+        if (vad_state_change_callback_) {
+            if (res->vad_state == VAD_SPEECH && !is_speaking_) {
+                is_speaking_ = true;
+                vad_state_change_callback_(true);
+            } else if (res->vad_state == VAD_SILENCE && is_speaking_) {
+                is_speaking_ = false;
+                vad_state_change_callback_(false);
+            }
+        }
+
        if (output_callback_) {
            output_callback_(std::vector<int16_t>(res->data, res->data + res->data_size / sizeof(int16_t)));
        }
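For orientation, a minimal sketch of the esp-sr 2.x call flow that the AudioProcessor changes above migrate to. It only uses calls that appear in the hunks (afe_config_init, esp_afe_handle_from_config, create_from_config, feed, fetch_with_delay, destroy); the header names are an assumption based on esp-sr 2.x and may differ between releases, so this is illustrative rather than a copy of the repository code.

// Illustrative sketch only (not taken from the repository).
#include "esp_afe_config.h"     // afe_config_init(), afe_config_t  (assumed location)
#include "esp_afe_sr_iface.h"   // esp_afe_sr_iface_t, esp_afe_sr_data_t
#include "freertos/FreeRTOS.h"  // portMAX_DELAY
#include "esp_err.h"            // ESP_FAIL

#include <vector>

// Run one chunk of interleaved 16 kHz PCM through an AFE voice-communication
// pipeline and report whether the VAD classified it as speech.
bool afe_vc_sketch(std::vector<int16_t>& pcm) {
    // One mic channel, no reference channel -> input format "M".
    afe_config_t* cfg = afe_config_init("M", NULL, AFE_TYPE_VC, AFE_MODE_HIGH_PERF);
    cfg->vad_init = true;                                  // VAD drives OnVadStateChange above
    cfg->agc_init = true;
    cfg->memory_alloc_mode = AFE_MEMORY_ALLOC_MORE_PSRAM;

    esp_afe_sr_iface_t* iface = esp_afe_handle_from_config(cfg);
    esp_afe_sr_data_t* data = iface->create_from_config(cfg);

    bool speaking = false;
    // Feed exactly get_feed_chunksize() samples per channel, then fetch a result.
    if ((int)pcm.size() >= iface->get_feed_chunksize(data)) {
        iface->feed(data, pcm.data());
        auto* res = iface->fetch_with_delay(data, portMAX_DELAY);
        if (res != nullptr && res->ret_value != ESP_FAIL) {
            speaking = (res->vad_state == VAD_SPEECH);
            // Processed samples are in res->data (res->data_size bytes).
        }
    }
    iface->destroy(data);
    return speaking;
}

The afe_iface_/afe_data_ members added in the header hunk below simply cache the interface/data pair produced by this kind of setup.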
@@ -21,14 +21,18 @@ public:
    void Stop();
    bool IsRunning();
    void OnOutput(std::function<void(std::vector<int16_t>&& data)> callback);
+    void OnVadStateChange(std::function<void(bool speaking)> callback);

private:
    EventGroupHandle_t event_group_ = nullptr;
-    esp_afe_sr_data_t* afe_communication_data_ = nullptr;
+    esp_afe_sr_iface_t* afe_iface_ = nullptr;
+    esp_afe_sr_data_t* afe_data_ = nullptr;
    std::vector<int16_t> input_buffer_;
    std::function<void(std::vector<int16_t>&& data)> output_callback_;
+    std::function<void(bool speaking)> vad_state_change_callback_;
    int channels_;
    bool reference_;
+    bool is_speaking_ = false;

    void AudioProcessorTask();
};
@@ -11,7 +11,7 @@
static const char* TAG = "WakeWordDetect";

WakeWordDetect::WakeWordDetect()
-    : afe_detection_data_(nullptr),
+    : afe_data_(nullptr),
      wake_word_pcm_(),
      wake_word_opus_() {
@@ -19,8 +19,8 @@ WakeWordDetect::WakeWordDetect()
}

WakeWordDetect::~WakeWordDetect() {
-    if (afe_detection_data_ != nullptr) {
-        esp_afe_sr_v1.destroy(afe_detection_data_);
+    if (afe_data_ != nullptr) {
+        afe_iface_->destroy(afe_data_);
    }

    if (wake_word_encode_task_stack_ != nullptr) {
@@ -50,61 +50,41 @@ void WakeWordDetect::Initialize(int channels, bool reference) {
        }
    }

-    afe_config_t afe_config = {
-        .aec_init = reference_,
-        .se_init = true,
-        .vad_init = true,
-        .wakenet_init = true,
-        .voice_communication_init = false,
-        .voice_communication_agc_init = false,
-        .voice_communication_agc_gain = 10,
-        .vad_mode = VAD_MODE_3,
-        .wakenet_model_name = wakenet_model_,
-        .wakenet_model_name_2 = NULL,
-        .wakenet_mode = DET_MODE_90,
-        .afe_mode = SR_MODE_HIGH_PERF,
-        .afe_perferred_core = 1,
-        .afe_perferred_priority = 1,
-        .afe_ringbuf_size = 50,
-        .memory_alloc_mode = AFE_MEMORY_ALLOC_MORE_PSRAM,
-        .afe_linear_gain = 1.0,
-        .agc_mode = AFE_MN_PEAK_AGC_MODE_2,
-        .pcm_config = {
-            .total_ch_num = channels_,
-            .mic_num = channels_ - ref_num,
-            .ref_num = ref_num,
-            .sample_rate = 16000
-        },
-        .debug_init = false,
-        .debug_hook = {{ AFE_DEBUG_HOOK_MASE_TASK_IN, NULL }, { AFE_DEBUG_HOOK_FETCH_TASK_IN, NULL }},
-        .afe_ns_mode = NS_MODE_SSP,
-        .afe_ns_model_name = NULL,
-        .fixed_first_channel = true,
-    };
-
-    afe_detection_data_ = esp_afe_sr_v1.create_from_config(&afe_config);
+    std::string input_format;
+    for (int i = 0; i < channels_ - ref_num; i++) {
+        input_format.push_back('M');
+    }
+    for (int i = 0; i < ref_num; i++) {
+        input_format.push_back('R');
+    }
+    afe_config_t* afe_config = afe_config_init(input_format.c_str(), models, AFE_TYPE_SR, AFE_MODE_HIGH_PERF);
+    afe_config->aec_init = reference_;
+    afe_config->aec_mode = AEC_MODE_SR_HIGH_PERF;
+    afe_config->afe_perferred_core = 1;
+    afe_config->afe_perferred_priority = 1;
+    afe_config->memory_alloc_mode = AFE_MEMORY_ALLOC_MORE_PSRAM;
+
+    afe_iface_ = esp_afe_handle_from_config(afe_config);
+    afe_data_ = afe_iface_->create_from_config(afe_config);

    xTaskCreate([](void* arg) {
        auto this_ = (WakeWordDetect*)arg;
        this_->AudioDetectionTask();
        vTaskDelete(NULL);
-    }, "audio_detection", 4096 * 2, this, 3, nullptr);
+    }, "audio_detection", 4096, this, 3, nullptr);
}

void WakeWordDetect::OnWakeWordDetected(std::function<void(const std::string& wake_word)> callback) {
    wake_word_detected_callback_ = callback;
}

-void WakeWordDetect::OnVadStateChange(std::function<void(bool speaking)> callback) {
-    vad_state_change_callback_ = callback;
-}
-
void WakeWordDetect::StartDetection() {
    xEventGroupSetBits(event_group_, DETECTION_RUNNING_EVENT);
}

void WakeWordDetect::StopDetection() {
    xEventGroupClearBits(event_group_, DETECTION_RUNNING_EVENT);
+    afe_iface_->reset_buffer(afe_data_);
}

bool WakeWordDetect::IsDetectionRunning() {
@@ -114,23 +94,23 @@ bool WakeWordDetect::IsDetectionRunning() {
void WakeWordDetect::Feed(const std::vector<int16_t>& data) {
    input_buffer_.insert(input_buffer_.end(), data.begin(), data.end());

-    auto feed_size = esp_afe_sr_v1.get_feed_chunksize(afe_detection_data_) * channels_;
+    auto feed_size = afe_iface_->get_feed_chunksize(afe_data_) * channels_;
    while (input_buffer_.size() >= feed_size) {
-        esp_afe_sr_v1.feed(afe_detection_data_, input_buffer_.data());
+        afe_iface_->feed(afe_data_, input_buffer_.data());
        input_buffer_.erase(input_buffer_.begin(), input_buffer_.begin() + feed_size);
    }
}

void WakeWordDetect::AudioDetectionTask() {
-    auto fetch_size = esp_afe_sr_v1.get_fetch_chunksize(afe_detection_data_);
-    auto feed_size = esp_afe_sr_v1.get_feed_chunksize(afe_detection_data_);
+    auto fetch_size = afe_iface_->get_fetch_chunksize(afe_data_);
+    auto feed_size = afe_iface_->get_feed_chunksize(afe_data_);
    ESP_LOGI(TAG, "Audio detection task started, feed size: %d fetch size: %d",
        feed_size, fetch_size);

    while (true) {
        xEventGroupWaitBits(event_group_, DETECTION_RUNNING_EVENT, pdFALSE, pdTRUE, portMAX_DELAY);

-        auto res = esp_afe_sr_v1.fetch(afe_detection_data_);
+        auto res = afe_iface_->fetch_with_delay(afe_data_, portMAX_DELAY);
        if (res == nullptr || res->ret_value == ESP_FAIL) {
            continue;
        }
@@ -138,17 +118,6 @@ void WakeWordDetect::AudioDetectionTask() {
        // Store the wake word data for voice recognition, like who is speaking
        StoreWakeWordData((uint16_t*)res->data, res->data_size / sizeof(uint16_t));

-        // VAD state change
-        if (vad_state_change_callback_) {
-            if (res->vad_state == AFE_VAD_SPEECH && !is_speaking_) {
-                is_speaking_ = true;
-                vad_state_change_callback_(true);
-            } else if (res->vad_state == AFE_VAD_SILENCE && is_speaking_) {
-                is_speaking_ = false;
-                vad_state_change_callback_(false);
-            }
-        }
-
        if (res->wakeup_state == WAKENET_DETECTED) {
            StopDetection();
            last_detected_wake_word_ = wake_words_[res->wake_word_index - 1];
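The wake-word side uses the same esp-sr 2.x API, only with AFE_TYPE_SR and a model list. The sketch below is illustrative, not repository code: `models` is assumed to come from the esp-sr model loader (not shown in these hunks), and the header names are assumptions that may differ between esp-sr releases.

// Illustrative sketch only (not taken from the repository).
#include "esp_afe_config.h"     // afe_config_init(), AFE_TYPE_SR  (assumed location)
#include "esp_afe_sr_iface.h"   // esp_afe_sr_iface_t, esp_afe_sr_data_t
#include "model_path.h"         // srmodel_list_t                  (assumed location)
#include "freertos/FreeRTOS.h"  // portMAX_DELAY
#include "esp_err.h"            // ESP_FAIL

// Build an SR-type AFE (wakenet, plus AEC when a reference channel exists).
esp_afe_sr_data_t* make_sr_afe(esp_afe_sr_iface_t** out_iface, srmodel_list_t* models, bool has_reference) {
    // "M" per mic channel plus "R" for the reference channel, as built in the diff.
    const char* input_format = has_reference ? "MR" : "M";
    afe_config_t* cfg = afe_config_init(input_format, models, AFE_TYPE_SR, AFE_MODE_HIGH_PERF);
    cfg->aec_init = has_reference;                      // AEC needs the reference channel
    cfg->memory_alloc_mode = AFE_MEMORY_ALLOC_MORE_PSRAM;
    *out_iface = esp_afe_handle_from_config(cfg);
    return (*out_iface)->create_from_config(cfg);
}

// Returns the 1-based index of the detected wake word, or 0 if nothing fired.
int poll_wake_word(esp_afe_sr_iface_t* iface, esp_afe_sr_data_t* data) {
    auto* res = iface->fetch_with_delay(data, portMAX_DELAY);
    if (res == nullptr || res->ret_value == ESP_FAIL) {
        return 0;
    }
    if (res->wakeup_state == WAKENET_DETECTED) {
        return res->wake_word_index;   // the diff maps this to wake_words_[index - 1]
    }
    return 0;
}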
@@ -24,7 +24,6 @@ public:
    void Initialize(int channels, bool reference);
    void Feed(const std::vector<int16_t>& data);
    void OnWakeWordDetected(std::function<void(const std::string& wake_word)> callback);
-    void OnVadStateChange(std::function<void(bool speaking)> callback);
    void StartDetection();
    void StopDetection();
    bool IsDetectionRunning();
@@ -33,14 +32,13 @@ public:
    const std::string& GetLastDetectedWakeWord() const { return last_detected_wake_word_; }

private:
-    esp_afe_sr_data_t* afe_detection_data_ = nullptr;
+    esp_afe_sr_iface_t* afe_iface_ = nullptr;
+    esp_afe_sr_data_t* afe_data_ = nullptr;
    char* wakenet_model_ = NULL;
    std::vector<std::string> wake_words_;
    std::vector<int16_t> input_buffer_;
    EventGroupHandle_t event_group_;
    std::function<void(const std::string& wake_word)> wake_word_detected_callback_;
-    std::function<void(bool speaking)> vad_state_change_callback_;
-    bool is_speaking_ = false;
    int channels_;
    bool reference_;
    std::string last_detected_wake_word_;
@@ -31,8 +31,16 @@ Backlight::~Backlight() {

void Backlight::RestoreBrightness() {
    // Load brightness from settings
-    Settings settings("display");
-    SetBrightness(settings.GetInt("brightness", 75));
+    Settings settings("display");
+    int saved_brightness = settings.GetInt("brightness", 75);
+
+    // If the stored brightness is 0 or too small, fall back to a default
+    if (saved_brightness <= 0) {
+        ESP_LOGW(TAG, "Brightness value (%d) is too small, setting to default (10)", saved_brightness);
+        saved_brightness = 10; // use a low default value
+    }
+
+    SetBrightness(saved_brightness);
}

void Backlight::SetBrightness(uint8_t brightness, bool permanent) {
@@ -7,8 +7,6 @@
#define AUDIO_INPUT_SAMPLE_RATE 16000
#define AUDIO_OUTPUT_SAMPLE_RATE 16000

-#define AUDIO_INPUT_REFERENCE false
-
#define AUDIO_I2S_GPIO_MCLK GPIO_NUM_45
#define AUDIO_I2S_GPIO_WS GPIO_NUM_41
#define AUDIO_I2S_GPIO_BCLK GPIO_NUM_39
@@ -9,3 +9,5 @@
        "CONFIG_LCD_ST7735_128X128=y"
      ]
    }
+  ]
+}
@@ -29,7 +29,29 @@ LV_FONT_DECLARE(font_awesome_30_4);
class Pmic : public Axp2101 {
public:
    Pmic(i2c_master_bus_handle_t i2c_bus, uint8_t addr) : Axp2101(i2c_bus, addr) {
-        // TODO: Configure the power management IC here...
+        WriteReg(0x22, 0b110); // PWRON > OFFLEVEL as POWEROFF Source enable
+        WriteReg(0x27, 0x10);  // hold 4s to power off
+
+        // Disable All DCs but DC1
+        WriteReg(0x80, 0x01);
+        // Disable All LDOs
+        WriteReg(0x90, 0x00);
+        WriteReg(0x91, 0x00);
+
+        // Set DC1 to 3.3V
+        WriteReg(0x82, (3300 - 1500) / 100);
+
+        // Set ALDO1 to 3.3V
+        WriteReg(0x92, (3300 - 500) / 100);
+
+        // Enable ALDO1(MIC)
+        WriteReg(0x90, 0x01);
+
+        WriteReg(0x64, 0x02); // CV charger voltage setting to 4.1V
+
+        WriteReg(0x61, 0x02); // set Main battery precharge current to 50mA
+        WriteReg(0x62, 0x08); // set Main battery charger current to 400mA ( 0x08-200mA, 0x09-300mA, 0x0A-400mA )
+        WriteReg(0x63, 0x01); // set Main battery term charge current to 25mA
    }
};
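As a quick check on the AXP2101 voltage registers written above: the DC1 and ALDO1 setpoints are encoded as (target mV - base mV) / 100 mV, so 3.3 V becomes (3300 - 1500) / 100 = 18 for register 0x82 and (3300 - 500) / 100 = 28 for register 0x92. A small standalone sketch of that arithmetic; the base and step values are taken from the expressions in the diff, not verified against the datasheet.

#include <cstdint>
#include <cstdio>

// Encode a millivolt target the way the Pmic constructor above does.
// Bases/step are copied from the diff's expressions; verify against the AXP2101 datasheet.
constexpr uint8_t axp2101_dc1_code(int mv)   { return static_cast<uint8_t>((mv - 1500) / 100); }
constexpr uint8_t axp2101_aldo1_code(int mv) { return static_cast<uint8_t>((mv - 500) / 100); }

int main() {
    std::printf("DC1   3.3V -> reg 0x82 value %u\n", axp2101_dc1_code(3300));   // 18 (0x12)
    std::printf("ALDO1 3.3V -> reg 0x92 value %u\n", axp2101_aldo1_code(3300)); // 28 (0x1C)
    return 0;
}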
@@ -45,8 +67,7 @@ static const sh8601_lcd_init_cmd_t vendor_specific_init[] = {
    {0x2A, (uint8_t[]){0x00, 0x00, 0x01, 0x6F}, 4, 0},
    {0x2B, (uint8_t[]){0x00, 0x00, 0x01, 0xBF}, 4, 0},
-    {0x51, (uint8_t[]){0x00}, 1, 10},
-    {0x29, (uint8_t[]){0x00}, 0, 10},
+    {0x51, (uint8_t[]){0xFF}, 1, 0},
+    {0x29, (uint8_t[]){0x00}, 0, 10}
};

// Add the new display class before the waveshare_amoled_1_8 class
@@ -82,6 +103,8 @@ protected:
    esp_lcd_panel_io_handle_t panel_io_;

    virtual void SetBrightnessImpl(uint8_t brightness) override {
+        auto display = Board::GetInstance().GetDisplay();
+        DisplayLockGuard lock(display);
        uint8_t data[1] = {((uint8_t)((255 * brightness) / 100))};
        int lcd_cmd = 0x51;
        lcd_cmd &= 0xff;
@@ -108,7 +131,7 @@ private:
        auto display = GetDisplay();
        display->SetChatMessage("system", "");
        display->SetEmotion("sleepy");
-        GetBacklight()->SetBrightness(10);
+        GetBacklight()->SetBrightness(20);
    });
    power_save_timer_->OnExitSleepMode([this]() {
        auto display = GetDisplay();
@@ -217,7 +240,6 @@ private:
        esp_lcd_panel_reset(panel);
        esp_lcd_panel_init(panel);
        esp_lcd_panel_invert_color(panel, false);
        esp_lcd_panel_swap_xy(panel, DISPLAY_SWAP_XY);
        esp_lcd_panel_mirror(panel, DISPLAY_MIRROR_X, DISPLAY_MIRROR_Y);
        esp_lcd_panel_disp_on_off(panel, true);
        display_ = new CustomLcdDisplay(panel_io, panel,
@@ -198,7 +198,7 @@ private:
        // Initialize the LCD control IO
        ESP_LOGD(TAG, "Install panel IO");
        esp_lcd_panel_io_spi_config_t io_config = {};
-        io_config.cs_gpio_num = GPIO_NUM_NC;  // On the 酷世diy Korvo board the LCD CS pin is GPIO46; on the official Korvo2 V3 the LCD CS pin is driven by IO3 of the TCA9554, so it is left as GPIO_NUM_NC here
+        io_config.cs_gpio_num = GPIO_NUM_46;
        io_config.dc_gpio_num = GPIO_NUM_2;
        io_config.spi_mode = 0;
        io_config.pclk_hz = 60 * 1000 * 1000;
@@ -5,7 +5,6 @@

#define AUDIO_INPUT_SAMPLE_RATE 24000
#define AUDIO_OUTPUT_SAMPLE_RATE 24000
-#define AUDIO_INPUT_REFERENCE true

#define AUDIO_I2S_GPIO_MCLK GPIO_NUM_10
#define AUDIO_I2S_GPIO_WS GPIO_NUM_12
@@ -6,7 +6,6 @@
#include <driver/gpio.h>
#include <driver/spi_master.h>

-#define AUDIO_INPUT_REFERENCE true
#define AUDIO_INPUT_SAMPLE_RATE 24000
#define AUDIO_OUTPUT_SAMPLE_RATE 24000
#define AUDIO_DEFAULT_OUTPUT_VOLUME 80
@@ -22,7 +22,7 @@ protected:

    DisplayFonts fonts_;

-    virtual void SetupUI();
+    void SetupUI();
    virtual bool Lock(int timeout_ms = 0) override;
    virtual void Unlock() override;

@@ -14,7 +14,7 @@ dependencies:
  78/xiaozhi-fonts: "~1.3.2"
  espressif/led_strip: "^2.4.1"
  espressif/esp_codec_dev: "~1.3.2"
-  espressif/esp-sr: "^1.9.0"
+  espressif/esp-sr: "^2.0.2"
  espressif/button: "^3.3.1"
  lvgl/lvgl: "~9.2.2"
  esp_lvgl_port: "~2.4.4"
@@ -72,7 +72,7 @@ def release(board_type, board_config):

    # Print Project Version
    project_version = get_project_version()
-    print(f"Project Version: {project_version}")
+    print(f"Project Version: {project_version}", config_path)
    release_path = f"releases/v{project_version}_{board_type}.zip"
    if os.path.exists(release_path):
        print(f"Skipping {board_type} because {release_path} already exists")
@@ -17,6 +17,4 @@ CONFIG_ESP32S3_INSTRUCTION_CACHE_32KB=y
CONFIG_ESP32S3_DATA_CACHE_64KB=y
CONFIG_ESP32S3_DATA_CACHE_LINE_64B=y

CONFIG_USE_WAKENET=y
CONFIG_SR_WN_WN9_NIHAOXIAOZHI_TTS=y
CONFIG_USE_MULTINET=n