diff --git a/Samples/BackgroundActivation/README.md b/Samples/BackgroundActivation/README.md index 3a194c722d..1b44c7ab33 100644 --- a/Samples/BackgroundActivation/README.md +++ b/Samples/BackgroundActivation/README.md @@ -30,6 +30,7 @@ This sample demonstrates the following: - Using a deferral object to include asynchronous code in your background task. - Handling the cancellation of a background task, and ensuring the task is cancelled when required conditions are no longer met. - Initializing background task progress and completion handlers when the app is launched. +- Registering a background task in a Background Task Registration Group. This sample uses the Single Process Model method for background activity. Applications can also use the Multiple Process Model method for running Background Tasks in a separate process from the foreground application. diff --git a/Samples/BackgroundActivation/cpp/BackgroundActivation.vcxproj b/Samples/BackgroundActivation/cpp/BackgroundActivation.vcxproj index a09182a2c3..a4c9c3009b 100644 --- a/Samples/BackgroundActivation/cpp/BackgroundActivation.vcxproj +++ b/Samples/BackgroundActivation/cpp/BackgroundActivation.vcxproj @@ -164,6 +164,9 @@ ..\shared\Scenario5_ApplicationTriggerTask.xaml + + ..\shared\Scenario6_GroupedTask.xaml + @@ -177,6 +180,7 @@ + Styles\Styles.xaml @@ -218,6 +222,9 @@ ..\shared\Scenario5_ApplicationTriggerTask.xaml + + ..\shared\Scenario6_GroupedTask.xaml + diff --git a/Samples/BackgroundActivation/cpp/BackgroundActivation.vcxproj.filters b/Samples/BackgroundActivation/cpp/BackgroundActivation.vcxproj.filters index ccef0cf5b2..6af32c79a8 100644 --- a/Samples/BackgroundActivation/cpp/BackgroundActivation.vcxproj.filters +++ b/Samples/BackgroundActivation/cpp/BackgroundActivation.vcxproj.filters @@ -23,10 +23,11 @@ + - + @@ -35,6 +36,7 @@ + @@ -49,6 +51,7 @@ + diff --git a/Samples/BackgroundActivation/cpp/BackgroundActivity.cpp b/Samples/BackgroundActivation/cpp/BackgroundActivity.cpp index 
bfb0894472..17e4113e21 100644 --- a/Samples/BackgroundActivation/cpp/BackgroundActivity.cpp +++ b/Samples/BackgroundActivation/cpp/BackgroundActivity.cpp @@ -13,6 +13,7 @@ #include "BackgroundActivity.h" using namespace SDKTemplate; +using namespace Windows::ApplicationModel::Activation; using namespace Windows::ApplicationModel::Background; using namespace Windows::Foundation; using namespace Windows::Storage; @@ -81,6 +82,11 @@ void BackgroundActivity::OnCanceled(IBackgroundTaskInstance^ taskInstance, Backg CancelReason = reason; } +void BackgroundActivity::OnStart(BackgroundTaskRegistrationGroup^ sender, BackgroundActivatedEventArgs^ args) +{ + Start(args->TaskInstance); +} + void BackgroundActivity::Start(IBackgroundTaskInstance^ taskInstance) { // Use the taskInstance->Name and/or taskInstance->InstanceId to determine diff --git a/Samples/BackgroundActivation/cpp/BackgroundActivity.h b/Samples/BackgroundActivation/cpp/BackgroundActivity.h index c4d3d25e25..41e4f702f2 100644 --- a/Samples/BackgroundActivation/cpp/BackgroundActivity.h +++ b/Samples/BackgroundActivation/cpp/BackgroundActivity.h @@ -19,6 +19,7 @@ namespace SDKTemplate void Run(Windows::ApplicationModel::Background::IBackgroundTaskInstance^ taskInstance); void OnCanceled(Windows::ApplicationModel::Background::IBackgroundTaskInstance^ taskInstance, Windows::ApplicationModel::Background::BackgroundTaskCancellationReason reason); + static void OnStart(Windows::ApplicationModel::Background::BackgroundTaskRegistrationGroup^ sender, Windows::ApplicationModel::Activation::BackgroundActivatedEventArgs^ args); static void Start(Windows::ApplicationModel::Background::IBackgroundTaskInstance^ taskInstance); private: Windows::ApplicationModel::Background::BackgroundTaskCancellationReason CancelReason = Windows::ApplicationModel::Background::BackgroundTaskCancellationReason::Abort; diff --git a/Samples/BackgroundActivation/cpp/SampleConfiguration.cpp b/Samples/BackgroundActivation/cpp/SampleConfiguration.cpp 
index 2ea014ce0d..6419edac0f 100644 --- a/Samples/BackgroundActivation/cpp/SampleConfiguration.cpp +++ b/Samples/BackgroundActivation/cpp/SampleConfiguration.cpp @@ -16,6 +16,7 @@ using namespace SDKTemplate; using namespace Windows::ApplicationModel; using namespace Windows::ApplicationModel::Activation; +using namespace Windows::Foundation; using namespace Windows::Foundation::Collections; String^ BackgroundTaskSample::SampleBackgroundTaskProgress = ""; @@ -34,6 +35,10 @@ String^ BackgroundTaskSample::ApplicationTriggerTaskProgress = ""; bool BackgroundTaskSample::ApplicationTriggerTaskRegistered = false; String^ BackgroundTaskSample::ApplicationTriggerTaskResult = ""; +String^ BackgroundTaskSample::GroupedBackgroundTaskProgress = ""; +bool BackgroundTaskSample::GroupedBackgroundTaskRegistered = false; + + PropertySet^ BackgroundTaskSample::TaskStatuses = ref new PropertySet(); Array^ MainPage::scenariosInner = ref new Array @@ -44,7 +49,8 @@ Array^ MainPage::scenariosInner = ref new Array { "Background task with a condition", "SDKTemplate.SampleBackgroundTaskWithCondition" }, { "Servicing complete task", "SDKTemplate.ServicingCompleteTask" }, { "Background task with time trigger", "SDKTemplate.TimeTriggeredTask" }, - { "Background task with application trigger", "SDKTemplate.ApplicationTriggerTask" } + { "Background task with application trigger", "SDKTemplate.ApplicationTriggerTask" }, + { "Grouped background task", "SDKTemplate.GroupedBackgroundTask" }, }; String^ BackgroundTaskSample::GetBackgroundTaskStatus(String^ name) @@ -70,6 +76,10 @@ String^ BackgroundTaskSample::GetBackgroundTaskStatus(String^ name) { registered = BackgroundTaskSample::ApplicationTriggerTaskRegistered; } + else if (name == GroupedBackgroundTaskName) + { + registered = BackgroundTaskSample::GroupedBackgroundTaskRegistered; + } String^ status = registered ? 
"Registered" : "Unregistered"; @@ -81,7 +91,7 @@ String^ BackgroundTaskSample::GetBackgroundTaskStatus(String^ name) return status; } -BackgroundTaskRegistration^ BackgroundTaskSample::RegisterBackgroundTask(String^ taskEntryPoint, String^ name, IBackgroundTrigger^ trigger, IBackgroundCondition^ condition) +BackgroundTaskRegistration^ BackgroundTaskSample::RegisterBackgroundTask(String^ taskEntryPoint, String^ name, IBackgroundTrigger^ trigger, IBackgroundCondition^ condition, BackgroundTaskRegistrationGroup^ group) { if (TaskRequiresBackgroundAccess(name)) { @@ -112,6 +122,11 @@ BackgroundTaskRegistration^ BackgroundTaskSample::RegisterBackgroundTask(String^ builder->CancelOnConditionLoss = true; } + if (group != nullptr) + { + builder->TaskGroup = group; + } + auto task = builder->Register(); UpdateBackgroundTaskRegistrationStatus(name, true); @@ -137,28 +152,55 @@ bool BackgroundTaskSample::TaskRequiresBackgroundAccess(String^ name) } } -void BackgroundTaskSample::UnregisterBackgroundTasks(String^ name) +void BackgroundTaskSample::UnregisterBackgroundTasks(String^ name, BackgroundTaskRegistrationGroup^ group) { // - // Loop through all background tasks and unregister any that have a name that matches - // the name passed into this function. + // If the given task group is registered then loop through all background tasks associated with it + // and unregister any with the name passed into this function. // - auto iter = BackgroundTaskRegistration::AllTasks->First(); - auto hascur = iter->HasCurrent; - while (hascur) + if (group != nullptr) { - auto cur = iter->Current->Value; - - if (cur->Name == name) + for (auto pair : group->AllTasks) + { + auto task = pair->Value; + if (task->Name == name) + { + task->Unregister(true); + } + } + } + else + { + // + // Loop through all ungrouped background tasks and unregister any with the name passed into this function. 
+ // + for (auto pair : BackgroundTaskRegistration::AllTasks) { - cur->Unregister(true); - UpdateBackgroundTaskRegistrationStatus(name, false); + auto task = pair->Value; + if (task->Name == name) + { + task->Unregister(true); + } } + } + + UpdateBackgroundTaskRegistrationStatus(name, false); +} + - hascur = iter->MoveNext(); +BackgroundTaskRegistrationGroup^ BackgroundTaskSample::GetTaskGroup(String^ id, String^ groupName) +{ + auto group = BackgroundTaskRegistration::GetTaskGroup(id); + + if (group == nullptr) + { + group = ref new BackgroundTaskRegistrationGroup(id, groupName); } + + return group; } + void BackgroundTaskSample::UpdateBackgroundTaskRegistrationStatus(String^ name, bool registered) { if (name == SampleBackgroundTaskName) @@ -181,6 +223,10 @@ void BackgroundTaskSample::UpdateBackgroundTaskRegistrationStatus(String^ name, { BackgroundTaskSample::ApplicationTriggerTaskRegistered = registered; } + else if (name == GroupedBackgroundTaskName) + { + BackgroundTaskSample::GroupedBackgroundTaskRegistered = registered; + } } void BackgroundTaskSample::RemoveBackgroundTaskStatus(String^ name) @@ -195,3 +241,9 @@ void App::OnBackgroundActivated(BackgroundActivatedEventArgs^ args) { BackgroundActivity::Start(args->TaskInstance); } + +void App::Partial_Construct() +{ + auto group = BackgroundTaskSample::GetTaskGroup(BackgroundTaskGroupId, BackgroundTaskGroupFriendlyName); + group->BackgroundActivated += ref new TypedEventHandler(&BackgroundActivity::OnStart, CallbackContext::Same); +} diff --git a/Samples/BackgroundActivation/cpp/SampleConfiguration.h b/Samples/BackgroundActivation/cpp/SampleConfiguration.h index a416f94ec9..b687b1b982 100644 --- a/Samples/BackgroundActivation/cpp/SampleConfiguration.h +++ b/Samples/BackgroundActivation/cpp/SampleConfiguration.h @@ -11,9 +11,12 @@ #pragma once #include +#include "App.g.h" +#include "BackgroundActivity.h" using namespace Platform; using namespace Windows::ApplicationModel::Background; +using namespace 
Windows::ApplicationModel::Activation; using namespace Windows::Storage; #define SampleBackgroundTaskName "SampleBackgroundTask" @@ -21,6 +24,10 @@ using namespace Windows::Storage; #define ServicingCompleteTaskName "ServicingCompleteTask" #define TimeTriggeredTaskName "TimeTriggeredTask" #define ApplicationTriggerTaskName "ApplicationTriggerTask" +#define GroupedBackgroundTaskName "GroupedBackgroundTask" +#define BackgroundTaskGroupId "3F2504E0-5F89-41D3-9A0C-0405E82C3333" +#define BackgroundTaskGroupFriendlyName "Background Task Group" + namespace SDKTemplate { @@ -56,9 +63,10 @@ namespace SDKTemplate { public: static String^ GetBackgroundTaskStatus(String^ name); - static BackgroundTaskRegistration^ RegisterBackgroundTask(String^ taskEntryPoint, String^ name, IBackgroundTrigger^ trigger, IBackgroundCondition^ condition); + static BackgroundTaskRegistration^ RegisterBackgroundTask(String^ taskEntryPoint, String^ name, IBackgroundTrigger^ trigger, IBackgroundCondition^ condition, BackgroundTaskRegistrationGroup^ group = nullptr); static bool TaskRequiresBackgroundAccess(String^ name); - static void UnregisterBackgroundTasks(String^ name); + static void UnregisterBackgroundTasks(String^ name, BackgroundTaskRegistrationGroup^ group = nullptr); + static BackgroundTaskRegistrationGroup^ GetTaskGroup(String^ id, String^ groupName); static void UpdateBackgroundTaskRegistrationStatus(String^ name, bool registered); static void RemoveBackgroundTaskStatus(String^ name); @@ -78,7 +86,11 @@ namespace SDKTemplate static bool ApplicationTriggerTaskRegistered; static String^ ApplicationTriggerTaskResult; + static String^ GroupedBackgroundTaskProgress; + static bool GroupedBackgroundTaskRegistered; + static Windows::Foundation::Collections::PropertySet^ TaskStatuses; }; - } + + diff --git a/Samples/BackgroundActivation/cpp/Scenario6_GroupedTask.xaml.cpp b/Samples/BackgroundActivation/cpp/Scenario6_GroupedTask.xaml.cpp new file mode 100644 index 0000000000..ac03aae2f5 --- 
/dev/null +++ b/Samples/BackgroundActivation/cpp/Scenario6_GroupedTask.xaml.cpp @@ -0,0 +1,155 @@ +//********************************************************* +// +// Copyright (c) Microsoft. All rights reserved. +// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. +// +//********************************************************* + +// +// Scenario6_GroupedTask.xaml.cpp +// Implementation of the GroupedBackgroundTask class +// + +#include "pch.h" +#include "Scenario6_GroupedTask.xaml.h" +#include "SampleConfiguration.h" + +using namespace SDKTemplate; +using namespace concurrency; +using namespace Windows::ApplicationModel::Background; +using namespace Windows::UI::Core; +using namespace Windows::UI::Xaml; +using namespace Windows::UI::Xaml::Controls; +using namespace Windows::UI::Xaml::Navigation; + +GroupedBackgroundTask::GroupedBackgroundTask() +{ + InitializeComponent(); +} + +/// +/// Invoked when this page is about to be displayed in a Frame. +/// +/// Event data that describes how this page was reached. The Parameter +/// property is typically used to configure the page. +void GroupedBackgroundTask::OnNavigatedTo(NavigationEventArgs^ e) +{ + // A pointer back to the main page. This is needed if you want to call methods in MainPage such + // as NotifyUser() + rootPage = MainPage::Current; + + group = BackgroundTaskSample::GetTaskGroup(BackgroundTaskGroupId, BackgroundTaskGroupFriendlyName); + + // + // Attach progress and completed handlers to any existing tasks. 
+ // + for (auto pair : group->AllTasks) + { + auto task = pair->Value; + if (task->Name == GroupedBackgroundTaskName) + { + BackgroundTaskSample::UpdateBackgroundTaskRegistrationStatus(task->Name, true); + AttachProgressAndCompletedHandlers(task); + break; + } + } + + UpdateUI(); +} + +/// +/// Attach progress and completed handlers to a background task. +/// +/// The task to attach progress and completed handlers to. +void GroupedBackgroundTask::AttachProgressAndCompletedHandlers(IBackgroundTaskRegistration^ task) +{ + task->Progress += ref new BackgroundTaskProgressEventHandler(this, &GroupedBackgroundTask::OnProgress); + task->Completed += ref new BackgroundTaskCompletedEventHandler(this, &GroupedBackgroundTask::OnCompleted); +} + +/// +/// Register a Grouped Background Task. +/// +/// +/// +void GroupedBackgroundTask::RegisterGroupedBackgroundTask(Platform::Object^ sender, Windows::UI::Xaml::RoutedEventArgs^ e) +{ + auto task = BackgroundTaskSample::RegisterBackgroundTask(nullptr, + GroupedBackgroundTaskName, + ref new SystemTrigger(SystemTriggerType::TimeZoneChange, false), + nullptr, + group); + AttachProgressAndCompletedHandlers(task); + UpdateUI(); +} + +/// +/// Unregister a Grouped Background Task. +/// +/// +/// +void GroupedBackgroundTask::UnregisterGroupedTask(Platform::Object^ sender, Windows::UI::Xaml::RoutedEventArgs^ e) +{ + BackgroundTaskSample::UnregisterBackgroundTasks(GroupedBackgroundTaskName, group); + UpdateUI(); +} + +/// +/// Unregister all Background Tasks that are not grouped. +/// +/// +/// +void GroupedBackgroundTask::UnregisterUngroupedTasks(Platform::Object^ sender, Windows::UI::Xaml::RoutedEventArgs^ e) +{ + for (auto pair : BackgroundTaskRegistration::AllTasks) + { + auto task = pair->Value; + task->Unregister(true); + BackgroundTaskSample::UpdateBackgroundTaskRegistrationStatus(task->Name, false); + } +} + +/// +/// Handle background task progress. +/// +/// The task that is reporting progress.
+/// Arguments of the progress report. +void GroupedBackgroundTask::OnProgress(BackgroundTaskRegistration^ task, BackgroundTaskProgressEventArgs^ args) +{ + Dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args]() + { + auto progress = "Progress: " + args->Progress + "%"; + BackgroundTaskSample::GroupedBackgroundTaskProgress = progress; + UpdateUI(); + })); +} + +/// +/// Handle background task completion. +/// +/// The task that is reporting completion. +/// Arguments of the completion report. +void GroupedBackgroundTask::OnCompleted(BackgroundTaskRegistration^ task, BackgroundTaskCompletedEventArgs^ args) +{ + UpdateUI(); +} + +/// +/// Update the scenario UI. +/// +void GroupedBackgroundTask::UpdateUI() +{ + auto uiDelegate = [this]() + { + RegisterButton->IsEnabled = !BackgroundTaskSample::GroupedBackgroundTaskRegistered; + UnregisterGroupedButton->IsEnabled = BackgroundTaskSample::GroupedBackgroundTaskRegistered; + Progress->Text = BackgroundTaskSample::GroupedBackgroundTaskProgress; + Status->Text = BackgroundTaskSample::GetBackgroundTaskStatus(GroupedBackgroundTaskName); + }; + auto handler = ref new Windows::UI::Core::DispatchedHandler(uiDelegate, Platform::CallbackContext::Any); + + Dispatcher->RunAsync(CoreDispatcherPriority::Normal, handler); +} diff --git a/Samples/BackgroundActivation/cpp/Scenario6_GroupedTask.xaml.h b/Samples/BackgroundActivation/cpp/Scenario6_GroupedTask.xaml.h new file mode 100644 index 0000000000..4f639635a1 --- /dev/null +++ b/Samples/BackgroundActivation/cpp/Scenario6_GroupedTask.xaml.h @@ -0,0 +1,48 @@ +//********************************************************* +// +// Copyright (c) Microsoft. All rights reserved. +// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. 
+// +//********************************************************* + +// +// Scenario6_GroupedTask.xaml.h +// Declaration of the GroupedBackgroundTask class +// + +#pragma once + +#include +#include "pch.h" +#include "Scenario6_GroupedTask.g.h" +#include "MainPage.xaml.h" + +namespace SDKTemplate +{ + /// + /// An empty page that can be used on its own or navigated to within a Frame. + /// + [Windows::Foundation::Metadata::WebHostHidden] + public ref class GroupedBackgroundTask sealed + { + public: + GroupedBackgroundTask(); + + protected: + virtual void OnNavigatedTo(Windows::UI::Xaml::Navigation::NavigationEventArgs^ e) override; + private: + SDKTemplate::MainPage^ rootPage; + BackgroundTaskRegistrationGroup^ group = nullptr; + + void AttachProgressAndCompletedHandlers(Windows::ApplicationModel::Background::IBackgroundTaskRegistration^ task); + void RegisterGroupedBackgroundTask(Platform::Object^ sender, Windows::UI::Xaml::RoutedEventArgs^ e); + void UnregisterGroupedTask(Platform::Object^ sender, Windows::UI::Xaml::RoutedEventArgs^ e); + void UnregisterUngroupedTasks(Platform::Object^ sender, Windows::UI::Xaml::RoutedEventArgs^ e); + void OnProgress(Windows::ApplicationModel::Background::BackgroundTaskRegistration^ task, Windows::ApplicationModel::Background::BackgroundTaskProgressEventArgs^ args); + void OnCompleted(Windows::ApplicationModel::Background::BackgroundTaskRegistration^ task, Windows::ApplicationModel::Background::BackgroundTaskCompletedEventArgs^ args); + void UpdateUI(); + }; +} diff --git a/Samples/BackgroundActivation/cpp/pch.h b/Samples/BackgroundActivation/cpp/pch.h index 1d46d72775..5208eae636 100644 --- a/Samples/BackgroundActivation/cpp/pch.h +++ b/Samples/BackgroundActivation/cpp/pch.h @@ -28,6 +28,10 @@ namespace SDKTemplate /// protected: virtual void OnBackgroundActivated(Windows::ApplicationModel::Activation::BackgroundActivatedEventArgs^ args) override; + + private: + void Partial_Construct(); }; } + #include "App.xaml.h" \ No newline 
at end of file diff --git a/Samples/BackgroundActivation/cs/BackgroundActivation.csproj b/Samples/BackgroundActivation/cs/BackgroundActivation.csproj index 18360b800d..c018041096 100644 --- a/Samples/BackgroundActivation/cs/BackgroundActivation.csproj +++ b/Samples/BackgroundActivation/cs/BackgroundActivation.csproj @@ -119,6 +119,9 @@ Scenario3_ServicingCompleteTask.xaml + + Scenario6_GroupedTask.xaml + @@ -165,6 +168,12 @@ + + Scenario6_GroupedTask.xaml + MSBuild:Compile + + + Styles\Styles.xaml MSBuild:Compile diff --git a/Samples/BackgroundActivation/cs/BackgroundActivity.cs b/Samples/BackgroundActivation/cs/BackgroundActivity.cs index 9a1e7f496e..77961f2276 100644 --- a/Samples/BackgroundActivation/cs/BackgroundActivity.cs +++ b/Samples/BackgroundActivation/cs/BackgroundActivity.cs @@ -11,6 +11,7 @@ using System; using System.Diagnostics; using System.Threading; +using Windows.ApplicationModel.Activation; using Windows.ApplicationModel.Background; using Windows.Foundation; using Windows.Storage; @@ -99,6 +100,11 @@ private void PeriodicTimerCallback(ThreadPoolTimer timer) } } + public static void Start(BackgroundTaskRegistrationGroup sender, BackgroundActivatedEventArgs args) + { + Start(args.TaskInstance); + } + public static void Start(IBackgroundTaskInstance taskInstance) { // Use the taskInstance.Name and/or taskInstance.InstanceId to determine diff --git a/Samples/BackgroundActivation/cs/SampleConfiguration.cs b/Samples/BackgroundActivation/cs/SampleConfiguration.cs index 20803531ee..1327ab1561 100644 --- a/Samples/BackgroundActivation/cs/SampleConfiguration.cs +++ b/Samples/BackgroundActivation/cs/SampleConfiguration.cs @@ -31,7 +31,8 @@ public partial class MainPage : Page new Scenario() { Title="Background Task with Condition", ClassType=typeof(SampleBackgroundTaskWithCondition)}, new Scenario() { Title="Servicing Complete Task", ClassType=typeof(ServicingCompleteTask)}, new Scenario() { Title="Background Task with Time Trigger", 
ClassType=typeof(TimeTriggeredTask) }, - new Scenario() { Title="Background Task with Application Trigger", ClassType=typeof(ApplicationTriggerTask) } + new Scenario() { Title="Background Task with Application Trigger", ClassType=typeof(ApplicationTriggerTask) }, + new Scenario() { Title="Grouped Background Task", ClassType=typeof(GroupedBackgroundTask) }, }; } @@ -67,6 +68,12 @@ class BackgroundTaskSample public static string ApplicationTriggerTaskResult = ""; public static bool ApplicationTriggerTaskRegistered = false; + public const string GroupedBackgroundTaskName = "GroupedBackgroundTask"; + public const string BackgroundTaskGroupId = "3F2504E0-5F89-41D3-9A0C-0405E82C3333"; + public const string BackgroundTaskGroupFriendlyName = "Background Task Group"; + public static string GroupedBackgroundTaskProgress = ""; + public static bool GroupedBackgroundTaskRegistered = false; + // These strings are manipulated by multiple threads, so we must // use a thread-safe container. public static PropertySet TaskStatuses = new PropertySet(); @@ -79,7 +86,7 @@ class BackgroundTaskSample /// A name for the background task. /// The trigger for the background task. /// An optional conditional event that must be true for the task to fire. 
- public static BackgroundTaskRegistration RegisterBackgroundTask(String taskEntryPoint, String name, IBackgroundTrigger trigger, IBackgroundCondition condition) + public static BackgroundTaskRegistration RegisterBackgroundTask(String taskEntryPoint, String name, IBackgroundTrigger trigger, IBackgroundCondition condition, BackgroundTaskRegistrationGroup group = null) { if (TaskRequiresBackgroundAccess(name)) { @@ -111,6 +118,11 @@ public static BackgroundTaskRegistration RegisterBackgroundTask(String taskEntry builder.CancelOnConditionLoss = true; } + if (group != null) + { + builder.TaskGroup = group; + } + BackgroundTaskRegistration task = builder.Register(); UpdateBackgroundTaskRegistrationStatus(name, true); @@ -127,23 +139,56 @@ public static BackgroundTaskRegistration RegisterBackgroundTask(String taskEntry /// Unregister background tasks with specified name. /// /// Name of the background task to unregister. - public static void UnregisterBackgroundTasks(String name) + public static void UnregisterBackgroundTasks(String name, BackgroundTaskRegistrationGroup group = null) { // - // Loop through all background tasks and unregister any with SampleBackgroundTaskName or - // SampleBackgroundTaskWithConditionName. + // If the given task group is registered then loop through all background tasks associated with it + // and unregister any with the given name. // - foreach (var cur in BackgroundTaskRegistration.AllTasks) + if (group != null) + { + foreach (var cur in group.AllTasks) + { + if (cur.Value.Name == name) + { + cur.Value.Unregister(true); + } + } + } + else { - if (cur.Value.Name == name) + // + // Loop through all ungrouped background tasks and unregister any with the given name. + // + foreach (var cur in BackgroundTaskRegistration.AllTasks) { - cur.Value.Unregister(true); + if (cur.Value.Name == name) + { + cur.Value.Unregister(true); + } } } UpdateBackgroundTaskRegistrationStatus(name, false); } + /// + /// Retrieve a registered background task group. 
If no group is registered with the given id, + /// then create a new one and return it. + /// + /// The task group associated with the given id + public static BackgroundTaskRegistrationGroup GetTaskGroup(string id, string groupName) + { + var group = BackgroundTaskRegistration.GetTaskGroup(id); + + if (group == null) + { + group = new BackgroundTaskRegistrationGroup(id, groupName); + } + + return group; + } + /// /// Store the registration status of a background task with a given name. /// @@ -168,6 +213,9 @@ public static void UpdateBackgroundTaskRegistrationStatus(String name, bool regi case ApplicationTriggerTaskName: ApplicationTriggerTaskRegistered = registered; break; + case GroupedBackgroundTaskName: + GroupedBackgroundTaskRegistered = registered; + break; } } @@ -196,6 +244,9 @@ public static String GetBackgroundTaskStatus(String name) case ApplicationTriggerTaskName: registered = ApplicationTriggerTaskRegistered; break; + case GroupedBackgroundTaskName: + registered = GroupedBackgroundTaskRegistered; + break; } var status = registered ? "Registered" : "Unregistered"; @@ -243,5 +294,14 @@ protected override void OnBackgroundActivated(BackgroundActivatedEventArgs args) { BackgroundActivity.Start(args.TaskInstance); } + + /// + /// Register for grouped background task events in the Application constructor. + /// + partial void Construct() + { + var group = BackgroundTaskSample.GetTaskGroup(BackgroundTaskSample.BackgroundTaskGroupId, BackgroundTaskSample.BackgroundTaskGroupFriendlyName); + group.BackgroundActivated += BackgroundActivity.Start; + } } } \ No newline at end of file diff --git a/Samples/BackgroundActivation/cs/Scenario6_GroupedTask.xaml.cs b/Samples/BackgroundActivation/cs/Scenario6_GroupedTask.xaml.cs new file mode 100644 index 0000000000..00c214d0ca --- /dev/null +++ b/Samples/BackgroundActivation/cs/Scenario6_GroupedTask.xaml.cs @@ -0,0 +1,156 @@ +//********************************************************* +// +// Copyright (c) Microsoft. 
All rights reserved. +// This code is licensed under the MIT License (MIT). +// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. +// +//********************************************************* + +using System; +using SDKTemplate; +using Windows.ApplicationModel.Background; +using Windows.Storage; +using Windows.UI.Core; +using Windows.UI.Xaml; +using Windows.UI.Xaml.Controls; +using Windows.UI.Xaml.Navigation; + +// The Blank Page item template is documented at http://go.microsoft.com/fwlink/?LinkId=234238 + +namespace SDKTemplate +{ + /// + /// An empty page that can be used on its own or navigated to within a Frame. + /// + public sealed partial class GroupedBackgroundTask : Page + { + // A pointer back to the main page. This is needed if you want to call methods in MainPage such + // as NotifyUser() + MainPage rootPage = MainPage.Current; + + public GroupedBackgroundTask() + { + this.InitializeComponent(); + } + + BackgroundTaskRegistrationGroup group; + + /// + /// Invoked when this page is about to be displayed in a Frame. + /// + /// Event data that describes how this page was reached. The Parameter + /// property is typically used to configure the page. + protected override void OnNavigatedTo(NavigationEventArgs e) + { + group = BackgroundTaskSample.GetTaskGroup(BackgroundTaskSample.BackgroundTaskGroupId, BackgroundTaskSample.BackgroundTaskGroupFriendlyName); + + foreach (var task in group.AllTasks) + { + if (task.Value.Name == BackgroundTaskSample.GroupedBackgroundTaskName) + { + AttachProgressAndCompletedHandlers(task.Value); + BackgroundTaskSample.UpdateBackgroundTaskRegistrationStatus(BackgroundTaskSample.GroupedBackgroundTaskName, true); + break; + } + } + + UpdateUI(); + } + + /// + /// Register a Grouped Background Task. 
+ /// + /// + /// + private void RegisterGroupedBackgroundTask(object sender, RoutedEventArgs e) + { + var task = BackgroundTaskSample.RegisterBackgroundTask(null, + BackgroundTaskSample.GroupedBackgroundTaskName, + new SystemTrigger(SystemTriggerType.TimeZoneChange, false), + null, + group); + AttachProgressAndCompletedHandlers(task); + UpdateUI(); + } + + + /// + /// Unregister a Grouped Background Task. + /// + /// + /// + private void UnregisterGroupedTask(object sender, RoutedEventArgs e) + { + BackgroundTaskSample.UnregisterBackgroundTasks(BackgroundTaskSample.GroupedBackgroundTaskName, group); + UpdateUI(); + } + + /// + /// Unregisters all Background Tasks that are not in groups. + /// This will not impact tasks registered with groups. + /// + /// + /// + private void UnregisterUngroupedTasks(object sender, RoutedEventArgs e) + { + foreach(var cur in BackgroundTaskRegistration.AllTasks) + { + cur.Value.Unregister(true); + BackgroundTaskSample.UpdateBackgroundTaskRegistrationStatus(cur.Value.Name, false); + } + } + + /// + /// Attach progress and completed handlers to a background task. + /// + /// The task to attach progress and completed handlers to. + private void AttachProgressAndCompletedHandlers(IBackgroundTaskRegistration task) + { + task.Progress += new BackgroundTaskProgressEventHandler(OnProgress); + task.Completed += new BackgroundTaskCompletedEventHandler(OnCompleted); + } + + /// + /// Handle background task progress. + /// + /// The task that is reporting progress. + /// Arguments of the progress report. + private void OnProgress(IBackgroundTaskRegistration task, BackgroundTaskProgressEventArgs args) + { + var ignored = Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + { + var progress = "Progress: " + args.Progress + "%"; + BackgroundTaskSample.GroupedBackgroundTaskProgress = progress; + UpdateUI(); + }); + } + + /// + /// Handle background task completion. + /// + /// The task that is reporting completion. 
+ /// Arguments of the completion report. + private void OnCompleted(IBackgroundTaskRegistration task, BackgroundTaskCompletedEventArgs args) + { + UpdateUI(); + } + + /// + /// Update the scenario UI. + /// + private async void UpdateUI() + { + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, + () => + { + RegisterButton.IsEnabled = !BackgroundTaskSample.GroupedBackgroundTaskRegistered; + UnregisterGroupedButton.IsEnabled = BackgroundTaskSample.GroupedBackgroundTaskRegistered; + Progress.Text = BackgroundTaskSample.GroupedBackgroundTaskProgress; + Status.Text = BackgroundTaskSample.GetBackgroundTaskStatus(BackgroundTaskSample.GroupedBackgroundTaskName); + }); + } + } +} diff --git a/Samples/BackgroundActivation/shared/Scenario6_GroupedTask.xaml b/Samples/BackgroundActivation/shared/Scenario6_GroupedTask.xaml new file mode 100644 index 0000000000..248bf2406d --- /dev/null +++ b/Samples/BackgroundActivation/shared/Scenario6_GroupedTask.xaml @@ -0,0 +1,49 @@ + + + + + + + + + + Registers a background task for a Time zone change trigger event with a background task + registration group. Registering with a group enables the task registration to remain active + when all ungrouped tasks are unregistered. 
+ + + + + + + + + + + + + diff --git a/Samples/BackgroundActivation/vb/BackgroundActivation.vbproj b/Samples/BackgroundActivation/vb/BackgroundActivation.vbproj index e024221b86..d4257db511 100644 --- a/Samples/BackgroundActivation/vb/BackgroundActivation.vbproj +++ b/Samples/BackgroundActivation/vb/BackgroundActivation.vbproj @@ -123,6 +123,9 @@ Scenario3_ServicingCompleteTask.xaml + + Scenario6_GroupedTask.xaml + @@ -169,6 +172,12 @@ + + Scenario6_GroupedTask.xaml + MSBuild:Compile + + + Styles\Styles.xaml MSBuild:Compile diff --git a/Samples/BackgroundActivation/vb/BackgroundActivity.vb b/Samples/BackgroundActivation/vb/BackgroundActivity.vb index e86104a9e0..e97d81c4ab 100644 --- a/Samples/BackgroundActivation/vb/BackgroundActivity.vb +++ b/Samples/BackgroundActivation/vb/BackgroundActivity.vb @@ -72,6 +72,11 @@ Namespace Global.SDKTemplate End If End Sub + Public Shared Sub Start(sender As BackgroundTaskRegistrationGroup, args As BackgroundActivatedEventArgs) + Start(args.TaskInstance) + End Sub + + Public Shared Sub Start(taskInstance As IBackgroundTaskInstance) ' Use the taskInstance.Name and/or taskInstance.InstanceId to determine ' what background activity to perform. 
In this sample, all of our diff --git a/Samples/BackgroundActivation/vb/SampleConfiguration.vb b/Samples/BackgroundActivation/vb/SampleConfiguration.vb index 59dfc547aa..95894ef10c 100644 --- a/Samples/BackgroundActivation/vb/SampleConfiguration.vb +++ b/Samples/BackgroundActivation/vb/SampleConfiguration.vb @@ -24,7 +24,7 @@ Namespace Global.SDKTemplate Public Const FEATURE_NAME As String = "Background Activation" - Public ReadOnly Property scenarios As New List(Of Scenario) From {New Scenario() With {.Title = "Background Task", .ClassType = GetType(SampleBackgroundTask)}, New Scenario() With {.Title = "Background Task with Condition", .ClassType = GetType(SampleBackgroundTaskWithCondition)}, New Scenario() With {.Title = "Servicing Complete Task", .ClassType = GetType(ServicingCompleteTask)}, New Scenario() With {.Title = "Background Task with Time Trigger", .ClassType = GetType(TimeTriggeredTask)}, New Scenario() With {.Title = "Background Task with Application Trigger", .ClassType = GetType(ApplicationTriggerTask)}} + Public ReadOnly Property scenarios As New List(Of Scenario) From {New Scenario() With {.Title = "Background Task", .ClassType = GetType(SampleBackgroundTask)}, New Scenario() With {.Title = "Background Task with Condition", .ClassType = GetType(SampleBackgroundTaskWithCondition)}, New Scenario() With {.Title = "Servicing Complete Task", .ClassType = GetType(ServicingCompleteTask)}, New Scenario() With {.Title = "Background Task with Time Trigger", .ClassType = GetType(TimeTriggeredTask)}, New Scenario() With {.Title = "Background Task with Application Trigger", .ClassType = GetType(ApplicationTriggerTask)}, New Scenario() With {.Title = "Grouped Background Task", .ClassType = GetType(GroupedBackgroundTask)}} End Class Public Class Scenario @@ -71,6 +71,16 @@ Namespace Global.SDKTemplate Public Shared ApplicationTriggerTaskRegistered As Boolean = False + Public Const GroupedBackgroundTaskName As String = "GroupedBackgroundTask" + + Public Const 
BackgroundTaskGroupId As String = "3F2504E0-5F89-41D3-9A0C-0405E82C3333" + + Public Const BackgroundTaskGroupFriendlyName As String = "Background Task Group" + + Public Shared GroupedBackgroundTaskProgress As String = "" + + Public Shared GroupedBackgroundTaskRegistered As Boolean = False + Public Shared TaskStatuses As PropertySet = New PropertySet() ''' @@ -81,7 +91,7 @@ Namespace Global.SDKTemplate ''' A name for the background task. ''' The trigger for the background task. ''' An optional conditional event that must be true for the task to fire. - Public Shared Function RegisterBackgroundTask(taskEntryPoint As String, name As String, trigger As IBackgroundTrigger, condition As IBackgroundCondition) As BackgroundTaskRegistration + Public Shared Function RegisterBackgroundTask(taskEntryPoint As String, name As String, trigger As IBackgroundTrigger, condition As IBackgroundCondition, Optional group As BackgroundTaskRegistrationGroup = Nothing) As BackgroundTaskRegistration If TaskRequiresBackgroundAccess(name) Then ' If the user denies access, the task will not run. Dim requestTask = BackgroundExecutionManager.RequestAccessAsync() @@ -102,6 +112,10 @@ Namespace Global.SDKTemplate builder.CancelOnConditionLoss = True End If + If group IsNot Nothing Then + builder.TaskGroup = group + End If + Dim task As BackgroundTaskRegistration = builder.Register() UpdateBackgroundTaskRegistrationStatus(name, True) ' @@ -115,16 +129,42 @@ Namespace Global.SDKTemplate ''' Unregister background tasks with specified name. ''' ''' Name of the background task to unregister. 
- Public Shared Sub UnregisterBackgroundTasks(name As String) - For Each cur In BackgroundTaskRegistration.AllTasks - If cur.Value.Name = name Then - cur.Value.Unregister(True) - End If - Next + Public Shared Sub UnregisterBackgroundTasks(name As String, Optional group As BackgroundTaskRegistrationGroup = Nothing) + + If group IsNot Nothing Then + For Each cur In group.AllTasks + If cur.Value.Name = name Then + cur.Value.Unregister(True) + End If + Next + Else + For Each cur In BackgroundTaskRegistration.AllTasks + If cur.Value.Name = name Then + cur.Value.Unregister(True) + End If + Next + End If UpdateBackgroundTaskRegistrationStatus(name, False) End Sub + ''' + ''' Retrieve a registered background task group. If no group is registered with the given id, + ''' then create a new one and return it. + ''' + ''' + ''' + ''' The task group associated with the given id + Public Shared Function GetTaskGroup(id As String, groupName As String) As BackgroundTaskRegistrationGroup + Dim group = BackgroundTaskRegistration.GetTaskGroup(id) + + If group Is Nothing Then + group = New BackgroundTaskRegistrationGroup(id, groupName) + End If + + Return group + End Function + ''' ''' Store the registration status of a background task with a given name. 
''' @@ -134,15 +174,17 @@ Namespace Global.SDKTemplate Select name Case SampleBackgroundTaskName SampleBackgroundTaskRegistered = registered - Case SampleBackgroundTaskWithConditionName + Case SampleBackgroundTaskWithConditionName SampleBackgroundTaskWithConditionRegistered = registered - Case ServicingCompleteTaskName + Case ServicingCompleteTaskName ServicingCompleteTaskRegistered = registered - Case TimeTriggeredTaskName + Case TimeTriggeredTaskName TimeTriggeredTaskRegistered = registered - Case ApplicationTriggerTaskName + Case ApplicationTriggerTaskName ApplicationTriggerTaskRegistered = registered - End Select + Case GroupedBackgroundTaskName + GroupedBackgroundTaskRegistered = registered + End Select End Sub ''' @@ -155,15 +197,17 @@ Namespace Global.SDKTemplate Select name Case SampleBackgroundTaskName registered = SampleBackgroundTaskRegistered - Case SampleBackgroundTaskWithConditionName + Case SampleBackgroundTaskWithConditionName registered = SampleBackgroundTaskWithConditionRegistered - Case ServicingCompleteTaskName + Case ServicingCompleteTaskName registered = ServicingCompleteTaskRegistered - Case TimeTriggeredTaskName + Case TimeTriggeredTaskName registered = TimeTriggeredTaskRegistered - Case ApplicationTriggerTaskName + Case ApplicationTriggerTaskName registered = ApplicationTriggerTaskRegistered - End Select + Case GroupedBackgroundTaskName + registered = GroupedBackgroundTaskRegistered + End Select Dim status = If(registered, "Registered", "Unregistered") Dim taskStatus As Object = Nothing @@ -195,5 +239,10 @@ Namespace Global.SDKTemplate Protected Overrides Sub OnBackgroundActivated(args As BackgroundActivatedEventArgs) BackgroundActivity.Start(args.TaskInstance) End Sub + + Private Sub Construct() + Dim group = BackgroundTaskSample.GetTaskGroup(BackgroundTaskSample.BackgroundTaskGroupId, BackgroundTaskSample.BackgroundTaskGroupFriendlyName) + AddHandler group.BackgroundActivated, AddressOf BackgroundActivity.Start + End Sub End Class End 
Namespace diff --git a/Samples/BackgroundActivation/vb/Scenario6_GroupedTask.xaml.vb b/Samples/BackgroundActivation/vb/Scenario6_GroupedTask.xaml.vb new file mode 100644 index 0000000000..6a07575d85 --- /dev/null +++ b/Samples/BackgroundActivation/vb/Scenario6_GroupedTask.xaml.vb @@ -0,0 +1,133 @@ +'********************************************************* +' +' Copyright (c) Microsoft. All rights reserved. +' This code is licensed under the MIT License (MIT). +' THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +' ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +' IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +' PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. +' +'********************************************************* +Imports System +Imports SDKTemplate +Imports Windows.ApplicationModel.Background +Imports Windows.Storage +Imports Windows.UI.Core +Imports Windows.UI.Xaml +Imports Windows.UI.Xaml.Controls +Imports Windows.UI.Xaml.Navigation + +Namespace Global.SDKTemplate + + ''' + ''' An empty page that can be used on its own or navigated to within a Frame. + ''' + Partial Public NotInheritable Class GroupedBackgroundTask + Inherits Page + + Dim rootPage As MainPage = MainPage.Current + + Dim group As BackgroundTaskRegistrationGroup = Nothing + + Public Sub New() + Me.InitializeComponent() + End Sub + + ''' + ''' Invoked when this page is about to be displayed in a Frame. + ''' + ''' Event data that describes how this page was reached. The Parameter + ''' property is typically used to configure the page. 
+ Protected Overrides Sub OnNavigatedTo(e As NavigationEventArgs) + + group = BackgroundTaskSample.GetTaskGroup(BackgroundTaskSample.BackgroundTaskGroupId, BackgroundTaskSample.BackgroundTaskGroupFriendlyName) + + For Each task In group.AllTasks + If task.Value.Name = BackgroundTaskSample.GroupedBackgroundTaskName Then + AttachProgressAndCompletedHandlers(task.Value) + BackgroundTaskSample.UpdateBackgroundTaskRegistrationStatus(BackgroundTaskSample.GroupedBackgroundTaskName, True) + Exit For + End If + Next + + UpdateUI() + End Sub + + ''' + ''' Register a Grouped Background Task. + ''' + ''' + ''' + Private Sub RegisterGroupedBackgroundTask(sender As Object, e As RoutedEventArgs) + Dim task = BackgroundTaskSample.RegisterBackgroundTask(Nothing, BackgroundTaskSample.GroupedBackgroundTaskName, New SystemTrigger(SystemTriggerType.TimeZoneChange, False), Nothing, group) + AttachProgressAndCompletedHandlers(task) + UpdateUI() + End Sub + + ''' + ''' Unregister a Grouped Background Task. + ''' + ''' + ''' + Private Sub UnregisterGroupedTask(sender As Object, e As RoutedEventArgs) + BackgroundTaskSample.UnregisterBackgroundTasks(BackgroundTaskSample.GroupedBackgroundTaskName, group) + UpdateUI() + End Sub + + ''' + ''' Unregister all Background Tasks that are not in groups. + ''' This will not impact tasks registered with groups. + ''' + ''' + ''' + Private Sub UnregisterUngroupedTasks(sender As Object, e As RoutedEventArgs) + For Each cur In BackgroundTaskRegistration.AllTasks + cur.Value.Unregister(True) + BackgroundTaskSample.UpdateBackgroundTaskRegistrationStatus(cur.Value.Name, False) + Next + End Sub + + ''' + ''' Attach progress and completed handers to a background task. + ''' + ''' The task to attach progress and completed handlers to. 
+ Private Sub AttachProgressAndCompletedHandlers(task As IBackgroundTaskRegistration) + AddHandler task.Progress, New BackgroundTaskProgressEventHandler(AddressOf OnProgress) + AddHandler task.Completed, New BackgroundTaskCompletedEventHandler(AddressOf OnCompleted) + End Sub + + ''' + ''' Handle background task progress. + ''' + ''' The task that is reporting progress. + ''' Arguments of the progress report. + Private Sub OnProgress(task As IBackgroundTaskRegistration, args As BackgroundTaskProgressEventArgs) + Dim ignored = Dispatcher.RunAsync(CoreDispatcherPriority.Normal, Sub() + Dim progress = "Progress: " & args.Progress & "%" + BackgroundTaskSample.GroupedBackgroundTaskProgress = progress + UpdateUI() + End Sub) + End Sub + + ''' + ''' Handle background task completion. + ''' + ''' The task that is reporting completion. + ''' Arguments of the completion report. + Private Sub OnCompleted(task As IBackgroundTaskRegistration, args As BackgroundTaskCompletedEventArgs) + UpdateUI() + End Sub + + ''' + ''' Update the scenario UI. 
+ ''' + Private Async Sub UpdateUI() + Await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, Sub() + RegisterButton.IsEnabled = Not BackgroundTaskSample.GroupedBackgroundTaskRegistered + UnregisterGroupedButton.IsEnabled = BackgroundTaskSample.GroupedBackgroundTaskRegistered + Progress.Text = BackgroundTaskSample.GroupedBackgroundTaskProgress + Status.Text = BackgroundTaskSample.GetBackgroundTaskStatus(BackgroundTaskSample.GroupedBackgroundTaskName) + End Sub) + End Sub + End Class +End Namespace diff --git a/Samples/SpeechRecognitionAndSynthesis/cpp/Package.appxmanifest b/Samples/SpeechRecognitionAndSynthesis/cpp/Package.appxmanifest index 9f00375fd1..a4e185c3aa 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cpp/Package.appxmanifest +++ b/Samples/SpeechRecognitionAndSynthesis/cpp/Package.appxmanifest @@ -20,7 +20,7 @@ - + diff --git a/Samples/SpeechRecognitionAndSynthesis/cpp/SampleConfiguration.cpp b/Samples/SpeechRecognitionAndSynthesis/cpp/SampleConfiguration.cpp index 2b0eea967b..64656df861 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cpp/SampleConfiguration.cpp +++ b/Samples/SpeechRecognitionAndSynthesis/cpp/SampleConfiguration.cpp @@ -18,6 +18,7 @@ using namespace SDKTemplate; Platform::Array^ MainPage::scenariosInner = ref new Platform::Array { { "Synthesize Text", "SDKTemplate.Scenario_SynthesizeText" }, + { "Synthesize Text with Boundaries", "SDKTemplate.Scenario_SynthesizeTextBoundaries" }, { "Synthesize SSML", "SDKTemplate.Scenario_SynthesizeSSML" }, { "Predefined Dictation Grammar", "SDKTemplate.Scenario_PredefinedDictationGrammar" }, { "Predefined WebSearch Grammar", "SDKTemplate.Scenario_PredefinedWebSearchGrammar" }, diff --git a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousDictation.xaml.cpp b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousDictation.xaml.cpp index 1359bc074a..b6da3e539f 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousDictation.xaml.cpp +++ 
b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousDictation.xaml.cpp @@ -47,9 +47,6 @@ void Scenario_ContinuousDictation::OnNavigatedTo(NavigationEventArgs^ e) { Page::OnNavigatedTo(e); - // Keep track of the UI thread dispatcher, as speech events will come in on a separate thread. - dispatcher = CoreWindow::GetForCurrentThread()->Dispatcher; - // Prompt the user for permission to access the microphone. This request will only happen // once, it will not re-prompt if the user rejects the permission. create_task(AudioCapturePermissions::RequestMicrophonePermissionAsync(), task_continuation_context::use_current()) @@ -59,15 +56,15 @@ void Scenario_ContinuousDictation::OnNavigatedTo(NavigationEventArgs^ e) { this->btnContinuousRecognize->IsEnabled = true; - PopulateLanguageDropdown(); - InitializeRecognizer(SpeechRecognizer::SystemSpeechLanguage); + PopulateLanguageDropdown(); + InitializeRecognizer(SpeechRecognizer::SystemSpeechLanguage); } else { this->dictationTextBox->Text = "Permission to access capture resources was not given by the user; please set the application setting in Settings->Privacy->Microphone."; - this->cbLanguageSelection->IsEnabled = false; + this->cbLanguageSelection->IsEnabled = false; } - }); + }); } /// @@ -187,7 +184,7 @@ void Scenario_ContinuousDictation::OnNavigatedFrom(NavigationEventArgs^ e) /// Unused event details void Scenario_ContinuousDictation::ContinuousRecognize_Click(Object^ sender, RoutedEventArgs^ e) { - btnContinuousRecognize->IsEnabled = false; + btnContinuousRecognize->IsEnabled = false; // The recognizer can only start listening in a continuous fashion if the recognizer is currently idle. // This prevents an exception from occurring. 
if (speechRecognizer->State == SpeechRecognizerState::Idle) @@ -216,8 +213,8 @@ void Scenario_ContinuousDictation::ContinuousRecognize_Click(Object^ sender, Rou create_task(messageDialog->ShowAsync()); } }).then([this]() { - btnContinuousRecognize->IsEnabled = true; - }); + btnContinuousRecognize->IsEnabled = true; + }); } catch (COMException^ exception) { @@ -250,8 +247,8 @@ void Scenario_ContinuousDictation::ContinuousRecognize_Click(Object^ sender, Rou // Ensure we don't leave any hypothesis text behind dictationTextBox->Text = ref new Platform::String(this->dictatedTextBuilder.str().c_str()); }).then([this]() { - btnContinuousRecognize->IsEnabled = true; - }); + btnContinuousRecognize->IsEnabled = true; + }); } } @@ -296,7 +293,7 @@ void Scenario_ContinuousDictation::dictationTextBox_TextChanged(Object^ sender, /// The current state of the recognizer. void Scenario_ContinuousDictation::SpeechRecognizer_StateChanged(SpeechRecognizer ^sender, SpeechRecognizerStateChangedEventArgs ^args) { - dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args]() + Dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args]() { rootPage->NotifyUser("Speech recognizer state: " + args->State.ToString(), NotifyType::StatusMessage); })); @@ -319,7 +316,7 @@ void Scenario_ContinuousDictation::ContinuousRecognitionSession_Completed(Speech // With dictation (no grammar in place) modes, the default timeout is 20 seconds. 
if (args->Status == SpeechRecognitionResultStatus::TimeoutExceeded) { - dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args]() + Dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args]() { rootPage->NotifyUser("Automatic Time Out of Dictation", NotifyType::StatusMessage); DictationButtonText->Text = " Dictate"; @@ -329,7 +326,7 @@ void Scenario_ContinuousDictation::ContinuousRecognitionSession_Completed(Speech } else { - dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args]() + Dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args]() { rootPage->NotifyUser("Continuous Recognition Completed: " + args->Status.ToString(), NotifyType::ErrorMessage); DictationButtonText->Text = " Dictate"; @@ -355,7 +352,7 @@ void Scenario_ContinuousDictation::ContinuousRecognitionSession_ResultGenerated( { this->dictatedTextBuilder << args->Result->Text->Data() << " "; - dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this]() + Dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this]() { this->discardedTextBlock->Visibility = Windows::UI::Xaml::Visibility::Collapsed; @@ -365,7 +362,7 @@ void Scenario_ContinuousDictation::ContinuousRecognitionSession_ResultGenerated( } else { - dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args]() + Dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args]() { // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech // is not the primary input mechanism for the application. 
@@ -396,7 +393,7 @@ void Scenario_ContinuousDictation::SpeechRecognizer_HypothesisGenerated(SpeechRe String^ hypothesis = args->Hypothesis->Text; std::wstring textBoxContent = dictatedTextBuilder.str() + L" " + hypothesis->Data() + L"..."; - dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, textBoxContent]() + Dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, textBoxContent]() { // Update the textbox with the currently confirmed text, and the hypothesis combined. this->dictationTextBox->Text = ref new Platform::String(textBoxContent.c_str()); @@ -411,7 +408,7 @@ void Scenario_ContinuousDictation::PopulateLanguageDropdown() { Windows::Globalization::Language^ defaultLanguage = SpeechRecognizer::SystemSpeechLanguage; auto supportedLanguages = SpeechRecognizer::SupportedTopicLanguages; - std::for_each(begin(supportedLanguages), end(supportedLanguages), [&](Windows::Globalization::Language^ lang) + for (Windows::Globalization::Language^ lang : supportedLanguages) { ComboBoxItem^ item = ref new ComboBoxItem(); item->Tag = lang; @@ -423,7 +420,7 @@ void Scenario_ContinuousDictation::PopulateLanguageDropdown() item->IsSelected = true; cbLanguageSelection->SelectedItem = item; } - }); + } } /// diff --git a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousDictation.xaml.h b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousDictation.xaml.h index cb0370ed63..dcad096e08 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousDictation.xaml.h +++ b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousDictation.xaml.h @@ -34,7 +34,6 @@ namespace SDKTemplate static const unsigned int HResultPrivacyStatementDeclined = 0x80045509; SDKTemplate::MainPage^ rootPage; - Windows::UI::Core::CoreDispatcher^ dispatcher; Windows::Media::SpeechRecognition::SpeechRecognizer^ speechRecognizer; std::wstringstream dictatedTextBuilder; diff --git 
a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionListGrammar.xaml.cpp b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionListGrammar.xaml.cpp index 64234245d8..4371a318f2 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionListGrammar.xaml.cpp +++ b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionListGrammar.xaml.cpp @@ -47,9 +47,6 @@ void Scenario_ContinuousRecognitionListGrammar::OnNavigatedTo(NavigationEventArg { Page::OnNavigatedTo(e); - // Keep track of the UI thread dispatcher, as speech events will come in on a separate thread. - dispatcher = CoreWindow::GetForCurrentThread()->Dispatcher; - // Prompt the user for permission to access the microphone. This request will only happen // once, it will not re-prompt if the user rejects the permission. create_task(AudioCapturePermissions::RequestMicrophonePermissionAsync(), task_continuation_context::use_current()) @@ -59,20 +56,20 @@ void Scenario_ContinuousRecognitionListGrammar::OnNavigatedTo(NavigationEventArg { this->btnContinuousRecognize->IsEnabled = true; - Windows::Globalization::Language^ speechLanguage = SpeechRecognizer::SystemSpeechLanguage; - speechContext = ResourceContext::GetForCurrentView(); - speechContext->Languages = ref new VectorView(1, speechLanguage->LanguageTag); + Windows::Globalization::Language^ speechLanguage = SpeechRecognizer::SystemSpeechLanguage; + speechContext = ResourceContext::GetForCurrentView(); + speechContext->Languages = ref new VectorView(1, speechLanguage->LanguageTag); - speechResourceMap = ResourceManager::Current->MainResourceMap->GetSubtree(L"LocalizationSpeechResources"); + speechResourceMap = ResourceManager::Current->MainResourceMap->GetSubtree(L"LocalizationSpeechResources"); - PopulateLanguageDropdown(); - InitializeRecognizer(SpeechRecognizer::SystemSpeechLanguage); + PopulateLanguageDropdown(); + InitializeRecognizer(SpeechRecognizer::SystemSpeechLanguage); 
} else { this->resultTextBlock->Visibility = Windows::UI::Xaml::Visibility::Visible; this->resultTextBlock->Text = L"Permission to access capture resources was not given by the user; please set the application setting in Settings->Privacy->Microphone."; - this->cbLanguageSelection->IsEnabled = false; + this->cbLanguageSelection->IsEnabled = false; } }); } @@ -149,7 +146,7 @@ void Scenario_ContinuousRecognitionListGrammar::InitializeRecognizer(Windows::Gl speechResourceMap->GetValue("ListGrammarGoHome", speechContext)->ValueAsString + L"', '" + speechResourceMap->GetValue("ListGrammarGoToContosoStudio", speechContext)->ValueAsString + L"' or '" + speechResourceMap->GetValue("ListGrammarShowMessage", speechContext)->ValueAsString + L"'"; - listGrammarHelpText->Text = + listGrammarHelpText->Text = speechResourceMap->GetValue("ListGrammarHelpText", speechContext)->ValueAsString + L"\n" + uiOptionsText; @@ -231,7 +228,7 @@ void Scenario_ContinuousRecognitionListGrammar::OnNavigatedFrom(NavigationEventA } else { - cleanupTask = create_task([]() {}, task_continuation_context::use_current()); + cleanupTask = task_from_result(); } cleanupTask.then([this]() @@ -255,7 +252,7 @@ void Scenario_ContinuousRecognitionListGrammar::OnNavigatedFrom(NavigationEventA /// Unused event details void Scenario_ContinuousRecognitionListGrammar::ContinuousRecognize_Click(Object^ sender, RoutedEventArgs^ e) { - btnContinuousRecognize->IsEnabled = false; + btnContinuousRecognize->IsEnabled = false; // The recognizer can only start listening in a continuous fashion if the recognizer is currently idle. // This prevents an exception from occurring. 
@@ -281,8 +278,8 @@ void Scenario_ContinuousRecognitionListGrammar::ContinuousRecognize_Click(Object create_task(messageDialog->ShowAsync()); } }).then([this]() { - btnContinuousRecognize->IsEnabled = true; - }); + btnContinuousRecognize->IsEnabled = true; + }); } else { @@ -293,8 +290,8 @@ void Scenario_ContinuousRecognitionListGrammar::ContinuousRecognize_Click(Object cbLanguageSelection->IsEnabled = true; create_task(speechRecognizer->ContinuousRecognitionSession->CancelAsync()).then([this]() { - btnContinuousRecognize->IsEnabled = true; - }); + btnContinuousRecognize->IsEnabled = true; + }); } } @@ -305,7 +302,7 @@ void Scenario_ContinuousRecognitionListGrammar::ContinuousRecognize_Click(Object /// The current state of the recognizer. void Scenario_ContinuousRecognitionListGrammar::SpeechRecognizer_StateChanged(SpeechRecognizer ^sender, SpeechRecognizerStateChangedEventArgs ^args) { - dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args]() + Dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args]() { rootPage->NotifyUser("Speech recognizer state: " + args->State.ToString(), NotifyType::StatusMessage); })); @@ -322,7 +319,7 @@ void Scenario_ContinuousRecognitionListGrammar::ContinuousRecognitionSession_Com { if (args->Status != SpeechRecognitionResultStatus::Success) { - dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args]() + Dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args]() { rootPage->NotifyUser("Continuous Recognition Completed: " + args->Status.ToString(), NotifyType::ErrorMessage); ContinuousRecoButtonText->Text = " Continuous Recognition"; @@ -353,7 +350,7 @@ void Scenario_ContinuousRecognitionListGrammar::ContinuousRecognitionSession_Res if (args->Result->Confidence == SpeechRecognitionConfidence::Medium || args->Result->Confidence == SpeechRecognitionConfidence::High) { - 
dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args, tag]() + Dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args, tag]() { heardYouSayTextBlock->Visibility = Windows::UI::Xaml::Visibility::Visible; resultTextBlock->Visibility = Windows::UI::Xaml::Visibility::Visible; @@ -364,7 +361,7 @@ void Scenario_ContinuousRecognitionListGrammar::ContinuousRecognitionSession_Res { // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech // is not the primary input mechanism for the application. - dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args, tag]() + Dispatcher->RunAsync(CoreDispatcherPriority::Normal, ref new DispatchedHandler([this, args, tag]() { heardYouSayTextBlock->Visibility = Windows::UI::Xaml::Visibility::Collapsed; resultTextBlock->Visibility = Windows::UI::Xaml::Visibility::Visible; @@ -379,7 +376,7 @@ void Scenario_ContinuousRecognitionListGrammar::ContinuousRecognitionSession_Res void Scenario_ContinuousRecognitionListGrammar::PopulateLanguageDropdown() { // disable callback temporarily. 
- cbLanguageSelection->SelectionChanged -= cbLanguageSelectionSelectionChangedToken; + isPopulatingLanguages = true; Windows::Globalization::Language^ defaultLanguage = SpeechRecognizer::SystemSpeechLanguage; auto supportedLanguages = SpeechRecognizer::SupportedGrammarLanguages; @@ -397,8 +394,7 @@ void Scenario_ContinuousRecognitionListGrammar::PopulateLanguageDropdown() } }); - cbLanguageSelectionSelectionChangedToken = cbLanguageSelection->SelectionChanged += - ref new SelectionChangedEventHandler(this, &Scenario_ContinuousRecognitionListGrammar::cbLanguageSelection_SelectionChanged); + isPopulatingLanguages = false; } /// @@ -406,6 +402,11 @@ void Scenario_ContinuousRecognitionListGrammar::PopulateLanguageDropdown() /// void Scenario_ContinuousRecognitionListGrammar::cbLanguageSelection_SelectionChanged(Object^ sender, SelectionChangedEventArgs^ e) { + if (isPopulatingLanguages) + { + return; + } + ComboBoxItem^ item = (ComboBoxItem^)(cbLanguageSelection->SelectedItem); Windows::Globalization::Language^ newLanguage = (Windows::Globalization::Language^)item->Tag; @@ -416,7 +417,7 @@ void Scenario_ContinuousRecognitionListGrammar::cbLanguageSelection_SelectionCha return; } } - + try { speechContext->Languages = ref new VectorView(1, newLanguage->LanguageTag); @@ -428,5 +429,5 @@ void Scenario_ContinuousRecognitionListGrammar::cbLanguageSelection_SelectionCha auto messageDialog = ref new Windows::UI::Popups::MessageDialog(exception->Message, "Exception"); create_task(messageDialog->ShowAsync()); } - + } \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionListGrammar.xaml.h b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionListGrammar.xaml.h index f3b4aac7f4..0aef2ac677 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionListGrammar.xaml.h +++ b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionListGrammar.xaml.h @@ -35,21 
+35,19 @@ namespace SDKTemplate static const unsigned int HResultRecognizerNotFound = 0x8004503a; SDKTemplate::MainPage^ rootPage; - Windows::UI::Core::CoreDispatcher^ dispatcher; Windows::Media::SpeechRecognition::SpeechRecognizer^ speechRecognizer; Windows::ApplicationModel::Resources::Core::ResourceContext^ speechContext; Windows::ApplicationModel::Resources::Core::ResourceMap^ speechResourceMap; + bool isPopulatingLanguages = false; void ContinuousRecognize_Click(Platform::Object^ sender, Windows::UI::Xaml::RoutedEventArgs^ e); void InitializeRecognizer(Windows::Globalization::Language^ recognizerLanguage); void PopulateLanguageDropdown(); - Windows::Foundation::EventRegistrationToken stateChangedToken; Windows::Foundation::EventRegistrationToken continuousRecognitionCompletedToken; Windows::Foundation::EventRegistrationToken continuousRecognitionResultGeneratedToken; - Windows::Foundation::EventRegistrationToken cbLanguageSelectionSelectionChangedToken; void SpeechRecognizer_StateChanged(Windows::Media::SpeechRecognition::SpeechRecognizer ^sender, Windows::Media::SpeechRecognition::SpeechRecognizerStateChangedEventArgs ^args); void ContinuousRecognitionSession_Completed(Windows::Media::SpeechRecognition::SpeechContinuousRecognitionSession ^sender, Windows::Media::SpeechRecognition::SpeechContinuousRecognitionCompletedEventArgs ^args); diff --git a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionSRGSGrammar.xaml.cpp b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionSRGSGrammar.xaml.cpp index 111ad4fe08..00db0196f6 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionSRGSGrammar.xaml.cpp +++ b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionSRGSGrammar.xaml.cpp @@ -454,7 +454,7 @@ Windows::UI::Color Scenario_ContinuousRecognitionSRGSGrammar::getColor(Platform: void Scenario_ContinuousRecognitionSRGSGrammar::PopulateLanguageDropdown() { // disable callback 
temporarily. - cbLanguageSelection->SelectionChanged -= cbLanguageSelectionSelectionChangedToken; + isPopulatingLanguages = true; Windows::Globalization::Language^ defaultLanguage = SpeechRecognizer::SystemSpeechLanguage; auto supportedLanguages = SpeechRecognizer::SupportedGrammarLanguages; @@ -471,9 +471,8 @@ void Scenario_ContinuousRecognitionSRGSGrammar::PopulateLanguageDropdown() cbLanguageSelection->SelectedItem = item; } }); - - cbLanguageSelectionSelectionChangedToken = cbLanguageSelection->SelectionChanged += - ref new SelectionChangedEventHandler(this, &Scenario_ContinuousRecognitionSRGSGrammar::cbLanguageSelection_SelectionChanged); + + isPopulatingLanguages = false; } /// @@ -481,6 +480,11 @@ void Scenario_ContinuousRecognitionSRGSGrammar::PopulateLanguageDropdown() /// void Scenario_ContinuousRecognitionSRGSGrammar::cbLanguageSelection_SelectionChanged(Object^ sender, SelectionChangedEventArgs^ e) { + if (isPopulatingLanguages) + { + return; + } + ComboBoxItem^ item = (ComboBoxItem^)(cbLanguageSelection->SelectedItem); Windows::Globalization::Language^ newLanguage = (Windows::Globalization::Language^)item->Tag; diff --git a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionSRGSGrammar.xaml.h b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionSRGSGrammar.xaml.h index c2026fa5e8..cc217bbc84 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionSRGSGrammar.xaml.h +++ b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousRecognitionSRGSGrammar.xaml.h @@ -45,6 +45,7 @@ namespace SDKTemplate Windows::Foundation::Collections::IMap^ colorLookup; Windows::ApplicationModel::Resources::Core::ResourceContext^ speechContext; Windows::ApplicationModel::Resources::Core::ResourceMap^ speechResourceMap; + bool isPopulatingLanguages = false; void ContinuousRecognize_Click(Platform::Object^ sender, Windows::UI::Xaml::RoutedEventArgs^ e); @@ -54,8 +55,6 @@ namespace SDKTemplate 
Windows::Foundation::EventRegistrationToken stateChangedToken; Windows::Foundation::EventRegistrationToken continuousRecognitionCompletedToken; Windows::Foundation::EventRegistrationToken continuousRecognitionResultGeneratedToken; - Windows::Foundation::EventRegistrationToken cbLanguageSelectionSelectionChangedToken; - void SpeechRecognizer_StateChanged(Windows::Media::SpeechRecognition::SpeechRecognizer ^sender, Windows::Media::SpeechRecognition::SpeechRecognizerStateChangedEventArgs ^args); void ContinuousRecognitionSession_Completed(Windows::Media::SpeechRecognition::SpeechContinuousRecognitionSession ^sender, Windows::Media::SpeechRecognition::SpeechContinuousRecognitionCompletedEventArgs ^args); diff --git a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ListConstraint.xaml.cpp b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ListConstraint.xaml.cpp index e3d0ce8092..cb3f984f79 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ListConstraint.xaml.cpp +++ b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ListConstraint.xaml.cpp @@ -378,11 +378,11 @@ void Scenario_ListConstraint::SpeechRecognizer_StateChanged(SpeechRecognizer ^se void Scenario_ListConstraint::PopulateLanguageDropdown() { // disable callback temporarily. 
- cbLanguageSelection->SelectionChanged -= cbLanguageSelectionSelectionChangedToken; + isPopulatingLanguages = true; Windows::Globalization::Language^ defaultLanguage = SpeechRecognizer::SystemSpeechLanguage; auto supportedLanguages = SpeechRecognizer::SupportedGrammarLanguages; - std::for_each(begin(supportedLanguages), end(supportedLanguages), [&](Windows::Globalization::Language^ lang) + for (Windows::Globalization::Language^ lang : supportedLanguages) { ComboBoxItem^ item = ref new ComboBoxItem(); item->Tag = lang; @@ -394,10 +394,9 @@ void Scenario_ListConstraint::PopulateLanguageDropdown() item->IsSelected = true; cbLanguageSelection->SelectedItem = item; } - }); + } - cbLanguageSelectionSelectionChangedToken = cbLanguageSelection->SelectionChanged += - ref new SelectionChangedEventHandler(this, &Scenario_ListConstraint::cbLanguageSelection_SelectionChanged); + isPopulatingLanguages = false; } @@ -406,6 +405,11 @@ void Scenario_ListConstraint::PopulateLanguageDropdown() /// void Scenario_ListConstraint::cbLanguageSelection_SelectionChanged(Object^ sender, SelectionChangedEventArgs^ e) { + if (isPopulatingLanguages) + { + return; + } + ComboBoxItem^ item = (ComboBoxItem^)(cbLanguageSelection->SelectedItem); Windows::Globalization::Language^ newLanguage = (Windows::Globalization::Language^)item->Tag; diff --git a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ListConstraint.xaml.h b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ListConstraint.xaml.h index 995cce1109..f508b7a4eb 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ListConstraint.xaml.h +++ b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ListConstraint.xaml.h @@ -44,6 +44,7 @@ namespace SDKTemplate Windows::Media::SpeechRecognition::SpeechRecognizer^ speechRecognizer; Windows::ApplicationModel::Resources::Core::ResourceContext^ speechContext; Windows::ApplicationModel::Resources::Core::ResourceMap^ speechResourceMap; + bool isPopulatingLanguages = false; void 
RecognizeWithUIListConstraint_Click(Platform::Object^ sender, Windows::UI::Xaml::RoutedEventArgs^ e); void RecognizeWithoutUIListConstraint_Click(Platform::Object^ sender, Windows::UI::Xaml::RoutedEventArgs^ e); @@ -52,7 +53,6 @@ namespace SDKTemplate void PopulateLanguageDropdown(); Windows::Foundation::EventRegistrationToken stateChangedToken; - Windows::Foundation::EventRegistrationToken cbLanguageSelectionSelectionChangedToken; void SpeechRecognizer_StateChanged(Windows::Media::SpeechRecognition::SpeechRecognizer ^sender, Windows::Media::SpeechRecognition::SpeechRecognizerStateChangedEventArgs ^args); void cbLanguageSelection_SelectionChanged(Platform::Object^ sender, Windows::UI::Xaml::Controls::SelectionChangedEventArgs^ e); diff --git a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_SRGSConstraint.xaml.cpp b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_SRGSConstraint.xaml.cpp index 7661bab18f..073f385c6e 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_SRGSConstraint.xaml.cpp +++ b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_SRGSConstraint.xaml.cpp @@ -445,11 +445,11 @@ Windows::UI::Color Scenario_SRGSConstraint::getColor(Platform::String^ colorStri void Scenario_SRGSConstraint::PopulateLanguageDropdown() { // disable callback temporarily. 
- cbLanguageSelection->SelectionChanged -= cbLanguageSelectionSelectionChangedToken; + isPopulatingLanguages = true; Windows::Globalization::Language^ defaultLanguage = SpeechRecognizer::SystemSpeechLanguage; auto supportedLanguages = SpeechRecognizer::SupportedGrammarLanguages; - std::for_each(begin(supportedLanguages), end(supportedLanguages), [&](Windows::Globalization::Language^ lang) + for (Windows::Globalization::Language^ lang : supportedLanguages) { ComboBoxItem^ item = ref new ComboBoxItem(); item->Tag = lang; @@ -461,9 +461,9 @@ void Scenario_SRGSConstraint::PopulateLanguageDropdown() item->IsSelected = true; cbLanguageSelection->SelectedItem = item; } - }); - cbLanguageSelectionSelectionChangedToken = cbLanguageSelection->SelectionChanged += - ref new SelectionChangedEventHandler(this, &Scenario_SRGSConstraint::cbLanguageSelection_SelectionChanged); + } + + isPopulatingLanguages = false; } @@ -472,6 +472,11 @@ void Scenario_SRGSConstraint::PopulateLanguageDropdown() /// void Scenario_SRGSConstraint::cbLanguageSelection_SelectionChanged(Object^ sender, SelectionChangedEventArgs^ e) { + if (isPopulatingLanguages) + { + return; + } + ComboBoxItem^ item = (ComboBoxItem^)(cbLanguageSelection->SelectedItem); Windows::Globalization::Language^ newLanguage = (Windows::Globalization::Language^)item->Tag; diff --git a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_SRGSConstraint.xaml.h b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_SRGSConstraint.xaml.h index 1977f4ae80..9f0c70da96 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_SRGSConstraint.xaml.h +++ b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_SRGSConstraint.xaml.h @@ -44,6 +44,7 @@ namespace SDKTemplate Windows::Foundation::Collections::IMap^ colorLookup; Windows::ApplicationModel::Resources::Core::ResourceContext^ speechContext; Windows::ApplicationModel::Resources::Core::ResourceMap^ speechResourceMap; + bool isPopulatingLanguages = false; void 
RecognizeWithUI_Click(Platform::Object^ sender, Windows::UI::Xaml::RoutedEventArgs^ e); void RecognizeWithoutUI_Click(Platform::Object^ sender, Windows::UI::Xaml::RoutedEventArgs^ e); @@ -55,7 +56,6 @@ namespace SDKTemplate Windows::UI::Color getColor(Platform::String^ colorString); Windows::Foundation::EventRegistrationToken stateChangedToken; - Windows::Foundation::EventRegistrationToken cbLanguageSelectionSelectionChangedToken; void SpeechRecognizer_StateChanged(Windows::Media::SpeechRecognition::SpeechRecognizer ^sender, Windows::Media::SpeechRecognition::SpeechRecognizerStateChangedEventArgs ^args); void cbLanguageSelection_SelectionChanged(Platform::Object^ sender, Windows::UI::Xaml::Controls::SelectionChangedEventArgs^ e); diff --git a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_SynthesizeSSML.xaml b/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_SynthesizeSSML.xaml deleted file mode 100644 index ac02474d36..0000000000 --- a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_SynthesizeSSML.xaml +++ /dev/null @@ -1,73 +0,0 @@ - - - - - - - - - - - - - - - This sample showcases speech synthesis using WinRT APIs to convert SSML to speech. - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/ContinuousRecognitionSRGSConstraintScenario.xaml b/Samples/SpeechRecognitionAndSynthesis/cs/ContinuousRecognitionSRGSConstraintScenario.xaml deleted file mode 100644 index 2a0c408e5c..0000000000 --- a/Samples/SpeechRecognitionAndSynthesis/cs/ContinuousRecognitionSRGSConstraintScenario.xaml +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - - - - - - - - - This sample showcases continuous recognition using an SRGS/GRXML grammar for asynchronous voice commands. 
- - - - - - - - - - - - - - - - - - - - - - - diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/ListConstraintScenario.xaml b/Samples/SpeechRecognitionAndSynthesis/cs/ListConstraintScenario.xaml deleted file mode 100644 index 3a425575aa..0000000000 --- a/Samples/SpeechRecognitionAndSynthesis/cs/ListConstraintScenario.xaml +++ /dev/null @@ -1,67 +0,0 @@ - - - - - - - - - - - - - - This sample showcases one-shot speech recognition using a custom list-based grammar. - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/Package.appxmanifest b/Samples/SpeechRecognitionAndSynthesis/cs/Package.appxmanifest index d17e094c0b..2631854b85 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cs/Package.appxmanifest +++ b/Samples/SpeechRecognitionAndSynthesis/cs/Package.appxmanifest @@ -21,7 +21,7 @@ - + diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/PauseAsyncScenario.xaml b/Samples/SpeechRecognitionAndSynthesis/cs/PauseAsyncScenario.xaml deleted file mode 100644 index 0a1921f73d..0000000000 --- a/Samples/SpeechRecognitionAndSynthesis/cs/PauseAsyncScenario.xaml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - - - - This sample showcases how to switch grammars during a continuous recognition session. - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/PredefinedDictationGrammarScenario.xaml b/Samples/SpeechRecognitionAndSynthesis/cs/PredefinedDictationGrammarScenario.xaml deleted file mode 100644 index 071cf85f65..0000000000 --- a/Samples/SpeechRecognitionAndSynthesis/cs/PredefinedDictationGrammarScenario.xaml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - - - - This sample showcases one-shot speech recognition using the predefined dictation grammar. - - - - - - - - - - - - - - - - - The speech recognition privacy settings have not been accepted. Open Privacy Settings to review the privacy policy and enable personalization. 
- - - - - - - - diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/PredefinedWebSearchGrammarScenario.xaml b/Samples/SpeechRecognitionAndSynthesis/cs/PredefinedWebSearchGrammarScenario.xaml deleted file mode 100644 index 5f97293748..0000000000 --- a/Samples/SpeechRecognitionAndSynthesis/cs/PredefinedWebSearchGrammarScenario.xaml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - - - - - - - This sample showcases one-shot speech recognition using the predefined web search grammar. - - - - - - - - - - - - - - - - - The speech recognition privacy settings have not been accepted. Open Privacy Settings to review the privacy policy and enable personalization. - - - - - - - - diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/SRGSConstraintScenario.xaml b/Samples/SpeechRecognitionAndSynthesis/cs/SRGSConstraintScenario.xaml deleted file mode 100644 index 7b105f3fcd..0000000000 --- a/Samples/SpeechRecognitionAndSynthesis/cs/SRGSConstraintScenario.xaml +++ /dev/null @@ -1,71 +0,0 @@ - - - - - - - - - - - - - - This sample showcases one-shot speech recognition using a custom SRGS/GRXML grammar. 
- - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/SampleConfiguration.cs b/Samples/SpeechRecognitionAndSynthesis/cs/SampleConfiguration.cs index c8a677f3c0..403097ed03 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cs/SampleConfiguration.cs +++ b/Samples/SpeechRecognitionAndSynthesis/cs/SampleConfiguration.cs @@ -12,7 +12,7 @@ using System; using System.Collections.Generic; using Windows.UI.Xaml.Controls; -using SpeechAndTTS; +using SDKTemplate; namespace SDKTemplate { @@ -22,16 +22,17 @@ public partial class MainPage : Page List scenarios = new List { - new Scenario() { Title="Synthesize Text", ClassType=typeof(SynthesizeTextScenario)}, - new Scenario() { Title="Synthesize SSML", ClassType=typeof(SynthesizeSSMLScenario)}, - new Scenario() { Title="Predefined Dictation Grammar", ClassType=typeof(PredefinedDictationGrammarScenario)}, - new Scenario() { Title="Predefined WebSearch Grammar", ClassType=typeof(PredefinedWebSearchGrammarScenario)}, - new Scenario() { Title="Custom List Constraint", ClassType=typeof(ListConstraintScenario)}, - new Scenario() { Title="Custom SRGS Constraint", ClassType=typeof(SRGSConstraintScenario)}, - new Scenario() { Title="Continuous Dictation", ClassType=typeof(ContinuousDictationScenario)}, - new Scenario() { Title="Continuous List Commands", ClassType=typeof(ContinuousRecoListGrammarScenario)}, - new Scenario() { Title="Continuous SRGS Commands", ClassType=typeof(ContinuousRecoSRGSConstraintScenario)}, - new Scenario() { Title="PauseAsync to Change Grammar", ClassType=typeof(PauseAsyncScenario)} + new Scenario() { Title="Synthesize Text", ClassType=typeof(Scenario_SynthesizeText)}, + new Scenario() { Title="Synthesize Text with Boundaries", ClassType=typeof(Scenario_SynthesizeTextBoundaries)}, + new Scenario() { Title="Synthesize SSML", ClassType=typeof(Scenario_SynthesizeSSML)}, + new Scenario() { Title="Predefined Dictation Grammar", 
ClassType=typeof(Scenario_PredefinedDictationGrammar)}, + new Scenario() { Title="Predefined WebSearch Grammar", ClassType=typeof(Scenario_PredefinedWebSearchGrammar)}, + new Scenario() { Title="Custom List Constraint", ClassType=typeof(Scenario_ListConstraint)}, + new Scenario() { Title="Custom SRGS Constraint", ClassType=typeof(Scenario_SRGSConstraint)}, + new Scenario() { Title="Continuous Dictation", ClassType=typeof(Scenario_ContinuousDictation)}, + new Scenario() { Title="Continuous List Commands", ClassType=typeof(Scenario_ContinuousRecognitionListGrammar)}, + new Scenario() { Title="Continuous SRGS Commands", ClassType=typeof(Scenario_ContinuousRecognitionSRGSGrammar)}, + new Scenario() { Title="PauseAsync to Change Grammar", ClassType=typeof(Scenario_PauseAsync)} }; } diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/ContinuousDictationScenario.xaml.cs b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_ContinuousDictation.xaml.cs similarity index 95% rename from Samples/SpeechRecognitionAndSynthesis/cs/ContinuousDictationScenario.xaml.cs rename to Samples/SpeechRecognitionAndSynthesis/cs/Scenario_ContinuousDictation.xaml.cs index a91cb60698..2fdda5caf7 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cs/ContinuousDictationScenario.xaml.cs +++ b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_ContinuousDictation.xaml.cs @@ -23,17 +23,13 @@ using Windows.UI.Xaml.Documents; using System.Threading.Tasks; -namespace SpeechAndTTS +namespace SDKTemplate { - public sealed partial class ContinuousDictationScenario : Page + public sealed partial class Scenario_ContinuousDictation : Page { // Reference to the main sample page in order to post status messages. private MainPage rootPage; - // Speech events may come in on a thread other than the UI thread, keep track of the UI thread's - // dispatcher, so we can update the UI in a thread-safe manner. - private CoreDispatcher dispatcher; - // The speech recognizer used throughout this sample. 
private SpeechRecognizer speechRecognizer; @@ -51,7 +47,7 @@ public sealed partial class ContinuousDictationScenario : Page /// private static uint HResultPrivacyStatementDeclined = 0x80045509; - public ContinuousDictationScenario() + public Scenario_ContinuousDictation() { this.InitializeComponent(); isListening = false; @@ -70,9 +66,6 @@ protected async override void OnNavigatedTo(NavigationEventArgs e) { rootPage = MainPage.Current; - // Keep track of the UI thread dispatcher, as speech events will come in on a separate thread. - dispatcher = CoreWindow.GetForCurrentThread().Dispatcher; - // Prompt the user for permission to access the microphone. This request will only happen // once, it will not re-prompt if the user rejects the permission. bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission(); @@ -231,7 +224,7 @@ private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecogn // With dictation (no grammar in place) modes, the default timeout is 20 seconds. if (args.Status == SpeechRecognitionResultStatus.TimeoutExceeded) { - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { rootPage.NotifyUser("Automatic Time Out of Dictation", NotifyType.StatusMessage); DictationButtonText.Text = " Dictate"; @@ -242,7 +235,7 @@ await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => } else { - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { rootPage.NotifyUser("Continuous Recognition Completed: " + args.Status.ToString(), NotifyType.StatusMessage); DictationButtonText.Text = " Dictate"; @@ -264,7 +257,7 @@ private async void SpeechRecognizer_HypothesisGenerated(SpeechRecognizer sender, // Update the textbox with the currently confirmed text, and the hypothesis combined. 
string textboxContent = dictatedTextBuilder.ToString() + " " + hypothesis + " ..."; - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { dictationTextBox.Text = textboxContent; btnClearText.IsEnabled = true; @@ -287,7 +280,7 @@ private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuous { dictatedTextBuilder.Append(args.Result.Text + " "); - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { discardedTextBlock.Visibility = Windows.UI.Xaml.Visibility.Collapsed; @@ -300,7 +293,7 @@ await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech // is not the primary input mechanism for the application. // Here, just remove any hypothesis text by resetting it to the last known good. - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { dictationTextBox.Text = dictatedTextBuilder.ToString(); string discardedText = args.Result.Text; @@ -322,7 +315,7 @@ await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => /// The current state of the recognizer. 
private async void SpeechRecognizer_StateChanged(SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args) { - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { rootPage.NotifyUser(args.State.ToString(), NotifyType.StatusMessage); }); } diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/ContinuousRecognitionListGrammarScenario.xaml.cs b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_ContinuousRecognitionListGrammar.xaml.cs similarity index 95% rename from Samples/SpeechRecognitionAndSynthesis/cs/ContinuousRecognitionListGrammarScenario.xaml.cs rename to Samples/SpeechRecognitionAndSynthesis/cs/Scenario_ContinuousRecognitionListGrammar.xaml.cs index f2a3eb92a3..d16994d236 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cs/ContinuousRecognitionListGrammarScenario.xaml.cs +++ b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_ContinuousRecognitionListGrammar.xaml.cs @@ -21,17 +21,13 @@ using Windows.UI.Xaml.Controls; using Windows.UI.Xaml.Navigation; -namespace SpeechAndTTS +namespace SDKTemplate { - public sealed partial class ContinuousRecoListGrammarScenario : Page + public sealed partial class Scenario_ContinuousRecognitionListGrammar : Page { // Reference to the main sample page in order to post status messages. private MainPage rootPage; - // Speech events may come in on a thread other than the UI thread, keep track of the UI thread's - // dispatcher, so we can update the UI in a thread-safe manner. - private CoreDispatcher dispatcher; - // The speech recognizer used throughout this sample. private SpeechRecognizer speechRecognizer; @@ -45,10 +41,12 @@ public sealed partial class ContinuousRecoListGrammarScenario : Page private ResourceContext speechContext; private ResourceMap speechResourceMap; + private bool isPopulatingLanguages = false; + // Keep track of whether the continuous recognizer is currently running, so it can be cleaned up appropriately. 
private bool isListening; - public ContinuousRecoListGrammarScenario() + public Scenario_ContinuousRecognitionListGrammar() { this.InitializeComponent(); isListening = false; @@ -66,9 +64,6 @@ protected async override void OnNavigatedTo(NavigationEventArgs e) { rootPage = MainPage.Current; - // Keep track of the UI thread dispatcher, as speech events will come in on a separate thread. - dispatcher = CoreWindow.GetForCurrentThread().Dispatcher; - // Prompt the user for permission to access the microphone. This request will only happen // once, it will not re-prompt if the user rejects the permission. bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission(); @@ -104,7 +99,8 @@ private void PopulateLanguageDropdown() { // disable the callback so we don't accidentally trigger initialization of the recognizer // while initialization is already in progress. - cbLanguageSelection.SelectionChanged -= cbLanguageSelection_SelectionChanged; + isPopulatingLanguages = true; + Language defaultLanguage = SpeechRecognizer.SystemSpeechLanguage; IEnumerable supportedLanguages = SpeechRecognizer.SupportedGrammarLanguages; foreach (Language lang in supportedLanguages) @@ -120,8 +116,8 @@ private void PopulateLanguageDropdown() cbLanguageSelection.SelectedItem = item; } } - - cbLanguageSelection.SelectionChanged += cbLanguageSelection_SelectionChanged; + + isPopulatingLanguages = false; } /// @@ -132,6 +128,11 @@ private void PopulateLanguageDropdown() /// Ignored private async void cbLanguageSelection_SelectionChanged(object sender, SelectionChangedEventArgs e) { + if (isPopulatingLanguages) + { + return; + } + ComboBoxItem item = (ComboBoxItem)(cbLanguageSelection.SelectedItem); Language newLanguage = (Language)item.Tag; if (speechRecognizer != null) @@ -310,7 +311,7 @@ private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecogn { if (args.Status != SpeechRecognitionResultStatus.Success) { - await 
dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { rootPage.NotifyUser("Continuous Recognition Completed: " + args.Status.ToString(), NotifyType.StatusMessage); ContinuousRecoButtonText.Text = " Continuous Recognition"; @@ -342,7 +343,7 @@ private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuous if (args.Result.Confidence == SpeechRecognitionConfidence.Medium || args.Result.Confidence == SpeechRecognitionConfidence.High) { - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { heardYouSayTextBlock.Visibility = Visibility.Visible; resultTextBlock.Visibility = Visibility.Visible; @@ -353,7 +354,7 @@ await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech // is not the primary input mechanism for the application. - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { heardYouSayTextBlock.Visibility = Visibility.Collapsed; resultTextBlock.Visibility = Visibility.Visible; @@ -369,7 +370,7 @@ await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => /// The current state of the recognizer. 
private async void SpeechRecognizer_StateChanged(SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args) { - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { rootPage.NotifyUser(args.State.ToString(), NotifyType.StatusMessage); }); } diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/ContinuousRecognitionSRGSConstraintScenario.xaml.cs b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_ContinuousRecognitionSRGSGrammar.xaml.cs similarity index 96% rename from Samples/SpeechRecognitionAndSynthesis/cs/ContinuousRecognitionSRGSConstraintScenario.xaml.cs rename to Samples/SpeechRecognitionAndSynthesis/cs/Scenario_ContinuousRecognitionSRGSGrammar.xaml.cs index 3c5e18196a..3266b23087 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cs/ContinuousRecognitionSRGSConstraintScenario.xaml.cs +++ b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_ContinuousRecognitionSRGSGrammar.xaml.cs @@ -25,17 +25,13 @@ using Windows.UI.Xaml.Media; using Windows.UI.Xaml.Navigation; -namespace SpeechAndTTS +namespace SDKTemplate { - public sealed partial class ContinuousRecoSRGSConstraintScenario : Page + public sealed partial class Scenario_ContinuousRecognitionSRGSGrammar : Page { // Reference to the main sample page in order to post status messages. private MainPage rootPage; - // Speech events may come in on a thread other than the UI thread, keep track of the UI thread's - // dispatcher, so we can update the UI in a thread-safe manner. - private CoreDispatcher dispatcher; - /// /// the HResult 0x8004503a typically represents the case where a recognizer for a particular language cannot /// be found. This may occur if the language is installed, but the speech pack for that language is not. 
@@ -49,6 +45,8 @@ public sealed partial class ContinuousRecoSRGSConstraintScenario : Page private ResourceContext speechContext; private ResourceMap speechResourceMap; + private bool isPopulatingLanguages = false; + private Dictionary colorLookup = new Dictionary { { "COLOR_RED", Colors.Red }, {"COLOR_BLUE", Colors.Blue }, {"COLOR_BLACK", Colors.Black}, @@ -57,7 +55,7 @@ public sealed partial class ContinuousRecoSRGSConstraintScenario : Page { "COLOR_ORANGE",Colors.Orange}, {"COLOR_GRAY", Colors.Gray}, {"COLOR_WHITE", Colors.White} }; - public ContinuousRecoSRGSConstraintScenario() + public Scenario_ContinuousRecognitionSRGSGrammar() { InitializeComponent(); } @@ -74,9 +72,6 @@ protected async override void OnNavigatedTo(NavigationEventArgs e) { rootPage = MainPage.Current; - // Keep track of the UI thread dispatcher, as speech events will come in on a separate thread. - dispatcher = CoreWindow.GetForCurrentThread().Dispatcher; - // Prompt the user for permission to access the microphone. This request will only happen // once, it will not re-prompt if the user rejects the permission. bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission(); @@ -113,7 +108,8 @@ private void PopulateLanguageDropdown() { // disable the callback so we don't accidentally trigger initialization of the recognizer // while initialization is already in progress. 
- cbLanguageSelection.SelectionChanged -= cbLanguageSelection_SelectionChanged; + isPopulatingLanguages = true; + Language defaultLanguage = SpeechRecognizer.SystemSpeechLanguage; IEnumerable supportedLanguages = SpeechRecognizer.SupportedGrammarLanguages; foreach (Language lang in supportedLanguages) @@ -129,7 +125,7 @@ private void PopulateLanguageDropdown() cbLanguageSelection.SelectedItem = item; } } - cbLanguageSelection.SelectionChanged += cbLanguageSelection_SelectionChanged; + isPopulatingLanguages = false; } /// @@ -140,6 +136,11 @@ private void PopulateLanguageDropdown() /// Ignored private async void cbLanguageSelection_SelectionChanged(object sender, SelectionChangedEventArgs e) { + if (isPopulatingLanguages) + { + return; + } + btnContinuousRecognize.IsEnabled = false; ComboBoxItem item = (ComboBoxItem)(cbLanguageSelection.SelectedItem); Language newLanguage = (Language)item.Tag; @@ -334,7 +335,7 @@ public async void ContinuousRecognize_Click(object sender, RoutedEventArgs e) /// The state of the recognizer private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecognitionSession sender, SpeechContinuousRecognitionCompletedEventArgs args) { - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { rootPage.NotifyUser("Continuous Recognition Completed: " + args.Status.ToString(), NotifyType.StatusMessage); ContinuousRecoButtonText.Text = " Continuous Recognition"; @@ -356,7 +357,7 @@ private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuous if (args.Result.Confidence == SpeechRecognitionConfidence.Medium || args.Result.Confidence == SpeechRecognitionConfidence.High) { - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { HandleRecognitionResult(args.Result); }); @@ -367,7 +368,7 @@ await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { // In some 
scenarios, a developer may choose to ignore giving the user feedback in this case, if speech // is not the primary input mechanism for the application. - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { resultTextBlock.Text = speechResourceMap.GetValue("SRGSGarbagePromptText", speechContext).ValueAsString; }); @@ -381,7 +382,7 @@ await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => /// The current state of the recognizer. private async void SpeechRecognizer_StateChanged(SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args) { - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { rootPage.NotifyUser(args.State.ToString(), NotifyType.StatusMessage); }); diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/ListConstraintScenario.xaml.cs b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_ListConstraint.xaml.cs similarity index 97% rename from Samples/SpeechRecognitionAndSynthesis/cs/ListConstraintScenario.xaml.cs rename to Samples/SpeechRecognitionAndSynthesis/cs/Scenario_ListConstraint.xaml.cs index c6e7e918c1..a141820d42 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cs/ListConstraintScenario.xaml.cs +++ b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_ListConstraint.xaml.cs @@ -25,9 +25,9 @@ using Windows.UI.Xaml.Controls; using Windows.UI.Xaml.Navigation; -namespace SpeechAndTTS +namespace SDKTemplate { - public sealed partial class ListConstraintScenario : Page + public sealed partial class Scenario_ListConstraint : Page { /// /// This HResult represents the scenario where a user is prompted to allow in-app speech, but @@ -44,12 +44,12 @@ public sealed partial class ListConstraintScenario : Page private static uint HResultRecognizerNotFound = 0x8004503a; private SpeechRecognizer speechRecognizer; - private CoreDispatcher dispatcher; private ResourceContext speechContext; 
private ResourceMap speechResourceMap; + private bool isPopulatingLanguages = false; private IAsyncOperation recognitionOperation; - public ListConstraintScenario() + public Scenario_ListConstraint() { InitializeComponent(); } @@ -62,9 +62,6 @@ public ListConstraintScenario() /// The navigation event details protected async override void OnNavigatedTo(NavigationEventArgs e) { - // Save the UI thread dispatcher to allow speech status messages to be shown on the UI. - dispatcher = CoreWindow.GetForCurrentThread().Dispatcher; - bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission(); if (permissionGained) { @@ -100,7 +97,8 @@ private void PopulateLanguageDropdown() { // disable the callback so we don't accidentally trigger initialization of the recognizer // while initialization is already in progress. - cbLanguageSelection.SelectionChanged -= cbLanguageSelection_SelectionChanged; + isPopulatingLanguages = true; + Language defaultLanguage = SpeechRecognizer.SystemSpeechLanguage; IEnumerable supportedLanguages = SpeechRecognizer.SupportedGrammarLanguages; foreach(Language lang in supportedLanguages) @@ -116,7 +114,7 @@ private void PopulateLanguageDropdown() cbLanguageSelection.SelectedItem = item; } } - cbLanguageSelection.SelectionChanged += cbLanguageSelection_SelectionChanged; + isPopulatingLanguages = false; } /// @@ -127,6 +125,11 @@ private void PopulateLanguageDropdown() /// Ignored private async void cbLanguageSelection_SelectionChanged(object sender, SelectionChangedEventArgs e) { + if (isPopulatingLanguages) + { + return; + } + ComboBoxItem item = (ComboBoxItem)(cbLanguageSelection.SelectedItem); Language newLanguage = (Language)item.Tag; if (speechRecognizer != null) @@ -301,7 +304,7 @@ private async Task InitializeRecognizer(Language recognizerLanguage) /// The recognizer's status private async void SpeechRecognizer_StateChanged(SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args) { - await 
dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { MainPage.Current.NotifyUser("Speech recognizer state: " + args.State.ToString(), NotifyType.StatusMessage); }); diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/PauseAsyncScenario.xaml.cs b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_PauseAsync.xaml.cs similarity index 96% rename from Samples/SpeechRecognitionAndSynthesis/cs/PauseAsyncScenario.xaml.cs rename to Samples/SpeechRecognitionAndSynthesis/cs/Scenario_PauseAsync.xaml.cs index 6bbab8d8fd..cf4fd02c24 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cs/PauseAsyncScenario.xaml.cs +++ b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_PauseAsync.xaml.cs @@ -19,17 +19,13 @@ using Windows.UI.Xaml.Controls; using Windows.UI.Xaml.Navigation; -namespace SpeechAndTTS +namespace SDKTemplate { - public sealed partial class PauseAsyncScenario : Page + public sealed partial class Scenario_PauseAsync : Page { // Reference to the main sample page in order to post status messages. private MainPage rootPage; - // Speech events may come in on a thread other than the UI thread, keep track of the UI thread's - // dispatcher, so we can update the UI in a thread-safe manner. - private CoreDispatcher dispatcher; - // The speech recognizer used throughout this sample. private SpeechRecognizer speechRecognizer; @@ -40,7 +36,7 @@ public sealed partial class PauseAsyncScenario : Page private SpeechRecognitionListConstraint emailConstraint; private SpeechRecognitionListConstraint phoneConstraint; - public PauseAsyncScenario() + public Scenario_PauseAsync() { InitializeComponent(); isListening = false; @@ -58,9 +54,6 @@ protected async override void OnNavigatedTo(NavigationEventArgs e) { rootPage = MainPage.Current; - // Keep track of the UI thread dispatcher, as speech events will come in on a separate thread. 
- dispatcher = CoreWindow.GetForCurrentThread().Dispatcher; - // Prompt the user for permission to access the microphone. This request will only happen // once, it will not re-prompt if the user rejects the permission. bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission(); @@ -163,7 +156,7 @@ private async void ContinuousRecognitionSession_Completed(SpeechContinuousRecogn { if (args.Status != SpeechRecognitionResultStatus.Success) { - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { rootPage.NotifyUser("Continuous Recognition Completed: " + args.Status.ToString(), NotifyType.StatusMessage); recognizeButtonText.Text = " Continuous Recognition"; @@ -196,7 +189,7 @@ private async void ContinuousRecognitionSession_ResultGenerated(SpeechContinuous if (args.Result.Confidence == SpeechRecognitionConfidence.Medium || args.Result.Confidence == SpeechRecognitionConfidence.High) { - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { resultTextBlock.Text = string.Format("Heard: '{0}', (Tag: '{1}', Confidence: {2})", args.Result.Text, tag, args.Result.Confidence.ToString()); }); @@ -205,7 +198,7 @@ await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { // In some scenarios, a developer may choose to ignore giving the user feedback in this case, if speech // is not the primary input mechanism for the application. - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { resultTextBlock.Text = string.Format("Sorry, I didn't catch that. (Heard: '{0}', Tag: {1}, Confidence: {2})", args.Result.Text, tag, args.Result.Confidence.ToString()); }); @@ -219,7 +212,7 @@ await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => /// The current state of the recognizer. 
private async void SpeechRecognizer_StateChanged(SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args) { - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { rootPage.NotifyUser(args.State.ToString(), NotifyType.StatusMessage); }); diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/PredefinedDictationGrammarScenario.xaml.cs b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_PredefinedDictationGrammar.xaml.cs similarity index 97% rename from Samples/SpeechRecognitionAndSynthesis/cs/PredefinedDictationGrammarScenario.xaml.cs rename to Samples/SpeechRecognitionAndSynthesis/cs/Scenario_PredefinedDictationGrammar.xaml.cs index 53a11f85d3..f9420299fb 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cs/PredefinedDictationGrammarScenario.xaml.cs +++ b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_PredefinedDictationGrammar.xaml.cs @@ -23,9 +23,9 @@ using Windows.UI.Xaml.Navigation; using Windows.Foundation; -namespace SpeechAndTTS +namespace SDKTemplate { - public sealed partial class PredefinedDictationGrammarScenario : Page + public sealed partial class Scenario_PredefinedDictationGrammar : Page { /// /// This HResult represents the scenario where a user is prompted to allow in-app speech, but @@ -35,12 +35,11 @@ public sealed partial class PredefinedDictationGrammarScenario : Page private static uint HResultPrivacyStatementDeclined = 0x80045509; private SpeechRecognizer speechRecognizer; - private CoreDispatcher dispatcher; private IAsyncOperation recognitionOperation; private ResourceContext speechContext; private ResourceMap speechResourceMap; - public PredefinedDictationGrammarScenario() + public Scenario_PredefinedDictationGrammar() { InitializeComponent(); } @@ -53,9 +52,6 @@ public PredefinedDictationGrammarScenario() /// The navigation event details protected async override void OnNavigatedTo(NavigationEventArgs e) { - // Save the UI thread dispatcher to allow 
speech status messages to be shown on the UI. - dispatcher = CoreWindow.GetForCurrentThread().Dispatcher; - bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission(); if (permissionGained) { @@ -213,7 +209,7 @@ private async Task InitializeRecognizer(Language recognizerLanguage) /// The recognizer's status private async void SpeechRecognizer_StateChanged(SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args) { - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { MainPage.Current.NotifyUser("Speech recognizer state: " + args.State.ToString(), NotifyType.StatusMessage); }); diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/PredefinedWebSearchGrammarScenario.xaml.cs b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_PredefinedWebSearchGrammar.xaml.cs similarity index 97% rename from Samples/SpeechRecognitionAndSynthesis/cs/PredefinedWebSearchGrammarScenario.xaml.cs rename to Samples/SpeechRecognitionAndSynthesis/cs/Scenario_PredefinedWebSearchGrammar.xaml.cs index 665e062ca0..5f269803d4 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cs/PredefinedWebSearchGrammarScenario.xaml.cs +++ b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_PredefinedWebSearchGrammar.xaml.cs @@ -23,9 +23,9 @@ using Windows.Foundation; using Windows.ApplicationModel.Resources.Core; -namespace SpeechAndTTS +namespace SDKTemplate { - public sealed partial class PredefinedWebSearchGrammarScenario : Page + public sealed partial class Scenario_PredefinedWebSearchGrammar : Page { /// /// This HResult represents the scenario where a user is prompted to allow in-app speech, but @@ -35,12 +35,11 @@ public sealed partial class PredefinedWebSearchGrammarScenario : Page private static uint HResultPrivacyStatementDeclined = 0x80045509; private SpeechRecognizer speechRecognizer; - private CoreDispatcher dispatcher; private IAsyncOperation recognitionOperation; private 
ResourceContext speechContext; private ResourceMap speechResourceMap; - public PredefinedWebSearchGrammarScenario() + public Scenario_PredefinedWebSearchGrammar() { InitializeComponent(); } @@ -53,9 +52,6 @@ public PredefinedWebSearchGrammarScenario() /// The navigation event details protected async override void OnNavigatedTo(NavigationEventArgs e) { - // Save the UI thread dispatcher to allow speech status messages to be shown on the UI. - dispatcher = CoreWindow.GetForCurrentThread().Dispatcher; - bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission(); if (permissionGained) { @@ -183,7 +179,7 @@ private async Task InitializeRecognizer(Language recognizerLanguage) /// The recognizer's status private async void SpeechRecognizer_StateChanged(SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args) { - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { MainPage.Current.NotifyUser("Speech recognizer state: " + args.State.ToString(), NotifyType.StatusMessage); }); diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/SRGSConstraintScenario.xaml.cs b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_SRGSConstraint.xaml.cs similarity index 97% rename from Samples/SpeechRecognitionAndSynthesis/cs/SRGSConstraintScenario.xaml.cs rename to Samples/SpeechRecognitionAndSynthesis/cs/Scenario_SRGSConstraint.xaml.cs index 76bcb6eeea..6db638ad4d 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cs/SRGSConstraintScenario.xaml.cs +++ b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_SRGSConstraint.xaml.cs @@ -26,9 +26,9 @@ using Windows.UI.Xaml.Navigation; using Windows.Foundation; -namespace SpeechAndTTS +namespace SDKTemplate { - public sealed partial class SRGSConstraintScenario : Page + public sealed partial class Scenario_SRGSConstraint : Page { /// /// This HResult represents the scenario where a user is prompted to allow in-app speech, but @@ -46,9 +46,9 
@@ public sealed partial class SRGSConstraintScenario : Page private SpeechRecognizer speechRecognizer; private IAsyncOperation recognitionOperation; - private CoreDispatcher dispatcher; private ResourceContext speechContext; private ResourceMap speechResourceMap; + private bool isPopulatingLanguages = false; private Dictionary colorLookup = new Dictionary { @@ -59,7 +59,7 @@ public sealed partial class SRGSConstraintScenario : Page }; - public SRGSConstraintScenario() + public Scenario_SRGSConstraint() { InitializeComponent(); } @@ -72,9 +72,6 @@ public SRGSConstraintScenario() /// The navigation event details protected async override void OnNavigatedTo(NavigationEventArgs e) { - // Save the UI thread dispatcher to allow speech status messages to be shown on the UI. - dispatcher = CoreWindow.GetForCurrentThread().Dispatcher; - bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission(); if (permissionGained) { @@ -112,7 +109,8 @@ private void PopulateLanguageDropdown() // disable the callback so we don't accidentally trigger initialization of the recognizer // while initialization is already in progress. 
- cbLanguageSelection.SelectionChanged -= cbLanguageSelection_SelectionChanged; + isPopulatingLanguages = true; + Language defaultLanguage = SpeechRecognizer.SystemSpeechLanguage; IEnumerable supportedLanguages = SpeechRecognizer.SupportedGrammarLanguages; foreach (Language lang in supportedLanguages) @@ -128,8 +126,8 @@ private void PopulateLanguageDropdown() cbLanguageSelection.SelectedItem = item; } } - - cbLanguageSelection.SelectionChanged += cbLanguageSelection_SelectionChanged; + + isPopulatingLanguages = false; } /// @@ -140,6 +138,11 @@ private void PopulateLanguageDropdown() /// Ignored private async void cbLanguageSelection_SelectionChanged(object sender, SelectionChangedEventArgs e) { + if (isPopulatingLanguages) + { + return; + } + ComboBoxItem item = (ComboBoxItem)(cbLanguageSelection.SelectedItem); Language newLanguage = (Language)item.Tag; @@ -279,7 +282,7 @@ private async Task InitializeRecognizer(Language recognizerLanguage) /// The recognizer's status private async void SpeechRecognizer_StateChanged(SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args) { - await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => { MainPage.Current.NotifyUser("Speech recognizer state: " + args.State.ToString(), NotifyType.StatusMessage); }); diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/SynthesizeSSMLScenario.xaml.cs b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_SynthesizeSSML.xaml.cs similarity index 57% rename from Samples/SpeechRecognitionAndSynthesis/cs/SynthesizeSSMLScenario.xaml.cs rename to Samples/SpeechRecognitionAndSynthesis/cs/Scenario_SynthesizeSSML.xaml.cs index 6773410ff3..913982f4b8 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cs/SynthesizeSSMLScenario.xaml.cs +++ b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_SynthesizeSSML.xaml.cs @@ -13,20 +13,23 @@ using System.Linq; using Windows.ApplicationModel.Resources.Core; using 
Windows.Data.Xml.Dom; +using Windows.Foundation.Collections; +using Windows.Media.Core; +using Windows.Media.Playback; using Windows.Media.SpeechSynthesis; using Windows.UI.Xaml; using Windows.UI.Xaml.Controls; using Windows.UI.Xaml.Media; -namespace SpeechAndTTS +namespace SDKTemplate { - public sealed partial class SynthesizeSSMLScenario : Page + public sealed partial class Scenario_SynthesizeSSML : Page { private SpeechSynthesizer synthesizer; private ResourceContext speechContext; private ResourceMap speechResourceMap; - public SynthesizeSSMLScenario() + public Scenario_SynthesizeSSML() { InitializeComponent(); synthesizer = new SpeechSynthesizer(); @@ -36,6 +39,13 @@ public SynthesizeSSMLScenario() speechResourceMap = ResourceManager.Current.MainResourceMap.GetSubtree("LocalizationTTSResources"); + MediaPlayer player = new MediaPlayer(); + player.AutoPlay = false; + player.MediaEnded += media_MediaEnded; + + media.SetMediaPlayer(player); + media.MediaPlayer.AutoPlay = false; + InitializeListboxVoiceChooser(); UpdateSSMLText(); } @@ -48,9 +58,9 @@ public SynthesizeSSMLScenario() private async void Speak_Click(object sender, RoutedEventArgs e) { // If the media is playing, the user has pressed the button to stop the playback. - if (media.CurrentState.Equals(MediaElementState.Playing)) + if (media.MediaPlayer.PlaybackSession.PlaybackState == MediaPlaybackState.Playing) { - media.Stop(); + media.MediaPlayer.Pause(); btnSpeak.Content = "Speak"; } else @@ -66,10 +76,19 @@ private async void Speak_Click(object sender, RoutedEventArgs e) // Create a stream from the text. This will be played using a media element. SpeechSynthesisStream synthesisStream = await synthesizer.SynthesizeSsmlToStreamAsync(text); - // Set the source and start playing the synthesized audio stream. 
- media.AutoPlay = true; - media.SetSource(synthesisStream, synthesisStream.ContentType); - media.Play(); + // Create a media source from the stream: + var mediaSource = MediaSource.CreateFromStream(synthesisStream, synthesisStream.ContentType); + + //Create a Media Playback Item + var mediaPlaybackItem = new MediaPlaybackItem(mediaSource); + + // Ensure that the app is notified for marks. + RegisterForMarkEvents(mediaPlaybackItem); + + // Set the source of the MediaElement or MediaPlayerElement to the MediaPlaybackItem + // and start playing the synthesized audio stream. + media.Source = mediaPlaybackItem; + media.MediaPlayer.Play(); } catch (System.IO.FileNotFoundException) { @@ -93,6 +112,87 @@ private async void Speak_Click(object sender, RoutedEventArgs e) } } + /// + /// Register for all mark events + /// + /// The Media PLayback Item add handlers to. + private void RegisterForMarkEvents(MediaPlaybackItem mediaPlaybackItem) + { + //tracks could all be generated at creation + for (int index =0; index < mediaPlaybackItem.TimedMetadataTracks.Count; index++) + { + RegisterMetadataHandlerForMarks(mediaPlaybackItem, index); + } + + // if the tracks are added later we will + // monitor the tracks being added and subscribe to the ones of interest + mediaPlaybackItem.TimedMetadataTracksChanged += (MediaPlaybackItem sender, IVectorChangedEventArgs args) => + { + if (args.CollectionChange == CollectionChange.ItemInserted) + { + RegisterMetadataHandlerForMarks(sender, (int)args.Index); + } + else if (args.CollectionChange == CollectionChange.Reset) + { + for (int index = 0; index < sender.TimedMetadataTracks.Count; index++) + { + RegisterMetadataHandlerForMarks(sender, index); + } + } + }; + } + + /// + /// Register for just word bookmark events. + /// + /// The Media Playback Item to register handlers for. + /// Index of the timedMetadataTrack within the mediaPlaybackItem. 
+ private void RegisterMetadataHandlerForMarks(MediaPlaybackItem mediaPlaybackItem, int index) + { + //make sure we only register for bookmarks + var timedTrack = mediaPlaybackItem.TimedMetadataTracks[index]; + if(timedTrack.Id == "SpeechBookmark") + { + timedTrack.CueEntered += metadata_MarkCueEntered; + mediaPlaybackItem.TimedMetadataTracks.SetPresentationMode((uint)index, TimedMetadataTrackPresentationMode.ApplicationPresented); + + } + } + + /// + /// This function executes when a SpeechCue is hit and calls the functions to update the UI + /// + /// The timedMetadataTrack associated with the event. + /// the arguments associated with the event. + private async void metadata_MarkCueEntered(TimedMetadataTrack timedMetadataTrack, MediaCueEventArgs args) + { + // Check in case there are different tracks and the handler was used for more tracks + if (timedMetadataTrack.TimedMetadataKind == TimedMetadataKind.Speech) + { + var cue = args.Cue as SpeechCue; + if (cue != null) + { + System.Diagnostics.Debug.WriteLine("Cue text:[" + cue.Text + "]"); + // Do something with the cue + await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, + () => + { + // Your UI update code goes here! + FillTextBox(cue); + }); + } + } + } + + /// + /// Update the UI with text from the mark + /// + /// The cue containing the text + private void FillTextBox(SpeechCue cue) + { + textBoxLastMarkTriggered.Text = cue.Text; + } + /// /// This is invoked when the stream is finished playing. /// @@ -101,9 +201,14 @@ private async void Speak_Click(object sender, RoutedEventArgs e) /// /// unused object parameter /// unused event parameter - void media_MediaEnded(object sender, RoutedEventArgs e) + async void media_MediaEnded(MediaPlayer sender, object e) { - btnSpeak.Content = "Speak"; + await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, + () => + { + // Your UI update code goes here! 
+ btnSpeak.Content = "Speak"; + }); } /// diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/SynthesizeTextScenario.xaml.cs b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_SynthesizeText.xaml.cs similarity index 97% rename from Samples/SpeechRecognitionAndSynthesis/cs/SynthesizeTextScenario.xaml.cs rename to Samples/SpeechRecognitionAndSynthesis/cs/Scenario_SynthesizeText.xaml.cs index 6b066b2a73..172fb82a9b 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cs/SynthesizeTextScenario.xaml.cs +++ b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_SynthesizeText.xaml.cs @@ -18,15 +18,15 @@ using Windows.UI.Xaml.Controls; using Windows.UI.Xaml.Media; -namespace SpeechAndTTS +namespace SDKTemplate { - public sealed partial class SynthesizeTextScenario : Page + public sealed partial class Scenario_SynthesizeText : Page { private SpeechSynthesizer synthesizer; private ResourceContext speechContext; private ResourceMap speechResourceMap; - public SynthesizeTextScenario() + public Scenario_SynthesizeText() { InitializeComponent(); synthesizer = new SpeechSynthesizer(); @@ -47,7 +47,7 @@ public SynthesizeTextScenario() private async void Speak_Click(object sender, RoutedEventArgs e) { // If the media is playing, the user has pressed the button to stop the playback. - if (media.CurrentState.Equals(MediaElementState.Playing)) + if (media.CurrentState == MediaElementState.Playing) { media.Stop(); btnSpeak.Content = "Speak"; diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_SynthesizeTextBoundaries.xaml.cs b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_SynthesizeTextBoundaries.xaml.cs new file mode 100644 index 0000000000..1bcab17896 --- /dev/null +++ b/Samples/SpeechRecognitionAndSynthesis/cs/Scenario_SynthesizeTextBoundaries.xaml.cs @@ -0,0 +1,303 @@ +//********************************************************* +// +// Copyright (c) Microsoft. All rights reserved. +// This code is licensed under the MIT License (MIT). 
+// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. +// +//********************************************************* + +using System; +using System.Linq; +using Windows.ApplicationModel.Resources.Core; +using Windows.Foundation.Collections; +using Windows.Media.Core; +using Windows.Media.Playback; +using Windows.Media.SpeechSynthesis; +using Windows.UI.Core; +using Windows.UI.Xaml; +using Windows.UI.Xaml.Controls; + +namespace SDKTemplate +{ + public sealed partial class Scenario_SynthesizeTextBoundaries : Page + { + private SpeechSynthesizer synthesizer; + private ResourceContext speechContext; + private ResourceMap speechResourceMap; + + public Scenario_SynthesizeTextBoundaries() + { + InitializeComponent(); + synthesizer = new SpeechSynthesizer(); + + speechContext = ResourceContext.GetForCurrentView(); + speechContext.Languages = new string[] { SpeechSynthesizer.DefaultVoice.Language }; + + speechResourceMap = ResourceManager.Current.MainResourceMap.GetSubtree("LocalizationTTSResources"); + + MediaPlayer player = new MediaPlayer(); + player.AutoPlay = false; + player.MediaEnded += media_MediaEnded; + + media.SetMediaPlayer(player); + + InitializeListboxVoiceChooser(); + } + + /// + /// This is invoked when the user clicks on the speak/stop button. + /// + /// Button that triggered this event + /// State information about the routed event + private async void Speak_Click(object sender, RoutedEventArgs e) + { + // If the media is playing, the user has pressed the button to stop the playback. + if (media.MediaPlayer.PlaybackSession.PlaybackState == MediaPlaybackState.Playing) + { + media.MediaPlayer.Pause(); + btnSpeak.Content = "Speak"; + } + else + { + string text = textToSynthesize.Text; + if (!String.IsNullOrEmpty(text)) + { + // Change the button label. 
You could also just disable the button if you don't want any user control. + btnSpeak.Content = "Stop"; + + try + { + // Enable word marker generation (false by default). + synthesizer.Options.IncludeWordBoundaryMetadata = true; + synthesizer.Options.IncludeSentenceBoundaryMetadata = true; + + SpeechSynthesisStream synthesisStream = await synthesizer.SynthesizeTextToStreamAsync(text); + + // Create a media source from the stream: + var mediaSource = MediaSource.CreateFromStream(synthesisStream, synthesisStream.ContentType); + + //Create a Media Playback Item   + var mediaPlaybackItem = new MediaPlaybackItem(mediaSource); + + // Ensure that the app is notified for cues.  + RegisterForWordBoundaryEvents(mediaPlaybackItem); + + // Set the source of the MediaElement or MediaPlayerElement to the MediaPlaybackItem + // and start playing the synthesized audio stream. + media.Source = mediaPlaybackItem; + media.MediaPlayer.Play(); + } + catch (System.IO.FileNotFoundException) + { + // If media player components are unavailable, (eg, using a N SKU of windows), we won't + // be able to start media playback. Handle this gracefully + btnSpeak.Content = "Speak"; + btnSpeak.IsEnabled = false; + textToSynthesize.IsEnabled = false; + listboxVoiceChooser.IsEnabled = false; + var messageDialog = new Windows.UI.Popups.MessageDialog("Media player components unavailable"); + await messageDialog.ShowAsync(); + } + catch (Exception) + { + // If the text is unable to be synthesized, throw an error message to the user. + btnSpeak.Content = "Speak"; + media.AutoPlay = false; + var messageDialog = new Windows.UI.Popups.MessageDialog("Unable to synthesize text"); + await messageDialog.ShowAsync(); + } + } + } + } + + /// + /// This is invoked when the stream is finished playing. + /// + /// + /// In this case, we're changing the button label based on the state. 
+ /// + /// unused object parameter + /// unused event parameter + async void media_MediaEnded(MediaPlayer sender, object e) + { + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, + () => + { + // Your UI update code goes here! + btnSpeak.Content = "Speak"; + }); + } + + /// + /// This creates items out of the system installed voices. The voices are then displayed in a listbox. + /// This allows the user to change the voice of the synthesizer in your app based on their preference. + /// + private void InitializeListboxVoiceChooser() + { + // Get all of the installed voices. + var voices = SpeechSynthesizer.AllVoices; + + // Get the currently selected voice. + VoiceInformation currentVoice = synthesizer.Voice; + + foreach (VoiceInformation voice in voices.OrderBy(p => p.Language)) + { + ComboBoxItem item = new ComboBoxItem(); + item.Name = voice.DisplayName; + item.Tag = voice; + item.Content = voice.DisplayName + " (Language: " + voice.Language + ")"; + listboxVoiceChooser.Items.Add(item); + + // Check to see if we're looking at the current voice and set it as selected in the listbox. + if (currentVoice.Id == voice.Id) + { + item.IsSelected = true; + listboxVoiceChooser.SelectedItem = item; + } + } + } + + /// + /// This is called when the user has selects a voice from the drop down. + /// + /// unused object parameter + /// unused event parameter + private void ListboxVoiceChooser_SelectionChanged(object sender, SelectionChangedEventArgs e) + { + ComboBoxItem item = (ComboBoxItem)(listboxVoiceChooser.SelectedItem); + VoiceInformation voice = (VoiceInformation)(item.Tag); + synthesizer.Voice = voice; + + // update UI text to be an appropriate default translation. 
+ speechContext.Languages = new string[] { voice.Language }; + textToSynthesize.Text = speechResourceMap.GetValue("SynthesizeTextBoundariesDefaultText", speechContext).ValueAsString; + } + + /// + /// This function executes when a SpeechCue is hit and calls the functions to update the UI + /// + /// The timedMetadataTrack associated with the event. + /// the arguments associated with the event. + private async void metadata_SpeechCueEntered(TimedMetadataTrack timedMetadataTrack, MediaCueEventArgs args) + { + // Check in case there are different tracks and the handler was used for more tracks + if (timedMetadataTrack.TimedMetadataKind == TimedMetadataKind.Speech) + { + var cue = args.Cue as SpeechCue; + if (cue != null) + { + System.Diagnostics.Debug.WriteLine("Hit Cue with start:" + cue.StartPositionInInput + " and end:" + cue.EndPositionInInput); + System.Diagnostics.Debug.WriteLine("Cue text:[" + cue.Text + "]"); + // Do something with the cue + await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, + () => + { + // Your UI update code goes here! + HighlightTextOnScreen(cue.StartPositionInInput, cue.EndPositionInInput); + FillTextBoxes(cue, timedMetadataTrack); + }); + } + } + } + + /// + /// This function executes when a SpeechCue is hit and calls the functions to update the UI + /// + /// The timedMetadataTrack associated with the event. + /// the SpeechCue object. + private void FillTextBoxes(SpeechCue cue, TimedMetadataTrack timedMetadataTrack) + { + //if it is a sentence cue, populate the sentence text box. + if(timedMetadataTrack.Id == "SpeechSentence") + { + textBoxLastSpeechSentence.Text = cue.Text; + } + //if it is a word cue, populate the word text box + if(timedMetadataTrack.Id == "SpeechWord") + { + textBoxLastSpeechWord.Text = cue.Text; + } + } + + /// + /// This function selects the text associated with the start and end positions. 
+ /// + /// the starting index of the word + /// the ending index of the word + private void HighlightTextOnScreen(int? startPositionInInput, int? endPositionInInput) + { + if (startPositionInInput != null && endPositionInInput != null) + { + textToSynthesize.Select(startPositionInInput.Value, endPositionInInput.Value - startPositionInInput.Value + 1); + } + } + + /// + /// Register for all boundary events and register a function to add any new events if they arise. + /// + /// The Media PLayback Item add handlers to. + private void RegisterForWordBoundaryEvents(MediaPlaybackItem mediaPlaybackItem) + { + // If tracks were available at source resolution time, itterate through and register: + for (int index = 0; index < mediaPlaybackItem.TimedMetadataTracks.Count; index++) + { + RegisterMetadataHandlerForWords(mediaPlaybackItem, index); + RegisterMetadataHandlerForSentences(mediaPlaybackItem, index); + } + + // Since the tracks are added later we will  + // monitor the tracks being added and subscribe to the ones of interest + mediaPlaybackItem.TimedMetadataTracksChanged += (MediaPlaybackItem sender, IVectorChangedEventArgs args) => + { + if (args.CollectionChange == CollectionChange.ItemInserted) + { + RegisterMetadataHandlerForWords(sender, (int)args.Index); + RegisterMetadataHandlerForSentences(mediaPlaybackItem, (int)args.Index); + } + else if (args.CollectionChange == CollectionChange.Reset) + { + for (int index = 0; index < sender.TimedMetadataTracks.Count; index++) + { + RegisterMetadataHandlerForWords(sender, index); + RegisterMetadataHandlerForSentences(mediaPlaybackItem, index); + } + } + }; + } + + /// + /// Register for just word boundary events. + /// + /// The Media Playback Item to register handlers for. + /// Index of the timedMetadataTrack within the mediaPlaybackItem. 
+ private void RegisterMetadataHandlerForWords(MediaPlaybackItem mediaPlaybackItem, int index) + { + var timedTrack = mediaPlaybackItem.TimedMetadataTracks[index]; + //register for only word cues + if (timedTrack.Id == "SpeechWord" && checkBoxWordBoundaries.IsChecked.Value) + { + timedTrack.CueEntered += metadata_SpeechCueEntered; + mediaPlaybackItem.TimedMetadataTracks.SetPresentationMode((uint)index, TimedMetadataTrackPresentationMode.ApplicationPresented); + } + } + + /// + /// Register for just sentence boundary events. + /// + /// The Media Playback Item to register handlers for. + /// Index of the timedMetadataTrack within the mediaPlaybackItem. + private void RegisterMetadataHandlerForSentences(MediaPlaybackItem mediaPlaybackItem, int index) + { + var timedTrack = mediaPlaybackItem.TimedMetadataTracks[index]; + //register for only sentence cues + if (timedTrack.Id == "SpeechSentence" && checkBoxSentenceBoundaries.IsChecked.Value) + { + timedTrack.CueEntered += metadata_SpeechCueEntered; + mediaPlaybackItem.TimedMetadataTracks.SetPresentationMode((uint)index, TimedMetadataTrackPresentationMode.ApplicationPresented); + } + } + } +} diff --git a/Samples/SpeechRecognitionAndSynthesis/cs/SpeechAndTTS.csproj b/Samples/SpeechRecognitionAndSynthesis/cs/SpeechAndTTS.csproj index c1370847d8..29c9ffdc9f 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cs/SpeechAndTTS.csproj +++ b/Samples/SpeechRecognitionAndSynthesis/cs/SpeechAndTTS.csproj @@ -14,7 +14,6 @@ 10.0.15063.0 10.0.15063.0 14 - true true 512 {A5A43C5B-DE2A-4C0C-9213-0A381AF9435A};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC} @@ -97,43 +96,46 @@ App.xaml - - ContinuousDictationScenario.xaml + + Scenario_ContinuousDictation.xaml - - ContinuousRecognitionListGrammarScenario.xaml + + Scenario_ContinuousRecognitionListGrammar.xaml - - ContinuousRecognitionSRGSConstraintScenario.xaml + + Scenario_ContinuousRecognitionSRGSGrammar.xaml MainPage.xaml.cs MainPage.xaml - - PredefinedDictationGrammarScenario.xaml + + 
Scenario_PredefinedDictationGrammar.xaml - - PredefinedWebSearchGrammarScenario.xaml + + Scenario_PredefinedWebSearchGrammar.xaml - - ListConstraintScenario.xaml + + Scenario_ListConstraint.xaml Properties\AssemblyInfo.cs - - SRGSConstraintScenario.xaml + + Scenario_SRGSConstraint.xaml - - PauseAsyncScenario.xaml + + Scenario_PauseAsync.xaml - - SynthesizeSSMLScenario.xaml + + Scenario_SynthesizeSSML.xaml - - SynthesizeTextScenario.xaml + + Scenario_SynthesizeTextBoundaries.xaml + + + Scenario_SynthesizeText.xaml @@ -147,15 +149,18 @@ MSBuild:Compile Designer - + + Scenario_ContinuousDictation.xaml MSBuild:Compile Designer - + + Scenario_ContinuousRecognitionListGrammar.xaml MSBuild:Compile Designer - + + Scenario_ContinuousRecognitionSRGSGrammar.xaml MSBuild:Compile Designer @@ -164,19 +169,23 @@ MSBuild:Compile Designer - + + Scenario_PredefinedDictationGrammar.xaml MSBuild:Compile Designer - + + Scenario_PredefinedWebSearchGrammar.xaml MSBuild:Compile Designer - + + Scenario_ListConstraint.xaml MSBuild:Compile Designer - + + Scenario_SRGSConstraint.xaml MSBuild:Compile Designer @@ -185,15 +194,23 @@ MSBuild:Compile Designer - + + Scenario_PauseAsync.xaml + MSBuild:Compile + Designer + + + Scenario_SynthesizeSSML.xaml MSBuild:Compile Designer - + + Scenario_SynthesizeTextBoundaries.xaml MSBuild:Compile Designer - + + Scenario_SynthesizeText.xaml MSBuild:Compile Designer diff --git a/Samples/SpeechRecognitionAndSynthesis/js/SpeechAndTTS.jsproj b/Samples/SpeechRecognitionAndSynthesis/js/SpeechAndTTS.jsproj index 8d61ea254b..3150ae8af6 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/SpeechAndTTS.jsproj +++ b/Samples/SpeechRecognitionAndSynthesis/js/SpeechAndTTS.jsproj @@ -50,16 +50,17 @@ default.html - - + + + - - - - - - - + + + + + + + images\microsoft-sdk.png @@ -86,16 +87,17 @@ - - + + + - - - - - - - + + + + + + + Microsoft.WinJS.4.0\css\ui-dark.css diff --git a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario9_ContinuousRecognitionSRGSGrammar.html 
b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario10_ContinuousRecognitionSRGSGrammar.html similarity index 94% rename from Samples/SpeechRecognitionAndSynthesis/js/html/scenario9_ContinuousRecognitionSRGSGrammar.html rename to Samples/SpeechRecognitionAndSynthesis/js/html/scenario10_ContinuousRecognitionSRGSGrammar.html index e66d79824f..1bc0eeda7f 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario9_ContinuousRecognitionSRGSGrammar.html +++ b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario10_ContinuousRecognitionSRGSGrammar.html @@ -17,7 +17,7 @@ - + diff --git a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario10_PauseAsync.html b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario11_PauseAsync.html similarity index 96% rename from Samples/SpeechRecognitionAndSynthesis/js/html/scenario10_PauseAsync.html rename to Samples/SpeechRecognitionAndSynthesis/js/html/scenario11_PauseAsync.html index 8745afbbf4..8b3313397e 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario10_PauseAsync.html +++ b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario11_PauseAsync.html @@ -17,7 +17,7 @@ - + diff --git a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario2_SynthesizeTextBoundaries.html b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario2_SynthesizeTextBoundaries.html new file mode 100644 index 0000000000..172c0a93c6 --- /dev/null +++ b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario2_SynthesizeTextBoundaries.html @@ -0,0 +1,56 @@ + + + + + + + + + + + +
+
+

Description:

+
+

+ This sample showcases speech synthesis with word and sentence boundary events, using WinRT APIs to convert text to speech.

+
+
+
+ + + +

+
+
+ Use Word Boundaries + Use Sentence Boundaries +
+
+

+ Last word boundary hit:

+ +
+
+

+ Last sentence boundary hit:

+ +
+
+ + + \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario2_SynthesizeSSML.html b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario3_SynthesizeSSML.html similarity index 80% rename from Samples/SpeechRecognitionAndSynthesis/js/html/scenario2_SynthesizeSSML.html rename to Samples/SpeechRecognitionAndSynthesis/js/html/scenario3_SynthesizeSSML.html index 6c7adfd07d..993b724a5c 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario2_SynthesizeSSML.html +++ b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario3_SynthesizeSSML.html @@ -16,7 +16,7 @@ - + @@ -35,6 +35,11 @@

Description:

+
+

Last mark triggered:

+ +
diff --git a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario3_PredefinedDictationGrammar.html b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario4_PredefinedDictationGrammar.html similarity index 96% rename from Samples/SpeechRecognitionAndSynthesis/js/html/scenario3_PredefinedDictationGrammar.html rename to Samples/SpeechRecognitionAndSynthesis/js/html/scenario4_PredefinedDictationGrammar.html index 61ef77fac4..636956f9c3 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario3_PredefinedDictationGrammar.html +++ b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario4_PredefinedDictationGrammar.html @@ -17,7 +17,7 @@ - + diff --git a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario4_PredefinedWebSearchGrammar.html b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario5_PredefinedWebSearchGrammar.html similarity index 96% rename from Samples/SpeechRecognitionAndSynthesis/js/html/scenario4_PredefinedWebSearchGrammar.html rename to Samples/SpeechRecognitionAndSynthesis/js/html/scenario5_PredefinedWebSearchGrammar.html index dae2a47571..dc0e0b07a0 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario4_PredefinedWebSearchGrammar.html +++ b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario5_PredefinedWebSearchGrammar.html @@ -17,7 +17,7 @@ - + diff --git a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario5_ListConstraint.html b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario6_ListConstraint.html similarity index 96% rename from Samples/SpeechRecognitionAndSynthesis/js/html/scenario5_ListConstraint.html rename to Samples/SpeechRecognitionAndSynthesis/js/html/scenario6_ListConstraint.html index e9fbf648ea..42483076e6 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario5_ListConstraint.html +++ b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario6_ListConstraint.html @@ -17,7 +17,7 @@ - + diff --git a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario6_SRGSConstraint.html 
b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario7_SRGSConstraint.html similarity index 96% rename from Samples/SpeechRecognitionAndSynthesis/js/html/scenario6_SRGSConstraint.html rename to Samples/SpeechRecognitionAndSynthesis/js/html/scenario7_SRGSConstraint.html index 6bfcc88e1e..f6e56f4748 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario6_SRGSConstraint.html +++ b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario7_SRGSConstraint.html @@ -17,7 +17,7 @@ - + diff --git a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario7_ContinuousDictation.html b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario8_ContinuousDictation.html similarity index 96% rename from Samples/SpeechRecognitionAndSynthesis/js/html/scenario7_ContinuousDictation.html rename to Samples/SpeechRecognitionAndSynthesis/js/html/scenario8_ContinuousDictation.html index 7eda2d0fd1..5ce63ca14c 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario7_ContinuousDictation.html +++ b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario8_ContinuousDictation.html @@ -18,7 +18,7 @@ - + diff --git a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario8_ContinuousRecognitionListGrammar.html b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario9_ContinuousRecognitionListGrammar.html similarity index 95% rename from Samples/SpeechRecognitionAndSynthesis/js/html/scenario8_ContinuousRecognitionListGrammar.html rename to Samples/SpeechRecognitionAndSynthesis/js/html/scenario9_ContinuousRecognitionListGrammar.html index e93499710e..b9234810fe 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/html/scenario8_ContinuousRecognitionListGrammar.html +++ b/Samples/SpeechRecognitionAndSynthesis/js/html/scenario9_ContinuousRecognitionListGrammar.html @@ -17,7 +17,7 @@ - + diff --git a/Samples/SpeechRecognitionAndSynthesis/js/js/sample-configuration.js b/Samples/SpeechRecognitionAndSynthesis/js/js/sample-configuration.js index 16391fbb20..bb58687a81 100644 --- 
a/Samples/SpeechRecognitionAndSynthesis/js/js/sample-configuration.js +++ b/Samples/SpeechRecognitionAndSynthesis/js/js/sample-configuration.js @@ -16,15 +16,16 @@ var scenarios = [ { url: "/html/scenario1_SynthesizeText.html", title: "Synthesize Text" }, - { url: "/html/scenario2_SynthesizeSSML.html", title: "Synthesize SSML" }, - { url: "/html/scenario3_PredefinedDictationGrammar.html", title: "Predefined Dictation Grammar" }, - { url: "/html/scenario4_PredefinedWebSearchGrammar.html", title: "Predefined Web Search Grammar" }, - { url: "/html/scenario5_ListConstraint.html", title: "Custom List Constraint" }, - { url: "/html/scenario6_SRGSConstraint.html", title: "Custom SRGS Constraint" }, - { url: "/html/scenario7_ContinuousDictation.html", title: "Continuous Dictation" }, - { url: "/html/scenario8_ContinuousRecognitionListGrammar.html", title: "Continuous List Commands" }, - { url: "/html/scenario9_ContinuousRecognitionSRGSGrammar.html", title: "Continuous SRGS Commands" }, - { url: "/html/scenario10_PauseAsync.html", title: "PauseAsync to Change Grammar" } + { url: "/html/scenario2_SynthesizeTextBoundaries.html", title: "Synthesize Text with Boundaries" }, + { url: "/html/scenario3_SynthesizeSSML.html", title: "Synthesize SSML" }, + { url: "/html/scenario4_PredefinedDictationGrammar.html", title: "Predefined Dictation Grammar" }, + { url: "/html/scenario5_PredefinedWebSearchGrammar.html", title: "Predefined Web Search Grammar" }, + { url: "/html/scenario6_ListConstraint.html", title: "Custom List Constraint" }, + { url: "/html/scenario7_SRGSConstraint.html", title: "Custom SRGS Constraint" }, + { url: "/html/scenario8_ContinuousDictation.html", title: "Continuous Dictation" }, + { url: "/html/scenario9_ContinuousRecognitionListGrammar.html", title: "Continuous List Commands" }, + { url: "/html/scenario10_ContinuousRecognitionSRGSGrammar.html", title: "Continuous SRGS Commands" }, + { url: "/html/scenario11_PauseAsync.html", title: "PauseAsync to Change 
Grammar" } ]; WinJS.Namespace.define("SdkSample", { diff --git a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario9_ContinuousRecognitionSRGSGrammar.js b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario10_ContinuousRecognitionSRGSGrammar.js similarity index 99% rename from Samples/SpeechRecognitionAndSynthesis/js/js/scenario9_ContinuousRecognitionSRGSGrammar.js rename to Samples/SpeechRecognitionAndSynthesis/js/js/scenario10_ContinuousRecognitionSRGSGrammar.js index ffc136ef82..79b6e63f23 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario9_ContinuousRecognitionSRGSGrammar.js +++ b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario10_ContinuousRecognitionSRGSGrammar.js @@ -11,7 +11,7 @@ (function () { "use strict"; - var page = WinJS.UI.Pages.define("/html/scenario9_ContinuousRecognitionSRGSGrammar.html", { + var page = WinJS.UI.Pages.define("/html/scenario10_ContinuousRecognitionSRGSGrammar.html", { ready: function (element, options) { AudioCapturePermissions.requestMicrophonePermission().then(function (available) { if (available) { diff --git a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario10_PauseAsync.js b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario11_PauseAsync.js similarity index 99% rename from Samples/SpeechRecognitionAndSynthesis/js/js/scenario10_PauseAsync.js rename to Samples/SpeechRecognitionAndSynthesis/js/js/scenario11_PauseAsync.js index bfcb2e3f0d..61d795a71d 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario10_PauseAsync.js +++ b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario11_PauseAsync.js @@ -11,7 +11,7 @@ (function () { "use strict"; - var page = WinJS.UI.Pages.define("/html/scenario10_PauseAsync.html", { + var page = WinJS.UI.Pages.define("/html/scenario11_PauseAsync.html", { ready: function (element, options) { AudioCapturePermissions.requestMicrophonePermission().then(function (available) { if (available) { diff --git 
a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario1_SynthesizeText.js b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario1_SynthesizeText.js index bfab9eaea8..528ed0b288 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario1_SynthesizeText.js +++ b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario1_SynthesizeText.js @@ -52,6 +52,7 @@ var synthesizer; var audio; + var marksMetadataTrackForAudio; // localization resources var context; @@ -95,11 +96,11 @@ // Creates a stream from the text. This will be played using an audio element. synthesizer.synthesizeTextToStreamAsync(textToSynthesize.value).done( - function (markersStream) { + function (textStream) { // Set the source and start playing the synthesized audio stream. - var blob = MSApp.createBlobFromRandomAccessStream(markersStream.ContentType, markersStream); + var blob = MSApp.createBlobFromRandomAccessStream(textStream.ContentType, textStream); audio.src = URL.createObjectURL(blob, { oneTimeOnly: true }); - markersStream.seek(0); + textStream.seek(0); audio.play(); }, function (error) { diff --git a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario2_SynthesizeTextBoundaries.js b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario2_SynthesizeTextBoundaries.js new file mode 100644 index 0000000000..1837cfa556 --- /dev/null +++ b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario2_SynthesizeTextBoundaries.js @@ -0,0 +1,326 @@ +//********************************************************* +// +// Copyright (c) Microsoft. All rights reserved. +// This code is licensed under the MIT License (MIT). +// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. 
+// +//********************************************************* + +(function () { + "use strict"; + var page = WinJS.UI.Pages.define("/html/scenario2_SynthesizeTextBoundaries.html", { + ready: function (element, options) { + try { + synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer(); + audio = new Audio(); + + // + // Create a track on the HTML5 audio tag to receive the metadata created  + // by the speech synthesizer + // + marksMetadataTrackForAudio = audio.addTextTrack("metadata", "SpeechBoundaries"); + marksMetadataTrackForAudio.addEventListener("cuechange", cueChange, false); + + btnSpeak.addEventListener("click", speakFn, false); + voicesSelect.addEventListener("click", setVoiceFunction, false); + + var rcns = Windows.ApplicationModel.Resources.Core; + context = new rcns.ResourceContext(); + context.languages = new Array(synthesizer.voice.language); + resourceMap = rcns.ResourceManager.current.mainResourceMap.getSubtree('LocalizationTTSResources'); + + textToSynthesize.innerText = resourceMap.getValue('SynthesizeTextBoundariesDefaultText', context).valueAsString; + + listbox_GetVoices(); + audio_SetUp(); + + causeAudioTrackCrash(); + } catch (exception) { + if (exception.number == -2147467263) {// E_NOTIMPL + + // If media player components aren't installed (for example, when using an N SKU of windows) + // this error may occur when instantiating the Audio object. + statusMessage.innerText = "Media Player components are not available."; + statusBox.style.backgroundColor = "red"; + btnSpeak.disabled = true; + textToSynthesize.disabled = true; + } + } + }, + + unload: function (element, options) { + if (audio != null) { + audio.onpause = null; + audio.pause(); + } + } + }); + + var synthesizer; + var audio; + var marksMetadataTrackForAudio; + + // localization resources + var context; + var resourceMap; + + function audio_SetUp() { + /// + /// Sets up the audio element's events so the app UI updates based on the current state of playback. 
+ /// + audio.onplay = function () { // Fires when the audio begins playing + statusMessage.innerText = "Playing"; + }; + + audio.onpause = function () { // Fires when the user presses the stop button + statusMessage.innerText = "Completed"; + btnSpeak.innerText = "Speak"; + }; + + audio.onended = function () { // Fires when the audio finishes playing + statusMessage.innerText = "Completed"; + btnSpeak.innerText = "Speak"; + voicesSelect.disabled = false; + // Clean old cues + while (marksMetadataTrackForAudio.cues.length > 0) { + var cue = marksMetadataTrackForAudio.cues[0]; + marksMetadataTrackForAudio.removeCue(cue); + } + }; + } + + function speakFn() { + /// + /// This is invoked when the user clicks on the speak/stop button. It attempts to convert the + /// textbox content into a stream, then plays the stream through the audio element. + /// + if (btnSpeak.innerText == "Stop") { + voicesSelect.disabled = false; + audio.pause(); + // Clean old cues + while (marksMetadataTrackForAudio.cues.length > 0) { + var cue = marksMetadataTrackForAudio.cues[0]; + marksMetadataTrackForAudio.removeCue(cue); + } + return; + } + + // Changes the button label. You could also just disable the button if you don't want any user control. + voicesSelect.disabled = true; + btnSpeak.innerText = "Stop"; + statusBox.style.backgroundColor = "green"; + + if (checkBoxWordBoundaries.checked == true) { + // Enable cues track generation for word boundaries + synthesizer.options.includeWordBoundaryMetadata = true + } + else { + //make sure it is false if unchecked + synthesizer.options.includeWordBoundaryMetadata = false; + } + + if (checkBoxSentenceBoundaries.checked == true) { + // Enable cues track generation for word boundaries + synthesizer.options.includeSentenceBoundaryMetadata = true + } + else { + //make sure it is false if unchecked + synthesizer.options.includeSentenceBoundaryMetadata = false; + } + + // Creates a stream from the text. This will be played using an audio element. 
+ synthesizer.synthesizeTextToStreamAsync(textToSynthesize.value).done( + function (markersStream) { + // Set the source and start playing the synthesized audio stream. + var blob = MSApp.createBlobFromRandomAccessStream(markersStream.ContentType, markersStream); + audio.src = URL.createObjectURL(blob, { oneTimeOnly: true }); + markersStream.seek(0); + // There is only may be more than one track. We need to set Audio Metadata from all of them + // Word boundaries will be one track and Sentence boundaries will be another if both are active + if (markersStream.timedMetadataTracks.length !== 0) { + setAudioMetadaTrack(markersStream.timedMetadataTracks); + } + audio.play(); + }, + function (error) { + errorMessage(error.message); + }); + } + + function causeAudioTrackCrash() { + + var metadataTrackForAudio = audio.addTextTrack("metadata", "SpeechBoundaries"); + metadataTrackForAudio.addEventListener("cuechange", cueChange, false); + + var textCue1 = new TextTrackCue(.101, 250, "first Cue"); + metadataTrackForAudio.addCue(textCue1); + var textCue2 = new TextTrackCue(.099, 250, "second Cue"); + metadataTrackForAudio.addCue(textCue2); + } + + // + // Event handler for cues being reached by the audio stream being played out. + // + + function cueChange() { + var cues = marksMetadataTrackForAudio.activeCues; + + if (cues.length > 0) { + for (var index = 0; index < cues.length; index++) { + var speechCue = JSON.parse(cues[index].text); + + statusMessage.innerText = speechCue.text; + // the plus 1 is because it is a 0 based array + highlightTextOnScreen(speechCue.startPositionInInput, speechCue.endPositionInInput + 1); + fillTextBoxes(speechCue); + + //depending on the speed of your cues, a wait may be helpfult to make sure it displays. + wait(200); + } + } + } + + function sleep(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); + } + + async function wait(ms) { + await sleep(ms); + } + + //populate the correct text boxes with the last boundary hit. 
+ function fillTextBoxes(speechCue) { + if (speechCue.cueType == "SpeechWord") { + lastWordBoundaryText.innerText = speechCue.text; + } + if (speechCue.cueType == "SpeechSentence") { + lastSentenceBoundaryText.innerText = speechCue.text; + } + } + + //select the text between the positions to highlight them on the screen + function highlightTextOnScreen(startPosition, endPosition) { + textToSynthesize.setSelectionRange(startPosition, endPosition); + } + + function listbox_GetVoices() { + /// + /// This creates items out of the system installed voices. The voices are then displayed in a listbox. + /// This allows the user to change the voice of the synthesizer in your app based on their preference. + /// + + // Get the list of all of the voices installed on this machine. + var allVoices = Windows.Media.SpeechSynthesis.SpeechSynthesizer.allVoices; + + // Get the currently selected voice. + var defaultVoice = Windows.Media.SpeechSynthesis.SpeechSynthesizer.defaultVoice; + + for (var voiceIndex = 0; voiceIndex < allVoices.size; voiceIndex++) { + var currVoice = allVoices[voiceIndex]; + var option = document.createElement("option"); + option.text = currVoice.displayName + " (" + currVoice.language + ")"; + voicesSelect.add(option, null); + + // Check to see if we're looking at the current voice and set it as selected in the listbox. + if (currVoice.id === defaultVoice.id) { + voicesSelect.selectedIndex = voiceIndex; + } + } + } + + function setVoiceFunction() { + /// + /// This is called when the user selects a voice from the drop down. + /// + if (voicesSelect.selectedIndex !== -1) { + var allVoices = Windows.Media.SpeechSynthesis.SpeechSynthesizer.allVoices; + + // Use the selected index to find the voice. + var selectedVoice = allVoices[voicesSelect.selectedIndex]; + + synthesizer.voice = selectedVoice; + + // change the language of the sample text. 
+ context.languages = new Array(synthesizer.voice.language); + textToSynthesize.innerText = resourceMap.getValue('SynthesizeTextDefaultText', context).valueAsString; + } + } + + function errorMessage(text) { + /// + /// Sets the specified text area with the error message details. + /// + errorTextArea.innerText = text; + } + + // + // This function sets the cues from the media track on the speech synthesizer stream to a  + // HTML5 audio track with media cues converted in json format.  + function setAudioMetadaTrack(speechMediaTracks) { + + var index = 0; + var jindex = 0; + //currently the addTextCue must be called in order so we have to sort the sentence and word boundaries + //there is a bug open to fix the addTextCue + if (speechMediaTracks.length == 1) { //we just have sentence OR word boundaries + for (index = 0; index < speechMediaTracks[0].cues.length; index++) { + var speechCue = speechMediaTracks[0].cues[index]; + addTextCue(speechCue, speechMediaTracks[0].id); + } + } + else { // we have sentence AND word boundaries + var speechCue0; + var speechCue1; + while (index < speechMediaTracks[0].cues.length && jindex < speechMediaTracks[1].cues.length) { + if (index < speechMediaTracks[0].cues.length) { + speechCue0 = speechMediaTracks[0].cues[index]; + } + else { + speechCue0.startTime = Number.MAX_SAFE_INTEGER; + } + if (jindex < speechMediaTracks[1].cues.length) { + speechCue1 = speechMediaTracks[1].cues[jindex]; + } + else { + speechCue1.startTime = Number.MAX_SAFE_INTEGER; + } + if (speechCue1.startTime < speechCue0.startTime) {//speechCue1 comes first + addTextCue(speechCue1, speechMediaTracks[1].id); + jindex++; + } + else { //speechCue0 comes first + addTextCue(speechCue0, speechMediaTracks[0].id); + index++; + } + } + while (index < speechMediaTracks[0].cues.length) { + speechCue0 = speechMediaTracks[0].cues[index]; + addTextCue(speechCue0, speechMediaTracks[0].id); + index++; + } + while (jindex < speechMediaTracks[1].cues.length) { + speechCue1 = 
speechMediaTracks[1].cues[jindex]; + addTextCue(speechCue1, speechMediaTracks[1].id); + jindex++; + } + } + } + + function addTextCue(speechCue, type) { + //we need to properly clone the data properties and put them in the TextTrackCue to make them available at the event trigger time + var jsonSpeech = { + "startPositionInInput": speechCue.startPositionInInput, + "endPositionInInput": speechCue.endPositionInInput, + "text": speechCue.text, + "cueType": type, + "duration": speechCue.duration + } + var textCue = new TextTrackCue(speechCue.startTime / 1000.0, (speechCue.startTime + speechCue.duration + 300) / 1000.0, JSON.stringify(jsonSpeech)); + marksMetadataTrackForAudio.addCue(textCue); + + //git test + } +})(); diff --git a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario2_SynthesizeSSML.js b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario3_SynthesizeSSML.js similarity index 78% rename from Samples/SpeechRecognitionAndSynthesis/js/js/scenario2_SynthesizeSSML.js rename to Samples/SpeechRecognitionAndSynthesis/js/js/scenario3_SynthesizeSSML.js index 32bc799555..ceb9152ef1 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario2_SynthesizeSSML.js +++ b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario3_SynthesizeSSML.js @@ -11,12 +11,20 @@ (function () { "use strict"; - var page = WinJS.UI.Pages.define("/html/scenario2_SynthesizeSSML.html", { + var page = WinJS.UI.Pages.define("/html/scenario3_SynthesizeSSML.html", { ready: function (element, options) { try { synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer(); + audio = new Audio(); + // + // Create a track on the HTML5 audio tag to receive the metadata created  + // by the speech synthesizer + // + marksMetadataTrackForAudio = audio.addTextTrack("metadata", "SpeechBookmarks"); + marksMetadataTrackForAudio.addEventListener("cuechange", cueChange, false); + btnSpeak.addEventListener("click", speakFn, false); voicesSelect.addEventListener("click", setVoiceFunction, false); @@ 
-30,7 +38,7 @@ listbox_GetVoices(); audio_SetUp(); } catch (exception) { - if (exception.number == -2147467263) // E_NOTIMPL + if (exception.number === -2147467263) // E_NOTIMPL { // If media player components aren't installed (for example, when using an N SKU of windows) // this error may occur when instantiating the Audio object. @@ -43,7 +51,7 @@ }, unload: function (element, options) { - if (audio != null) { + if (audio !== null) { audio.onpause = null; audio.pause(); } @@ -52,6 +60,7 @@ var synthesizer; var audio; + var marksMetadataTrackForAudio; // localization resources var context; @@ -82,7 +91,7 @@ /// This is invoked when the user clicks on the speak/stop button. It attempts to convert the /// textbox content into a stream, then plays the stream through the audio element. ///
- if (btnSpeak.innerText == "Stop") { + if (btnSpeak.innerText === "Stop") { voicesSelect.disabled = false; audio.pause(); return; @@ -100,6 +109,11 @@ var blob = MSApp.createBlobFromRandomAccessStream(markersStream.ContentType, markersStream); audio.src = URL.createObjectURL(blob, { oneTimeOnly: true }); markersStream.seek(0); + + // There is only one track present the one for cues so we pick it + if (markersStream.timedMetadataTracks.length !== 0) { + setAudioMetadaTrack(markersStream.timedMetadataTracks[0]); + } audio.play(); }, function (error) { @@ -107,6 +121,19 @@ }); } + // + // Event handler for cues being reached by the audio stream being played out. + // + + function cueChange() { + var cues = marksMetadataTrackForAudio.activeCues; + if (cues.length > 0) { + var speechCue = cues[0]; + statusMessage.innerText = speechCue.text; + lastMarkTriggered.innerText =speechCue.text; + } + } + function listbox_GetVoices() { /// /// This creates items out of the system installed voices. The voices are then displayed in a listbox. @@ -190,4 +217,23 @@ /// errorTextArea.innerText = text; } + + // + // This function sets the cues from the media track on the speech synthesizer stream to a  + // HTML5 audio track with media cues converted in json format.  
+ function setAudioMetadaTrack(speechMediaTrack) { + + // Clean old cues + while (marksMetadataTrackForAudio.cues.length > 0) { + var cue = marksMetadataTrackForAudio.cues[0]; + marksMetadataTrackForAudio.removeCue(cue); + } + + for (var index = 0; index < speechMediaTrack.cues.length; index++) { + var speechCue = speechMediaTrack.cues[index]; + var textCue = new TextTrackCue(speechCue.startTime / 1000.0, (speechCue.startTime + speechCue.duration + 100) / 1000.0, speechCue.text); + marksMetadataTrackForAudio.addCue(textCue); + } + + } })(); diff --git a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario3_PredefinedDictationGrammar.js b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario4_PredefinedDictationGrammar.js similarity index 99% rename from Samples/SpeechRecognitionAndSynthesis/js/js/scenario3_PredefinedDictationGrammar.js rename to Samples/SpeechRecognitionAndSynthesis/js/js/scenario4_PredefinedDictationGrammar.js index 26dddbf989..c2e3fe8ac3 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario3_PredefinedDictationGrammar.js +++ b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario4_PredefinedDictationGrammar.js @@ -11,7 +11,7 @@ (function () { "use strict"; - var page = WinJS.UI.Pages.define("/html/scenario3_PredefinedDictationGrammar.html", { + var page = WinJS.UI.Pages.define("/html/scenario4_PredefinedDictationGrammar.html", { ready: function (element, options) { AudioCapturePermissions.requestMicrophonePermission().then(function (available) { if (available) { diff --git a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario4_PredefinedWebSearchGrammar.js b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario5_PredefinedWebSearchGrammar.js similarity index 99% rename from Samples/SpeechRecognitionAndSynthesis/js/js/scenario4_PredefinedWebSearchGrammar.js rename to Samples/SpeechRecognitionAndSynthesis/js/js/scenario5_PredefinedWebSearchGrammar.js index 4a675b6c93..21d24407d9 100644 --- 
a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario4_PredefinedWebSearchGrammar.js +++ b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario5_PredefinedWebSearchGrammar.js @@ -11,7 +11,7 @@ (function () { "use strict"; - var page = WinJS.UI.Pages.define("/html/scenario4_PredefinedWebSearchGrammar.html", { + var page = WinJS.UI.Pages.define("/html/scenario5_PredefinedWebSearchGrammar.html", { ready: function (element, options) { AudioCapturePermissions.requestMicrophonePermission().then(function (available) { if (available) { diff --git a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario5_ListConstraint.js b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario6_ListConstraint.js similarity index 99% rename from Samples/SpeechRecognitionAndSynthesis/js/js/scenario5_ListConstraint.js rename to Samples/SpeechRecognitionAndSynthesis/js/js/scenario6_ListConstraint.js index c3e8ede990..9c9847efd4 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario5_ListConstraint.js +++ b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario6_ListConstraint.js @@ -11,7 +11,7 @@ (function () { "use strict"; - var page = WinJS.UI.Pages.define("/html/scenario5_ListConstraint.html", { + var page = WinJS.UI.Pages.define("/html/scenario6_ListConstraint.html", { ready: function (element, options) { AudioCapturePermissions.requestMicrophonePermission().then(function (available) { if (available) { diff --git a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario6_SRGSConstraint.js b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario7_SRGSConstraint.js similarity index 99% rename from Samples/SpeechRecognitionAndSynthesis/js/js/scenario6_SRGSConstraint.js rename to Samples/SpeechRecognitionAndSynthesis/js/js/scenario7_SRGSConstraint.js index 61448ebc25..2b32706a3d 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario6_SRGSConstraint.js +++ b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario7_SRGSConstraint.js @@ -11,7 +11,7 @@ (function () { "use strict"; - var 
page = WinJS.UI.Pages.define("/html/scenario6_SRGSConstraint.html", { + var page = WinJS.UI.Pages.define("/html/scenario7_SRGSConstraint.html", { ready: function (element, options) { AudioCapturePermissions.requestMicrophonePermission().then(function (available) { if (available) { diff --git a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario7_ContinuousDictation.js b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario8_ContinuousDictation.js similarity index 99% rename from Samples/SpeechRecognitionAndSynthesis/js/js/scenario7_ContinuousDictation.js rename to Samples/SpeechRecognitionAndSynthesis/js/js/scenario8_ContinuousDictation.js index ca76a97092..d8ddc30370 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario7_ContinuousDictation.js +++ b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario8_ContinuousDictation.js @@ -11,7 +11,7 @@ (function () { "use strict"; - var page = WinJS.UI.Pages.define("/html/scenario7_ContinuousDictation.html", { + var page = WinJS.UI.Pages.define("/html/scenario8_ContinuousDictation.html", { ready: function (element, options) { AudioCapturePermissions.requestMicrophonePermission().then(function (available) { if (available) { diff --git a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario8_ContinuousRecognitionListGrammar.js b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario9_ContinuousRecognitionListGrammar.js similarity index 99% rename from Samples/SpeechRecognitionAndSynthesis/js/js/scenario8_ContinuousRecognitionListGrammar.js rename to Samples/SpeechRecognitionAndSynthesis/js/js/scenario9_ContinuousRecognitionListGrammar.js index 22413fac9e..014a097ec7 100644 --- a/Samples/SpeechRecognitionAndSynthesis/js/js/scenario8_ContinuousRecognitionListGrammar.js +++ b/Samples/SpeechRecognitionAndSynthesis/js/js/scenario9_ContinuousRecognitionListGrammar.js @@ -11,7 +11,7 @@ (function () { "use strict"; - var page = WinJS.UI.Pages.define("/html/scenario8_ContinuousRecognitionListGrammar.html", { + var page = 
WinJS.UI.Pages.define("/html/scenario9_ContinuousRecognitionListGrammar.html", { ready: function (element, options) { AudioCapturePermissions.requestMicrophonePermission().then(function (available) { if (available) { diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/de-DE/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/de-DE/LocalizationTTSResources.resjson index ba830b31ff..4791d63843 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/de-DE/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/de-DE/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027de-DE\u0027\u003e\r\n\r\nDies ist ein Beispiel für eine phonetische Aussprache:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . 
IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\nDies ist ein Beispiel eines Datums:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\nDies ist ein Beispiel für eine Zahl zu sagen:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "Der schnelle rot-Fuchs sprang über den faulen Hund der braun" + "SynthesizeSSMLDefaultText": "\n\nDies ist ein Beispiel für eine phonetische Aussprache:\n whatchamacallit .\n\nDies ist ein Beispiel eines Datums:\n 04/30/2013 .\n\nDies ist ein Beispiel für eine Zahl zu sagen:\n 4 .\n\n", + "SynthesizeTextDefaultText": "Der schnelle rote Fuchs sprang über den faulen braunen Hund.", + "SynthesizeTextBoundariesDefaultText": "Sie haben die Möglichkeit, allein per Spracheingabe Programme zu starten, Menüs zu öffnen, auf Schaltflächen oder andere Bildschirmobjekte zu klicken, Text in Dokumente zu diktieren sowie E-Mails zu schreiben und zu versenden. Nahezu alle Aktionen, die mittels Tastatur und Maus ausgeführt werden, lassen sich auch per Spracheingabe ausführen." } diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/de-DE/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/de-DE/LocalizationTTSResources.resw index 3ef8721d6a..db532cf2bf 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/de-DE/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/de-DE/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='de-DE'> -Dies ist ein Beispiel für eine phonetische Aussprache: +<mark name='phonetic'/>Dies ist ein Beispiel für eine phonetische Aussprache: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. 
-Dies ist ein Beispiel eines Datums: +<mark name='date'/>Dies ist ein Beispiel eines Datums: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. -Dies ist ein Beispiel für eine Zahl zu sagen: +<mark name='number'/>Dies ist ein Beispiel für eine Zahl zu sagen: <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> - Der schnelle rot-Fuchs sprang über den faulen Hund der braun + Der schnelle rote Fuchs sprang über den faulen braunen Hund. + + + Sie haben die Möglichkeit, allein per Spracheingabe Programme zu starten, Menüs zu öffnen, auf Schaltflächen oder andere Bildschirmobjekte zu klicken, Text in Dokumente zu diktieren sowie E-Mails zu schreiben und zu versenden. Nahezu alle Aktionen, die mittels Tastatur und Maus ausgeführt werden, lassen sich auch per Spracheingabe ausführen. \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-AU/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-AU/LocalizationTTSResources.resjson index b490c5e73a..b0ebf99298 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-AU/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-AU/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027en-GB\u0027\u003e\r\n\r\nThis is an example of a phonetic pronunciation:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . 
IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\nThis is an example of a date:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\nThis is an example of an ordinal number:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "The Quick Red Fox Jumped Over The Lazy Brown Dog" + "SynthesizeSSMLDefaultText": "\n\nThis is an example of a phonetic pronunciation:\n whatchamacallit .\n\nThis is an example of a date:\n 04/30/2013 .\n\nThis is an example of an ordinal number:\n 4 .\n\n", + "SynthesizeTextDefaultText": "The quick red fox jumped over the lazy brown dog", + "SynthesizeTextBoundariesDefaultText": "Using only your voice, you can start programs, open menus, click buttons and other objects on the screen, dictate text into documents, and write and send emails. Just about everything you do with your keyboard and mouse can be done with only your voice." } diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-AU/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-AU/LocalizationTTSResources.resw index 766d648459..669e428448 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-AU/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-AU/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='en-GB'> -This is an example of a phonetic pronunciation: +<mark name='phonetic'/>This is an example of a phonetic pronunciation: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. -This is an example of a date: +<mark name='date'/>This is an example of a date: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. 
-This is an example of an ordinal number: +<mark name='number'/>This is an example of an ordinal number: <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> - The Quick Red Fox Jumped Over The Lazy Brown Dog + The quick red fox jumped over the lazy brown dog + + + Using only your voice, you can start programs, open menus, click buttons and other objects on the screen, dictate text into documents, and write and send emails. Just about everything you do with your keyboard and mouse can be done with only your voice. \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-CA/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-CA/LocalizationTTSResources.resjson index b490c5e73a..b0ebf99298 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-CA/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-CA/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027en-GB\u0027\u003e\r\n\r\nThis is an example of a phonetic pronunciation:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . 
IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\nThis is an example of a date:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\nThis is an example of an ordinal number:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "The Quick Red Fox Jumped Over The Lazy Brown Dog" + "SynthesizeSSMLDefaultText": "\n\nThis is an example of a phonetic pronunciation:\n whatchamacallit .\n\nThis is an example of a date:\n 04/30/2013 .\n\nThis is an example of an ordinal number:\n 4 .\n\n", + "SynthesizeTextDefaultText": "The quick red fox jumped over the lazy brown dog", + "SynthesizeTextBoundariesDefaultText": "Using only your voice, you can start programs, open menus, click buttons and other objects on the screen, dictate text into documents, and write and send emails. Just about everything you do with your keyboard and mouse can be done with only your voice." } diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-CA/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-CA/LocalizationTTSResources.resw index 766d648459..669e428448 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-CA/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-CA/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='en-GB'> -This is an example of a phonetic pronunciation: +<mark name='phonetic'/>This is an example of a phonetic pronunciation: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. -This is an example of a date: +<mark name='date'/>This is an example of a date: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. 
-This is an example of an ordinal number: +<mark name='number'/>This is an example of an ordinal number: <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> - The Quick Red Fox Jumped Over The Lazy Brown Dog + The quick red fox jumped over the lazy brown dog + + + Using only your voice, you can start programs, open menus, click buttons and other objects on the screen, dictate text into documents, and write and send emails. Just about everything you do with your keyboard and mouse can be done with only your voice. \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-GB/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-GB/LocalizationTTSResources.resjson index b490c5e73a..b0ebf99298 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-GB/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-GB/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027en-GB\u0027\u003e\r\n\r\nThis is an example of a phonetic pronunciation:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . 
IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\nThis is an example of a date:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\nThis is an example of an ordinal number:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "The Quick Red Fox Jumped Over The Lazy Brown Dog" + "SynthesizeSSMLDefaultText": "\n\nThis is an example of a phonetic pronunciation:\n whatchamacallit .\n\nThis is an example of a date:\n 04/30/2013 .\n\nThis is an example of an ordinal number:\n 4 .\n\n", + "SynthesizeTextDefaultText": "The quick red fox jumped over the lazy brown dog", + "SynthesizeTextBoundariesDefaultText": "Using only your voice, you can start programs, open menus, click buttons and other objects on the screen, dictate text into documents, and write and send emails. Just about everything you do with your keyboard and mouse can be done with only your voice." } diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-GB/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-GB/LocalizationTTSResources.resw index 766d648459..669e428448 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-GB/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-GB/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='en-GB'> -This is an example of a phonetic pronunciation: +<mark name='phonetic'/>This is an example of a phonetic pronunciation: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. -This is an example of a date: +<mark name='date'/>This is an example of a date: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. 
-This is an example of an ordinal number: +<mark name='number'/>This is an example of an ordinal number: <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> - The Quick Red Fox Jumped Over The Lazy Brown Dog + The quick red fox jumped over the lazy brown dog + + + Using only your voice, you can start programs, open menus, click buttons and other objects on the screen, dictate text into documents, and write and send emails. Just about everything you do with your keyboard and mouse can be done with only your voice. \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-IN/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-IN/LocalizationTTSResources.resjson index a259064a29..d6c4311d78 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-IN/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-IN/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027en-IN\u0027\u003e\r\n\r\nThis is an example of a phonetic pronunciation:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . 
IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\nThis is an example of a date:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\nThis is an example of an ordinal number:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "The quick red fox jumped over the lazy brown dog" + "SynthesizeSSMLDefaultText": "\nThis is an example of a phonetic pronunciation:\n whatchamacallit .\n\nThis is an example of a date:\n 04/30/2013 .\n\nThis is an example of an ordinal number:\n 4 .\n\n", + "SynthesizeTextDefaultText": "The quick red fox jumped over the lazy brown dog", + "SynthesizeTextBoundariesDefaultText": "Using only your voice, you can start programs, open menus, click buttons and other objects on the screen, dictate text into documents, and write and send emails. Just about everything you do with your keyboard and mouse can be done with only your voice." } diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-IN/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-IN/LocalizationTTSResources.resw index b432611b31..844d02156b 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-IN/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-IN/LocalizationTTSResources.resw @@ -122,19 +122,21 @@ xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='en-IN'> - -This is an example of a phonetic pronunciation: +<mark name='phonetic'/>This is an example of a phonetic pronunciation: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. 
-This is an example of a date: +<mark name='date'/>This is an example of a date: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. -This is an example of an ordinal number: +<mark name='number'/>This is an example of an ordinal number: <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> The quick red fox jumped over the lazy brown dog + + Using only your voice, you can start programs, open menus, click buttons and other objects on the screen, dictate text into documents, and write and send emails. Just about everything you do with your keyboard and mouse can be done with only your voice. + \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-US/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-US/LocalizationTTSResources.resjson index 2133f35e6c..f9efa8b1e2 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-US/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-US/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027en-US\u0027\u003e\r\n\r\nThis is an example of a phonetic pronunciation:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . 
IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\nThis is an example of a date:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\nThis is an example of an ordinal number:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "The quick red fox jumped over the lazy brown dog" + "SynthesizeSSMLDefaultText": "\n\nThis is an example of a phonetic pronunciation:\n whatchamacallit .\n\nThis is an example of a date:\n 04/30/2013 .\n\nThis is an example of an ordinal number:\n 4 .\n\n", + "SynthesizeTextDefaultText": "The quick red fox jumped over the lazy brown dog", + "SynthesizeTextBoundariesDefaultText": "Using only your voice, you can start programs, open menus, click buttons and other objects on the screen, dictate text into documents, and write and send e-mails. Just about everything you do with your keyboard and mouse can be done with only your voice." } diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-US/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-US/LocalizationTTSResources.resw index 190254b246..0c0fbc4718 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-US/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/en-US/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='en-US'> -This is an example of a phonetic pronunciation: +<mark name='phonetic'/>This is an example of a phonetic pronunciation: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. -This is an example of a date: +<mark name='date'/>This is an example of a date: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. 
-This is an example of an ordinal number: +<mark name='number'/>This is an example of an ordinal number: <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> The quick red fox jumped over the lazy brown dog + + Using only your voice, you can start programs, open menus, click buttons and other objects on the screen, dictate text into documents, and write and send e-mails. Just about everything you do with your keyboard and mouse can be done with only your voice. + \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-ES/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-ES/LocalizationTTSResources.resjson index 3b76f71c77..5edd47d34e 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-ES/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-ES/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027es-ES\u0027\u003e\r\n\r\nEste es un ejemplo de una pronunciación fonética:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . 
IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\nEste es un ejemplo de una cita:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\nEste es un ejemplo de decir un número:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "El rápido zorro rojo saltado sobre el perro perezoso marrón" + "SynthesizeSSMLDefaultText": "\n\nEste es un ejemplo de una pronunciación fonética:\n whatchamacallit .\n\nEste es un ejemplo de una cita:\n 04/30/2013 .\n\nEste es un ejemplo de decir un número:\n 4 .\n\n", + "SynthesizeTextDefaultText": "El rápido zorro rojo saltado sobre el perro perezoso marrón", + "SynthesizeTextBoundariesDefaultText": "Solo con su voz podrá iniciar programas, abrir menús, hacer clic en botones y otros objetos de la pantalla, dictar texto para insertar en documentos, y escribir y enviar mensajes de correo electrónico. Casi todo lo que hace con el teclado y el mouse lo puede hacer ahora con la voz." } diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-ES/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-ES/LocalizationTTSResources.resw index 9b41fa4b79..b2ae558a88 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-ES/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-ES/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='es-ES'> -Este es un ejemplo de una pronunciación fonética: +<mark name='phonetic'/>Este es un ejemplo de una pronunciación fonética: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. 
-Este es un ejemplo de una cita: +<mark name='date'/>Este es un ejemplo de una cita: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. -Este es un ejemplo de decir un número: +<mark name='number'/>Este es un ejemplo de decir un número: <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> El rápido zorro rojo saltado sobre el perro perezoso marrón + + Solo con su voz podrá iniciar programas, abrir menús, hacer clic en botones y otros objetos de la pantalla, dictar texto para insertar en documentos, y escribir y enviar mensajes de correo electrónico. Casi todo lo que hace con el teclado y el mouse lo puede hacer ahora con la voz. + \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-MX/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-MX/LocalizationTTSResources.resjson index a261432dab..c9ef719af5 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-MX/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-MX/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027es-MX\u0027\u003e\r\n\r\nEste es un ejemplo de una pronunciación fonética:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . 
IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\nEste es un ejemplo de una cita:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\nEste es un ejemplo de decir un número:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "El rápido zorro rojo saltado sobre el perro perezoso marrón" + "SynthesizeSSMLDefaultText": "\n\nEste es un ejemplo de una pronunciación fonética:\n whatchamacallit .\n\nEste es un ejemplo de una cita:\n 04/30/2013 .\n\nEste es un ejemplo de decir un número:\n 4 .\n\n", + "SynthesizeTextDefaultText": "El rápido zorro rojo saltado sobre el perro perezoso marrón", + "SynthesizeTextBoundariesDefaultText": "Solo con su voz podrá iniciar programas, abrir menús, hacer clic en botones y otros objetos de la pantalla, dictar texto para insertar en documentos, y escribir y enviar mensajes de correo electrónico. Casi todo lo que hace con el teclado y el mouse lo puede hacer ahora con la voz." } diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-MX/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-MX/LocalizationTTSResources.resw index ee8f0d37b0..ada77454df 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-MX/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/es-MX/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='es-MX'> -Este es un ejemplo de una pronunciación fonética: +<mark name='phonetic'/>Este es un ejemplo de una pronunciación fonética: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. 
-Este es un ejemplo de una cita: +<mark name='date'/>Este es un ejemplo de una cita: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. -Este es un ejemplo de decir un número: +<mark name='number'/>Este es un ejemplo de decir un número: <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> El rápido zorro rojo saltado sobre el perro perezoso marrón + + Solo con su voz podrá iniciar programas, abrir menús, hacer clic en botones y otros objetos de la pantalla, dictar texto para insertar en documentos, y escribir y enviar mensajes de correo electrónico. Casi todo lo que hace con el teclado y el mouse lo puede hacer ahora con la voz. + \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/fr-FR/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/fr-FR/LocalizationTTSResources.resjson index 461750c872..752f55d970 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/fr-FR/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/fr-FR/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027fr-FR\u0027\u003e\r\n\r\nIl s\u0027agit d\u0027un exemple d\u0027une prononciation phonétique :\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . 
IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\nIl s\u0027agit d\u0027un exemple d\u0027une date :\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\nIl s\u0027agit d\u0027un exemple de dire un certain nombre :\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "Le rapide renard roux a sauté sur le chien paresseux brun" + "SynthesizeSSMLDefaultText": "\n\nIl s'agit d'un exemple d'une prononciation phonétique :\n whatchamacallit .\n\nIl s'agit d'un exemple d'une date :\n 04/30/2013 .\n\nIl s'agit d'un exemple de dire un certain nombre :\n 4 .\n\n", + "SynthesizeTextDefaultText": "Le rapide renard roux a sauté sur le chien paresseux brun", + "SynthesizeTextBoundariesDefaultText": "Simplement à l’aide de votre voix, vous pouvez démarrer des programmes, ouvrir des menus, cliquer sur des boutons et autres objets à l’écran, dicter du texte dans des documents, et écrire et envoyer des courriers électroniques. Tout ce que vous faites habituellement avec votre clavier et votre souris, vous pouvez le faire avec votre voix." } diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/fr-FR/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/fr-FR/LocalizationTTSResources.resw index 28c2fea084..c2d81add10 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/fr-FR/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/fr-FR/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='fr-FR'> -Il s'agit d'un exemple d'une prononciation phonétique : +<mark name='phonetic'/>Il s'agit d'un exemple d'une prononciation phonétique : <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . 
IH T'> whatchamacallit </phoneme>. -Il s'agit d'un exemple d'une date : +<mark name='date'/>Il s'agit d'un exemple d'une date : <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. -Il s'agit d'un exemple de dire un certain nombre : +<mark name='number'/>Il s'agit d'un exemple de dire un certain nombre : <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> Le rapide renard roux a sauté sur le chien paresseux brun + + Simplement à l’aide de votre voix, vous pouvez démarrer des programmes, ouvrir des menus, cliquer sur des boutons et autres objets à l’écran, dicter du texte dans des documents, et écrire et envoyer des courriers électroniques. Tout ce que vous faites habituellement avec votre clavier et votre souris, vous pouvez le faire avec votre voix. + \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/it-IT/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/it-IT/LocalizationTTSResources.resjson index e0cbdca731..d8d24e6478 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/it-IT/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/it-IT/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027it-IT\u0027\u003e\r\n\r\nQuesto è un esempio di una pronuncia fonetica:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . 
IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\nQuesto è un esempio di una data:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\nQuesto è un esempio di dire un numero:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "Volpe veloce saltato sopra il cane pigro marrone" + "SynthesizeSSMLDefaultText": "\n\nQuesto è un esempio di una pronuncia fonetica:\n whatchamacallit .\n\nQuesto è un esempio di una data:\n 04/30/2013 .\n\nQuesto è un esempio di dire un numero:\n 4 .\n\n", + "SynthesizeTextDefaultText": "Volpe veloce saltato sopra il cane pigro marrone", + "SynthesizeTextBoundariesDefaultText": "Usando solo la voce puoi avviare programmi, aprire menu, fare clic su pulsanti e altri oggetti sullo schermo, dettare testo nei documenti e scrivere e inviare messaggi e-mail. Quasi tutte le operazioni svolte con la tastiera e il mouse possono essere eseguite con la voce." } diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/it-IT/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/it-IT/LocalizationTTSResources.resw index b1e2d0bf48..5b588ca7a3 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/it-IT/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/it-IT/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='it-IT'> -Questo è un esempio di una pronuncia fonetica: +<mark name='phonetic'/>Questo è un esempio di una pronuncia fonetica: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. -Questo è un esempio di una data: +<mark name='date'/>Questo è un esempio di una data: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. 
-Questo è un esempio di dire un numero: +<mark name='number'/>Questo è un esempio di dire un numero: <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> Volpe veloce saltato sopra il cane pigro marrone + + Usando solo la voce puoi avviare programmi, aprire menu, fare clic su pulsanti e altri oggetti sullo schermo, dettare testo nei documenti e scrivere e inviare messaggi e-mail. Quasi tutte le operazioni svolte con la tastiera e il mouse possono essere eseguite con la voce. + \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ja-JP/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ja-JP/LocalizationTTSResources.resjson index 666469fc01..139ebf615c 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ja-JP/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ja-JP/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027en-US\u0027\u003e\r\n\r\nThis is an example of a phonetic pronunciation:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . 
IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\nThis is an example of a date:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\nThis is an example of an ordinal number:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "怠惰な茶色犬を飛び越えたクイック レッド フォックス" + "SynthesizeSSMLDefaultText": "\n\nThis is an example of a phonetic pronunciation:\n whatchamacallit .\n\nThis is an example of a date:\n 04/30/2013 .\n\nThis is an example of an ordinal number:\n 4 .\n\n", + "SynthesizeTextDefaultText": "怠惰な茶色犬を飛び越えたクイック レッド フォックス", + "SynthesizeTextBoundariesDefaultText": "プログラムの起動、メニューのオープン、ボタンなど画面上にあるオブジェクトのクリック、テキストをディクテーションしてドキュメントに保存、電子メールの作成と送信などの操作を、音声だけで行うことができます。キーボードとマウスの操作が、ほぼすべて音声のみで実行できます。" } diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ja-JP/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ja-JP/LocalizationTTSResources.resw index c6fd810e9e..d0f6189e5e 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ja-JP/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ja-JP/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='en-US'> -This is an example of a phonetic pronunciation: +<mark name='phonetic'/>This is an example of a phonetic pronunciation: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. -This is an example of a date: +<mark name='date'/>This is an example of a date: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. -This is an example of an ordinal number: +<mark name='number'/>This is an example of an ordinal number: <say-as interpret-as='ordinal'> 4 </say-as>. 
- +<mark name='end'/> </speak> 怠惰な茶色犬を飛び越えたクイック レッド フォックス + + プログラムの起動、メニューのオープン、ボタンなど画面上にあるオブジェクトのクリック、テキストをディクテーションしてドキュメントに保存、電子メールの作成と送信などの操作を、音声だけで行うことができます。キーボードとマウスの操作が、ほぼすべて音声のみで実行できます。 + \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ko-KR/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ko-KR/LocalizationTTSResources.resjson index 1179af65ca..ccbbe43f8c 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ko-KR/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ko-KR/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027ko-KR\u0027\u003e\r\n\r\n이것은 음성 발음의 예:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\n이것은 날짜의 예:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\n이것은 말하는 숫자의 예입니다.\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "갈색 게으른 개에 뛰어 빠른 레드 폭스" + "SynthesizeSSMLDefaultText": "\n\n이것은 음성 발음의 예:\n whatchamacallit .\n\n이것은 날짜의 예:\n 04/30/2013 .\n\n이것은 말하는 숫자의 예입니다.\n 4 .\n\n", + "SynthesizeTextDefaultText": "갈색 게으른 개에 뛰어 빠른 레드 폭스", + "SynthesizeTextBoundariesDefaultText": "음성만 사용 하면 프로그램, 메뉴 열기, 화면에서 단추 및 기타 개체를 클릭 하 고 문서에 텍스트를 받아쓰게 한 다음 전자 메일을 쓰고 보낼 수 있습니다. 단지 당신이 당신의 키보드와 마우스와 더불어 하는 모든 것에 대해 당신의 목소리로 할 수 있다." 
} diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ko-KR/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ko-KR/LocalizationTTSResources.resw index b9d71ab26b..c0bd54cefb 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ko-KR/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ko-KR/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='ko-KR'> -이것은 음성 발음의 예: +<mark name='phonetic'/>이것은 음성 발음의 예: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. -이것은 날짜의 예: +<mark name='date'/>이것은 날짜의 예: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. -이것은 말하는 숫자의 예입니다. +<mark name='number'/>이것은 말하는 숫자의 예입니다. <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> 갈색 게으른 개에 뛰어 빠른 레드 폭스 + + 음성만 사용 하면 프로그램, 메뉴 열기, 화면에서 단추 및 기타 개체를 클릭 하 고 문서에 텍스트를 받아쓰게 한 다음 전자 메일을 쓰고 보낼 수 있습니다. 단지 당신이 당신의 키보드와 마우스와 더불어 하는 모든 것에 대해 당신의 목소리로 할 수 있다. 
+ \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pl-PL/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pl-PL/LocalizationTTSResources.resjson index e0000c54ba..f6c49d7844 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pl-PL/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pl-PL/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027pl-PL\u0027\u003e\r\n\r\nTo jest przykład fonetycznej wymowy:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\nTo jest przykład daty:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\nTo jest przykład mówi wiele:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "Szybki, czerwony lis przeskoczył nad leniwym psem brązowy" + "SynthesizeSSMLDefaultText": "\n\nTo jest przykład fonetycznej wymowy:\n whatchamacallit .\n\nTo jest przykład daty:\n 04/30/2013 .\n\nTo jest przykład mówi wiele:\n 4 .\n\n", + "SynthesizeTextDefaultText": "Szybki, czerwony lis przeskoczył nad leniwym psem brązowy", + "SynthesizeTextBoundariesDefaultText": "Używając tylko głosu, można uruchamiać programy, otwórz menu, kliknij przyciski i innych obiektów na ekranie, dyktowanie tekstu w dokumentach i pisać i wysyłać e-maile. Prawie wszystko, co możesz zrobić z klawiatury i myszy może odbywać się tylko swoim głosem." 
} diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pl-PL/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pl-PL/LocalizationTTSResources.resw index f8691cf1e5..952cf043b5 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pl-PL/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pl-PL/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='pl-PL'> -To jest przykład fonetycznej wymowy: +<mark name='phonetic'/>To jest przykład fonetycznej wymowy: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. -To jest przykład daty: +<mark name='date'/>To jest przykład daty: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. -To jest przykład mówi wiele: +<mark name='number'/>To jest przykład mówi wiele: <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> Szybki, czerwony lis przeskoczył nad leniwym psem brązowy + + Używając tylko głosu, można uruchamiać programy, otwórz menu, kliknij przyciski i innych obiektów na ekranie, dyktowanie tekstu w dokumentach i pisać i wysyłać e-maile. Prawie wszystko, co możesz zrobić z klawiatury i myszy może odbywać się tylko swoim głosem. 
+ \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pt-BR/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pt-BR/LocalizationTTSResources.resjson index 1d935fba2e..58055d93d9 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pt-BR/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pt-BR/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027pt-BR\u0027\u003e\r\n\r\nEste é um exemplo de uma pronúncia fonética:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\nEste é um exemplo de uma data:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\nEste é um exemplo de um número de dizer:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "A rápida raposa vermelha, saltada sobre o cachorro preguiçoso marrom" + "SynthesizeSSMLDefaultText": "\n\nEste é um exemplo de uma pronúncia fonética:\n whatchamacallit .\n\nEste é um exemplo de uma data:\n 04/30/2013 .\n\nEste é um exemplo de um número de dizer:\n 4 .\n\n", + "SynthesizeTextDefaultText": "A rápida raposa vermelha, saltada sobre o cachorro preguiçoso marrom", + "SynthesizeTextBoundariesDefaultText": "Usando apenas a voz, você pode iniciar programas, abrir menus, clicar em botões e em outros objetos na tela, ditar texto em documentos e escrever e enviar emails. 
Tudo o que você faz com o teclado e o mouse pode ser feito apenas com a voz." } diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pt-BR/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pt-BR/LocalizationTTSResources.resw index 21d342fdd7..b739059212 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pt-BR/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/pt-BR/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='pt-BR'> -Este é um exemplo de uma pronúncia fonética: +<mark name='phonetic'/>Este é um exemplo de uma pronúncia fonética: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. -Este é um exemplo de uma data: +<mark name='date'/>Este é um exemplo de uma data: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. -Este é um exemplo de um número de dizer: +<mark name='number'/>Este é um exemplo de um número de dizer: <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> A rápida raposa vermelha, saltada sobre o cachorro preguiçoso marrom + + Usando apenas a voz, você pode iniciar programas, abrir menus, clicar em botões e em outros objetos na tela, ditar texto em documentos e escrever e enviar emails. Tudo o que você faz com o teclado e o mouse pode ser feito apenas com a voz. 
+ \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ru-RU/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ru-RU/LocalizationTTSResources.resjson index de8355cf2f..ce622594b6 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ru-RU/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ru-RU/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027ru-RU\u0027\u003e\r\n\r\nЭто пример фонетического произношения:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\nЭто пример даты:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\nЭто пример сказать ряд:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "Быстрый red fox, перепрыгнул через ленивую собаку коричневый" + "SynthesizeSSMLDefaultText": "\n\nЭто пример фонетического произношения:\n whatchamacallit .\n\nЭто пример даты:\n 04/30/2013 .\n\nЭто пример сказать ряд:\n 4 .\n\n", + "SynthesizeTextDefaultText": "Быстрый red fox, перепрыгнул через ленивую собаку коричневый", + "SynthesizeTextBoundariesDefaultText": "Используя только ваш голос, можно запускать программы, открывать меню, нажмите кнопки и другие объекты на экране, диктовать текст документов и писать и отправлять электронную почту. Почти все, что вы делаете с вашей клавиатуры и мыши может быть сделано с только ваш голос." 
} diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ru-RU/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ru-RU/LocalizationTTSResources.resw index 123db1f512..3755122852 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ru-RU/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/ru-RU/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='ru-RU'> -Это пример фонетического произношения: +<mark name='phonetic'/>Это пример фонетического произношения: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. -Это пример даты: +<mark name='date'/>Это пример даты: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. -Это пример сказать ряд: +<mark name='number'/>Это пример сказать ряд: <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> Быстрый red fox, перепрыгнул через ленивую собаку коричневый + + Используя только ваш голос, можно запускать программы, открывать меню, нажмите кнопки и другие объекты на экране, диктовать текст документов и писать и отправлять электронную почту. Почти все, что вы делаете с вашей клавиатуры и мыши может быть сделано с только ваш голос. 
+ \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-CN/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-CN/LocalizationTTSResources.resjson index d5559fed11..6a13dbb592 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-CN/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-CN/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027zh-CN\u0027\u003e\r\n\r\n这是发音的一个语音的示例:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\n这是一个日期的示例:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\n这是说了一些示例:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "快速的红狐狸跳过懒惰的棕色狗" + "SynthesizeSSMLDefaultText": "\n\n这是发音的一个语音的示例:\n whatchamacallit .\n\n这是一个日期的示例:\n 04/30/2013 .\n\n这是说了一些示例:\n 4 .\n\n", + "SynthesizeTextDefaultText": "快速的红狐狸跳过懒惰的棕色狗", + "SynthesizeTextBoundariesDefaultText": "仅使用语音,便可以启动程序、打开菜单、单击屏幕上的按钮和其他对象、将文本口述到文档中以及书写和发送电子邮件。只要是可以用键盘和鼠标完成的所有事情,都可以仅用语音来完成。" } diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-CN/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-CN/LocalizationTTSResources.resw index 3988daf2ec..e870f10f6b 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-CN/LocalizationTTSResources.resw +++ 
b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-CN/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='zh-CN'> -这是发音的一个语音的示例: +<mark name='phonetic'/>这是发音的一个语音的示例: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. -这是一个日期的示例: +<mark name='date'/>这是一个日期的示例: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. -这是说了一些示例: +<mark name='number'/>这是说了一些示例: <say-as interpret-as='ordinal'> 4 </say-as>. - +<mark name='end'/> </speak> 快速的红狐狸跳过懒惰的棕色狗 + + 仅使用语音,便可以启动程序、打开菜单、单击屏幕上的按钮和其他对象、将文本口述到文档中以及书写和发送电子邮件。只要是可以用键盘和鼠标完成的所有事情,都可以仅用语音来完成。 + \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-TW/LocalizationTTSResources.resjson b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-TW/LocalizationTTSResources.resjson index 2ceeeffca3..3d8cafd139 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-TW/LocalizationTTSResources.resjson +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-TW/LocalizationTTSResources.resjson @@ -1,4 +1,5 @@ { - "SynthesizeSSMLDefaultText": "\u003cspeak version=\u00271.0\u0027 xmlns=\u0027http://www.w3.org/2001/10/synthesis\u0027 \r\n xmlns:xsi=\u0027http://www.w3.org/2001/XMLSchema-instance\u0027 \r\n xsi:schemaLocation=\u0027http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd\u0027 \r\n xml:lang=\u0027zh-TW\u0027\u003e\r\n\r\n這是發音的一個語音的示例:\r\n\u003cphoneme alphabet=\u0027x-microsoft-ups\u0027 ph=\u0027S1 W AA T . CH AX . M AX . S2 K AA L . 
IH T\u0027\u003e whatchamacallit \u003c/phoneme\u003e.\r\n\r\n這是一個日期的示例:\r\n\u003csay-as interpret-as=\u0027date\u0027 format=\u0027mdy\u0027\u003e 04/30/2013 \u003c/say-as\u003e.\r\n\r\n這是說了一些示例:\r\n\u003csay-as interpret-as=\u0027ordinal\u0027\u003e 4 \u003c/say-as\u003e.\r\n\r\n\u003c/speak\u003e", - "SynthesizeTextDefaultText": "快速的紅狐狸跳過懶惰的棕色狗" + "SynthesizeSSMLDefaultText": "\n\n這是發音的一個語音的示例:\n whatchamacallit .\n\n這是一個日期的示例:\n 04/30/2013 .\n\n這是說了一些示例:\n 4 .\n\n", + "SynthesizeTextDefaultText": "快速的紅狐狸跳過懶惰的棕色狗", + "SynthesizeTextBoundariesDefaultText": "只要使用您的聲音,就可以啟動程式、開啟功能表、按一下按鈕和螢幕上的其他物件、將文字聽寫為文件,以及撰寫和傳送電子郵件。而可以使用鍵盤和滑鼠完成的所有事項,都可以使用您的聲音完成。" } diff --git a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-TW/LocalizationTTSResources.resw b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-TW/LocalizationTTSResources.resw index e5e707c942..af39608096 100644 --- a/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-TW/LocalizationTTSResources.resw +++ b/Samples/SpeechRecognitionAndSynthesis/shared/Strings/zh-TW/LocalizationTTSResources.resw @@ -123,18 +123,21 @@ xsi:schemaLocation='http://www.w3.org/2001/10/synthesis http://www.w3.org/TR/speech-synthesis/synthesis.xsd' xml:lang='zh-TW'> -這是發音的一個語音的示例: +<mark name='phonetic'/>這是發音的一個語音的示例: <phoneme alphabet='x-microsoft-ups' ph='S1 W AA T . CH AX . M AX . S2 K AA L . IH T'> whatchamacallit </phoneme>. -這是一個日期的示例: +<mark name='date'/>這是一個日期的示例: <say-as interpret-as='date' format='mdy'> 04/30/2013 </say-as>. -這是說了一些示例: +<mark name='number'/>這是說了一些示例: <say-as interpret-as='ordinal'> 4 </say-as>. 
- +<mark name='end'/> </speak> 快速的紅狐狸跳過懶惰的棕色狗 + + 只要使用您的聲音,就可以啟動程式、開啟功能表、按一下按鈕和螢幕上的其他物件、將文字聽寫為文件,以及撰寫和傳送電子郵件。而可以使用鍵盤和滑鼠完成的所有事項,都可以使用您的聲音完成。 + \ No newline at end of file diff --git a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousDictation.xaml b/Samples/SpeechRecognitionAndSynthesis/shared/xaml/Scenario_ContinuousDictation.xaml similarity index 81% rename from Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousDictation.xaml rename to Samples/SpeechRecognitionAndSynthesis/shared/xaml/Scenario_ContinuousDictation.xaml index 8a8a46db05..169caeac65 100644 --- a/Samples/SpeechRecognitionAndSynthesis/cpp/Scenario_ContinuousDictation.xaml +++ b/Samples/SpeechRecognitionAndSynthesis/shared/xaml/Scenario_ContinuousDictation.xaml @@ -10,18 +10,16 @@ // //********************************************************* --> - - + @@ -38,7 +36,7 @@ - + + + + + + + <Button Content="Button" Click="Button_Click"/> + + <Image Source="/Assets/Slices.png" /> + + </Button> + + + + + + - - - - - - - - - - - - - <Button Content="Button" Click="Button_Click"/> - - <Image Source="/Assets/Slices.png" /> - - </Button> - - - - + - - - - - - - - - - - + + + + + + <Button Background="#FF42A214" Foreground="White" + FontFamily="Segoe Print" FontSize="22" + Click="Button_Click"> + + <Button.Resources> + <SolidColorBrush x:Key="ButtonBackgroundPointerOver">#FF42A214</SolidColorBrush> + <SolidColorBrush x:Key="ButtonBackgroundPressed">#FF359308</SolidColorBrush> + <SolidColorBrush x:Key="ButtonForegroundPointerOver">white</SolidColorBrush> + <SolidColorBrush x:Key="ButtonForegroundPressed">white</SolidColorBrush> + <SolidColorBrush x:Key="ButtonBorderBrushPointerOver">#FF267600</SolidColorBrush> + <SolidColorBrush x:Key="ButtonBorderBrushPressed">#FF267600</SolidColorBrush> + </Button.Resources> + + <StackPanel Orientation="Horizontal"> + <SymbolIcon Symbol="Accept" Margin="0,0,10,0"/> + <TextBlock Text="Custom styles"/> + </StackPanel> + + </Button> + + + + + + +
public sealed partial class FlipViewPage : Page { - private IEnumerable _groups; + private List _items; public FlipViewPage() { this.InitializeComponent(); } - public IEnumerable Groups + public List Items { - get { return this._groups; } + get { return this._items; } } - + protected async override void OnNavigatedTo(NavigationEventArgs e) { base.OnNavigatedTo(e); - _groups = await ControlInfoDataSource.GetGroupsAsync(); + var groups = NavigationRootPage.Current.Groups.Any() ? NavigationRootPage.Current.Groups : await ControlInfoDataSource.GetGroupsAsync(); + _items = new List(); + foreach (var group in groups.Take(3)) + { + foreach (var item in group.Items) + { + _items.Add(item); + } + } } } } diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/FlyoutPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/FlyoutPage.xaml index 236cf45cc5..a80d3d30cf 100644 --- a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/FlyoutPage.xaml +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/FlyoutPage.xaml @@ -9,31 +9,26 @@ // //********************************************************* --> - - + - + - - - + +
public sealed partial class GridViewPage : Page { - private IEnumerable _groups; + private List _items; public GridViewPage() { this.InitializeComponent(); } - public IEnumerable Groups + public List Groups { - get { return this._groups; } + get { return this._items; } } protected async override void OnNavigatedTo(NavigationEventArgs e) { base.OnNavigatedTo(e); - _groups = await ControlInfoDataSource.GetGroupsAsync(); + var groups = NavigationRootPage.Current.Groups.Any() ? NavigationRootPage.Current.Groups : await ControlInfoDataSource.GetGroupsAsync(); + _items = new List(); + foreach (var group in groups.Take(3)) + { + foreach (var item in group.Items) + { + _items.Add(item); + } + } } private void ItemTemplate_Click(object sender, RoutedEventArgs e) @@ -53,8 +62,8 @@ private void Control1_SelectionChanged(object sender, SelectionChangedEventArgs GridView gridView = sender as GridView; if (gridView != null) { - SelectionOutput.Text = string.Format("You have selected {0} item(s).", gridView.SelectedItems.Count); - } + SelectionOutput.Text = string.Format("You have selected {0} item(s).", gridView.SelectedItems.Count); + } } private void Control1_ItemClick(object sender, ItemClickEventArgs e) diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/HubPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/HubPage.xaml index c8b7f6a901..c32e5eeb97 100644 --- a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/HubPage.xaml +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/HubPage.xaml @@ -22,11 +22,11 @@ - - + + - @@ -48,7 +48,7 @@ - @@ -59,7 +59,7 @@ - @@ -70,7 +70,7 @@ - diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/HyperlinkButtonPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/HyperlinkButtonPage.xaml index 0a128a3e6e..57141be2c6 100644 --- a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/HyperlinkButtonPage.xaml +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/HyperlinkButtonPage.xaml @@ -49,7 +49,7 @@ - + diff 
--git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ImagePage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ImagePage.xaml index 3d00e0c0af..6db8c7d75b 100644 --- a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ImagePage.xaml +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ImagePage.xaml @@ -1,112 +1,89 @@ - - - - - - - - - - - <Image Source="/Assets/treetops.jpg" Height="100" /> - - - - - - - - - - - - - - - None - - <Image Height="100"> - <Image.Source> - <BitmapImage UriSource="/Assets/treetops.jpg" - DecodePixelHeight="100" /> - </Image.Source> - </Image> - - - - - - - - - - - - - - - - - - - - <Image Stretch="None" Height="100" Width="100" Source="/Assets/valley.jpg" /> - - - - - - - - - - - - - - - - - - - <Image Source="/Assets/ninegrid.gif" Height="82" /> - <Image Source="/Assets/ninegrid.gif" NineGrid="3,3,3,3" Height="164" /> - <Image Source="/Assets/ninegrid.gif" NineGrid="30,20,30,20" Height="164" /> - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + <Image Source="/Assets/treetops.jpg" Height="100" /> + + + + + + + + + + + + + + + + None + + + <Image Height="100"> + <Image.Source> + <BitmapImage UriSource="/Assets/treetops.jpg" + DecodePixelHeight="100" /> + </Image.Source> + </Image> + + + + + + + + + + + + + + + + + + + + <Image Stretch="None" Height="100" Width="100" Source="/Assets/valley.jpg" /> + + + + + + + + + + + + + + + + + + <Image Source="/Assets/ninegrid.gif" Height="82" /> + <Image Source="/Assets/ninegrid.gif" NineGrid="3,3,3,3" Height="164" /> + <Image Source="/Assets/ninegrid.gif" NineGrid="30,20,30,20" Height="164" /> + + + + diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/InkCanvasPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/InkCanvasPage.xaml index d37b4c2894..350c99c633 100644 --- a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/InkCanvasPage.xaml +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/InkCanvasPage.xaml @@ -1,31 +1,28 @@ - - - - - + 
+ + + - + SelectionChanged="penColor_SelectionChanged"> Black Red Blue Green - - - + + +
public sealed partial class ListViewPage : Page { - private IEnumerable _groups; + private List _items; public ListViewPage() { this.InitializeComponent(); } - public IEnumerable Groups + public List Items { - get { return this._groups; } + get { return this._items; } } protected async override void OnNavigatedTo(NavigationEventArgs e) { base.OnNavigatedTo(e); - _groups = await ControlInfoDataSource.GetGroupsAsync(); + var groups = NavigationRootPage.Current.Groups.Any() ? NavigationRootPage.Current.Groups : await ControlInfoDataSource.GetGroupsAsync(); + _items = new List(); + foreach (var group in groups.Take(3)) + { + foreach (var item in group.Items) + { + _items.Add(item); + } + } } private void ItemTemplate_Click(object sender, RoutedEventArgs e) @@ -54,7 +63,7 @@ private void Control1_SelectionChanged(object sender, SelectionChangedEventArgs if (listView != null) { SelectionOutput.Text = string.Format("You have selected {0} item(s).", listView.SelectedItems.Count); - } + } } private void Control1_ItemClick(object sender, ItemClickEventArgs e) diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/MediaElementPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/MediaElementPage.xaml index 231091f588..8031b6d928 100644 --- a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/MediaElementPage.xaml +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/MediaElementPage.xaml @@ -1,19 +1,13 @@ - - - + + - + @@ -24,12 +18,9 @@ - - + @@ -39,29 +30,5 @@ - - - - - - - - - - - - - - - - - - - - - - - - - + diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/MediaPlayerElementPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/MediaPlayerElementPage.xaml new file mode 100644 index 0000000000..bd78643a95 --- /dev/null +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/MediaPlayerElementPage.xaml @@ -0,0 +1,34 @@ + + + + + + + + + <MediaPlayerElement Source="/Assets/ladybug.wmv" + MaxWidth="400" + AutoPlay="False" + 
AreTransportControlsEnabled="True" /> + + + + + + + + + + <MediaPlayerElement Source="Assets/fishes.wmv" + MaxWidth="400" + AutoPlay="True" /> + + + + + diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/MediaPlayerElementPage.xaml.cs b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/MediaPlayerElementPage.xaml.cs new file mode 100644 index 0000000000..5e5e828085 --- /dev/null +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/MediaPlayerElementPage.xaml.cs @@ -0,0 +1,30 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Runtime.InteropServices.WindowsRuntime; +using Windows.Foundation; +using Windows.Foundation.Collections; +using Windows.UI.Xaml; +using Windows.UI.Xaml.Controls; +using Windows.UI.Xaml.Controls.Primitives; +using Windows.UI.Xaml.Data; +using Windows.UI.Xaml.Input; +using Windows.UI.Xaml.Media; +using Windows.UI.Xaml.Navigation; + +// The Blank Page item template is documented at http://go.microsoft.com/fwlink/?LinkId=234238 + +namespace AppUIBasics.ControlPages +{ + /// + /// An empty page that can be used on its own or navigated to within a Frame. 
+ /// + public sealed partial class MediaPlayerElementPage : Page + { + public MediaPlayerElementPage() + { + this.InitializeComponent(); + } + } +} diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/MenuFlyoutPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/MenuFlyoutPage.xaml index 8d69dc1ec0..15f8a72984 100644 --- a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/MenuFlyoutPage.xaml +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/MenuFlyoutPage.xaml @@ -9,78 +9,71 @@ // //********************************************************* --> - - - - - - - - - - - - - - - - - - - - <AppBarButton Icon="Sort" IsCompact="True"> - <AppBarButton.Flyout> - <MenuFlyout> - <MenuFlyoutItem Text="By rating" Click="MenuFlyoutItem_Click" Tag="rating"> - <MenuFlyoutItem Text="By match" Click="MenuFlyoutItem_Click" Tag="match"> - <MenuFlyoutItem Text="By distance" Click="MenuFlyoutItem_Click" Tag="distance> - </MenuFlyout> - </AppBarButton.Flyout> - </AppBarButton> - - - - - - - - - <Button Content="Options"> - <Button.Flyout> - <MenuFlyout> - <MenuFlyoutItem Text="Reset"/> - <MenuFlyoutSeparator/> - <MenuFlyoutItem Text="Repeat"/> - <MenuFlyoutItem Text="Shuffle"/> - </MenuFlyout> - </Button.Flyout> - </Button> - - - - - + + + + + + <AppBarButton Icon="Sort" IsCompact="True"> + <AppBarButton.Flyout> + <MenuFlyout> + <MenuFlyoutItem Text="By rating" Click="MenuFlyoutItem_Click" Tag="rating"> + <MenuFlyoutItem Text="By match" Click="MenuFlyoutItem_Click" Tag="match"> + <MenuFlyoutItem Text="By distance" Click="MenuFlyoutItem_Click" Tag="distance> + </MenuFlyout> + </AppBarButton.Flyout> + </AppBarButton> + + + + + + + + <Button Content="Options"> + <Button.Flyout> + <MenuFlyout> + <MenuFlyoutItem Text="Reset"/> + <MenuFlyoutSeparator/> + <ToggleMenuFlyoutItem Text="Repeat"/> + <ToggleMenuFlyoutItem Text="Shuffle"/> + </MenuFlyout> + </Button.Flyout> + </Button> + + + + + + <Button Content="Edit Options"> + <Button.Flyout> + <MenuFlyout> + <MenuFlyoutItem 
Text="Share"> + <MenuFlyoutItem.Icon> + <FontIcon Glyph="&#xE72D;"/> + </MenuFlyoutItem.Icon> + </MenuFlyoutItem> + <MenuFlyoutItem Text="Copy"/> + <MenuFlyoutItem Text="Delete"/> + <MenuFlyoutSeparator/> + <MenuFlyoutItem Text="Rename"/> + <MenuFlyoutItem Text="Select"/> + </MenuFlyout> + </Button.Flyout> + </Button> + + + + \ No newline at end of file diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/NavigationViewPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/NavigationViewPage.xaml new file mode 100644 index 0000000000..af4263ee47 --- /dev/null +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/NavigationViewPage.xaml @@ -0,0 +1,63 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + <controls:NavigationView IsSettingsVisible="" + Header="This is header text."> + + <controls:NavigationView.MenuItems> + <controls:NavigationMenuItem Icon="Play" Text="Menu Item1" /> + <controls:NavigationMenuItemSeparator/> + <controls:NavigationMenuItem Icon="Save" Text="Menu Item2" /> + <controls:NavigationMenuItem Icon="Refresh" Text="Menu Item3" /> + </controls:NavigationView.MenuItems> + <Frame x:Name="contentFrame" /> + </controls:NavigationView> + + + + + diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/NavigationViewPage.xaml.cs b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/NavigationViewPage.xaml.cs new file mode 100644 index 0000000000..37ec8406ed --- /dev/null +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/NavigationViewPage.xaml.cs @@ -0,0 +1,62 @@ +using AppUIBasics.SamplePages; +using Windows.Foundation; +using Windows.UI.Xaml.Controls; + +// The Blank Page item template is documented at https://go.microsoft.com/fwlink/?LinkId=234238 + +namespace AppUIBasics.ControlPages +{ + /// + /// An empty page that can be used on its own or navigated to within a Frame. 
+ /// + public sealed partial class NavigationViewPage : Page + { + public NavigationViewPage() + { + this.InitializeComponent(); + + AddMenuItem(Symbol.Play, "Menu Item1", NavigationMenuItem_Invoked); + AddMenuItem(Symbol.Save, "Menu Item2", NavigationMenuItem_Invoked_1); + AddMenuItem(Symbol.Refresh, "Menu Item3", NavigationMenuItem_Invoked_2); + } + + private void AddMenuItem(Symbol icon, string text, TypedEventHandler handler) + { + var item = new NavigationMenuItem() { + Icon = new SymbolIcon(icon), + Text = text }; + item.Invoked += handler; + nvSample.MenuItems.Add(item); + } + + private void NavigationMenuItem_Invoked(Windows.UI.Xaml.Controls.NavigationMenuItem sender, object args) + { + contentFrame.Navigate(typeof(SamplePage1)); + } + + private void NavigationMenuItem_Invoked_1(Windows.UI.Xaml.Controls.NavigationMenuItem sender, object args) + { + contentFrame.Navigate(typeof(SamplePage2)); + } + + private void NavigationMenuItem_Invoked_2(Windows.UI.Xaml.Controls.NavigationMenuItem sender, object args) + { + contentFrame.Navigate(typeof(SamplePage3)); + } + + private void NavigationView_Loaded(object sender, Windows.UI.Xaml.RoutedEventArgs e) + { + contentFrame.Navigate(typeof(SamplePage1)); + } + + private void rootGrid_SizeChanged(object sender, Windows.UI.Xaml.SizeChangedEventArgs e) + { + Example1.Width = e.NewSize.Width; + } + + private void NavigationView_SettingsInvoked(Windows.UI.Xaml.Controls.NavigationView sender, object args) + { + contentFrame.Navigate(typeof(SampleSettingsPage)); + } + } +} diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ParallaxViewPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ParallaxViewPage.xaml new file mode 100644 index 0000000000..1985029186 --- /dev/null +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ParallaxViewPage.xaml @@ -0,0 +1,63 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + <Grid> + <controls:ParallaxView Source="{Binding ElementName=listView}" 
VerticalShift="500"> + <Image Source="ms-appx:///Assets/cliff.jpg" /> + </controls:ParallaxView> + <ListView x:Name="listView" ItemsSource="{x:Bind Items}"> + <ListView.Header> + <Grid> + <controls:ParallaxView Source="{x:Bind listView}" VerticalShift="100" + VerticalSourceOffsetKind="Absolute" VerticalSourceStartOffset="-50" + VerticalSourceEndOffset="250"> + <Image Source="ms-appx:///Assets/cliff.jpg" /> + </controls:ParallaxView> + <TextBlock Text="Scroll the list to see parallaxing of images" /> + </Grid> + </ListView.Header> + </ListView> + </Grid> + + + + + diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ParallaxViewPage.xaml.cs b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ParallaxViewPage.xaml.cs new file mode 100644 index 0000000000..53c805ba76 --- /dev/null +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ParallaxViewPage.xaml.cs @@ -0,0 +1,64 @@ +using AppUIBasics.Data; +using System; +using System.Collections.Generic; +using System.ComponentModel; +using System.Linq; +using System.Runtime.CompilerServices; +using Windows.UI.Xaml.Controls; +using Windows.UI.Xaml.Navigation; + +// The Blank Page item template is documented at https://go.microsoft.com/fwlink/?LinkId=234238 + +namespace AppUIBasics.ControlPages +{ + /// + /// An empty page that can be used on its own or navigated to within a Frame. 
+ /// + public sealed partial class ParallaxViewPage : Page, INotifyPropertyChanged + { + private List _items; + + public ParallaxViewPage() + { + this.InitializeComponent(); + } + + public List Items + { + get { return _items; } + set { SetProperty(ref _items, value); } + } + + protected async override void OnNavigatedTo(NavigationEventArgs e) + { + base.OnNavigatedTo(e); + + IEnumerable groups = await ControlInfoDataSource.GetGroupsAsync(); + List items = new List(); + foreach (ControlInfoDataGroup group in groups) + { + foreach (ControlInfoDataItem item in group.Items) + { + items.Add(item); + } + } + Items = items.OrderBy(item => item.Title).ToList(); + } + + public event PropertyChangedEventHandler PropertyChanged; + + private bool SetProperty(ref T storage, T value, [CallerMemberName] String propertyName = null) + { + if (Equals(storage, value)) return false; + + storage = value; + this.OnPropertyChanged(propertyName); + return true; + } + + private void OnPropertyChanged([CallerMemberName] string propertyName = null) + { + this.PropertyChanged?.Invoke(this, new PropertyChangedEventArgs(propertyName)); + } + } +} diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PersonPicturePage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PersonPicturePage.xaml new file mode 100644 index 0000000000..79c1a6567e --- /dev/null +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PersonPicturePage.xaml @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + <PersonPicture ProfilePicture="ms-appx:///Assets/grapes.jpg" /> + + + <PersonPicture DisplayName="John Doe" /> + + + <PersonPicture Initials="SB" /> + + + + + + diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PersonPicturePage.xaml.cs b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PersonPicturePage.xaml.cs new file mode 100644 index 0000000000..e9c756ad4f --- /dev/null +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PersonPicturePage.xaml.cs @@ -0,0 +1,17 @@ +using 
Windows.UI.Xaml.Controls; + +// The Blank Page item template is documented at https://go.microsoft.com/fwlink/?LinkId=234238 + +namespace AppUIBasics.ControlPages +{ + /// + /// An empty page that can be used on its own or navigated to within a Frame. + /// + public sealed partial class PersonPicturePage : Page + { + public PersonPicturePage() + { + this.InitializeComponent(); + } + } +} diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PivotPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PivotPage.xaml index 5598384501..251731140c 100644 --- a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PivotPage.xaml +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PivotPage.xaml @@ -6,39 +6,40 @@ xmlns:d="http://schemas.microsoft.com/expression/blend/2008" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" mc:Ignorable="d"> - - - - - - - - - - - - - - - - - - - <Pivot Title="EMAIL"> - <PivotItem Header="All"> - <TextBlock Text="all emails go here." /> - </PivotItem> - <PivotItem Header="Unread"> - <TextBlock Text="unread emails go here." /> - </PivotItem> - <PivotItem Header="Flagged"> - <TextBlock Text="flagged emails go here." /> - </PivotItem> - <PivotItem Header="Urgent"> - <TextBlock Text="urgent emails go here." /> - </PivotItem> - </Pivot> - - - + + + + + + + + + + + + + + + + + + + <Pivot Title="EMAIL"> + <PivotItem Header="All"> + <TextBlock Text="all emails go here." /> + </PivotItem> + <PivotItem Header="Unread"> + <TextBlock Text="unread emails go here." /> + </PivotItem> + <PivotItem Header="Flagged"> + <TextBlock Text="flagged emails go here." /> + </PivotItem> + <PivotItem Header="Urgent"> + <TextBlock Text="urgent emails go here." 
/> + </PivotItem> + </Pivot> + + + + diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ProgressBarPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ProgressBarPage.xaml index daaa6d9e99..38572d6be4 100644 --- a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ProgressBarPage.xaml +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ProgressBarPage.xaml @@ -9,42 +9,38 @@ // //********************************************************* --> - - - - - - - - - - + + + + + + + + + - <ProgressBar Width="130" IsIndeterminate="True" ShowPaused="" + <ProgressBar Width="130" IsIndeterminate="True" + ShowPaused="" ShowError="" /> - - + - - - + + + @@ -53,5 +49,24 @@ - - \ No newline at end of file + + + + + + + + + + + + + + + + + + + + + diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ProgressRingPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ProgressRingPage.xaml index 9e5676b942..186ca239b1 100644 --- a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ProgressRingPage.xaml +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/ProgressRingPage.xaml @@ -17,18 +17,20 @@ xmlns:d="http://schemas.microsoft.com/expression/blend/2008" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" mc:Ignorable="d"> - - - - - + + + + + + + + + <ProgressRing IsActive="" /> + + + + - - - - <ProgressRing IsActive="" /> - - - - \ No newline at end of file diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PullToRefreshPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PullToRefreshPage.xaml new file mode 100644 index 0000000000..f59e73d96b --- /dev/null +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PullToRefreshPage.xaml @@ -0,0 +1,58 @@ + + + + + + + \ No newline at end of file diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PullToRefreshPage.xaml.cs b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PullToRefreshPage.xaml.cs new file mode 100644 index 0000000000..c8e059b8fe --- 
/dev/null +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/PullToRefreshPage.xaml.cs @@ -0,0 +1,64 @@ +//********************************************************* +// +// Copyright (c) Microsoft. All rights reserved. +// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF +// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY +// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR +// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT. +// +//********************************************************* +//using Microsoft.UI.Preview; +using System; +using System.Collections.Generic; +using System.Collections.ObjectModel; +using System.IO; +using System.Linq; +using System.Runtime.InteropServices.WindowsRuntime; +using Windows.Foundation; +using Windows.Foundation.Collections; +using Windows.UI.Xaml; +using Windows.UI.Xaml.Controls; +using Windows.UI.Xaml.Controls.Primitives; +using Windows.UI.Xaml.Data; +using Windows.UI.Xaml.Input; +using Windows.UI.Xaml.Media; +using Windows.UI.Xaml.Navigation; + +// The Blank Page item template is documented at http://go.microsoft.com/fwlink/?LinkId=234238 + +namespace AppUIBasics.ControlPages +{ + /// + /// An empty page that can be used on its own or navigated to within a Frame. 
+ /// + public sealed partial class PullToRefreshPage : Page + { +#if false + private ObservableCollection items = new ObservableCollection(); + + public PullToRefreshPage() + { + this.InitializeComponent(); + foreach (var c in @"AcrylicBrush ColorPicker NavigationView ParallaxView PersonPicture PullToRefresh RatingsControl RevealBrush TreeView".Split(' ')) + items.Add(c); + lv.ItemsSource = items; + } + + private void Rc_Loaded(object sender, RoutedEventArgs e) + { + rc.RefreshVisualizer.RefreshRequested += RefreshVisualizer_RefreshRequested; + } + + private void RefreshVisualizer_RefreshRequested(Windows.UI.Xaml.Controls.RefreshVisualizer sender, Windows.UI.Xaml.Controls.RefreshRequestedEventArgs args) + { + items.Insert(0, "NewControl"); + args.GetDeferral().Complete(); + } +#else + public PullToRefreshPage() + { + this.InitializeComponent(); + } +#endif + } +} diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RadioButtonPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RadioButtonPage.xaml index cde7228b99..e302e15369 100644 --- a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RadioButtonPage.xaml +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RadioButtonPage.xaml @@ -9,30 +9,28 @@ // //********************************************************* --> - - + - + - - + + - - - - + + + + - + @@ -45,28 +43,41 @@ - - + - - - - - - - + + + + + + + + + - - - - - - - + + + + + + + + + - + diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RatingsControlPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RatingsControlPage.xaml new file mode 100644 index 0000000000..d2ba98579c --- /dev/null +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RatingsControlPage.xaml @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + <controls:RatingsControl IsClearEnabled="" + IsReadOnly=" " /> + + + + + + + + + + + + + + <controls:RatingsControl PlaceholderValue="" /> + + + + + + diff --git 
a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RatingsControlPage.xaml.cs b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RatingsControlPage.xaml.cs new file mode 100644 index 0000000000..f31725a19b --- /dev/null +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RatingsControlPage.xaml.cs @@ -0,0 +1,17 @@ +using Windows.UI.Xaml.Controls; + +// The Blank Page item template is documented at https://go.microsoft.com/fwlink/?LinkId=234238 + +namespace AppUIBasics.ControlPages +{ + /// + /// An empty page that can be used on its own or navigated to within a Frame. + /// + public sealed partial class RatingsControlPage : Page + { + public RatingsControlPage() + { + this.InitializeComponent(); + } + } +} diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RepeatButtonPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RepeatButtonPage.xaml index e044e66a1a..6aae2113ae 100644 --- a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RepeatButtonPage.xaml +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RepeatButtonPage.xaml @@ -17,17 +17,22 @@ xmlns:d="http://schemas.microsoft.com/expression/blend/2008" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" mc:Ignorable="d"> - - - - - - - - - - <RepeatButton Content="Click and hold" Click="RepeatButton_Click"/> - - - + + + + + + + + + + <RepeatButton Content="Click and hold" Click="RepeatButton_Click"/> + + + + \ No newline at end of file diff --git a/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RevealPage.xaml b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RevealPage.xaml new file mode 100644 index 0000000000..23671832c2 --- /dev/null +++ b/Samples/XamlUIBasics/cs/AppUIBasics/ControlPages/RevealPage.xaml @@ -0,0 +1,114 @@ + + + + + + + + + + + +