Merge branch 'faster'

Ondřej Hruška 7 years ago
commit 76d1cd3a24
Signed by: MightyPork
GPG Key ID: 2C5FD5035250423D
Changed files:
  1. TinyFrame/TF_Integration.c (46)
  2. TinyFrame/TinyFrame.c (10)
  3. TinyFrame/TinyFrame.h (2)
  4. USB/usbd_cdc_if.c (2)
  5. comm/messages.c (8)
  6. platform/plat_compat.h (4)
  7. units/adc/_adc_core.c (15)

TinyFrame/TF_Integration.c

@@ -4,11 +4,11 @@
 // TinyFrame integration
 //
 
-#include <USB/usb_device.h>
 #include "platform.h"
 #include "task_main.h"
 #include "USB/usbd_cdc_if.h"
+#include "USB/usb_device.h"
 #include "TinyFrame.h"
 
 extern osSemaphoreId semVcomTxReadyHandle;
@@ -16,6 +16,30 @@ extern osMutexId mutTinyFrameTxHandle;
 void TF_WriteImpl(TinyFrame *tf, const uint8_t *buff, uint32_t len)
 {
+#if 1
+    const uint32_t real_size = len;
+
+    // Padding to a multiple of 64 bytes - this is supposed to maximize the bulk transfer speed
+    if (len&0x3F) {
+        uint32_t pad = (64 - (len&0x3F));
+        memset((void *) (buff + len), 0, pad);
+        len += pad; // padding to a multiple of 64 (size of the endpoint)
+    }
+
+    // We bypass the USBD driver library's overhead by using the HAL function directly
+    assert_param(HAL_OK == HAL_PCD_EP_Transmit(hUsbDeviceFS.pData, CDC_IN_EP, (uint8_t *) buff, len));
+
+    // The buffer is the TF transmit buffer, we can't leave it to work asynchronously because
+    // the next call could modify it before it's been transmitted (in the case of a chunked / multi-part frame)
+    // the assumption here is that all until the last chunk use the full buffer capacity
+    if (real_size == TF_SENDBUF_LEN) {
+        if (pdTRUE != xSemaphoreTake(semVcomTxReadyHandle, 100)) {
+            TF_Error("Tx stalled in WriteImpl");
+            return;
+        }
+    }
+#else
     (void) tf;
 #define CHUNK 64 // same as TF_SENDBUF_LEN, so we should always have only one run of the loop
     int32_t total = (int32_t) len;

@@ -37,16 +61,26 @@ void TF_WriteImpl(TinyFrame *tf, const uint8_t *buff, uint32_t len)
         buff += chunksize;
         total -= chunksize;
     }
+#endif
 }
 
 /** Claim the TX interface before composing and sending a frame */
 bool TF_ClaimTx(TinyFrame *tf)
 {
     (void) tf;
-    // assert_param(osThreadGetId() != tskMainHandle);
-    assert_param(!inIRQ());
-    assert_param(osOK == osMutexWait(mutTinyFrameTxHandle, 5000));
+    // assert_param(!inIRQ()); // useless delay
+    assert_param(pdTRUE == xSemaphoreTake(mutTinyFrameTxHandle, 5000)); // trips the wd
+
+    // The last chunk from some previous frame may still be being transmitted,
+    // wait for it to finish (the semaphore is given in the CDC tx done handler)
+    if (pdTRUE != xSemaphoreTake(semVcomTxReadyHandle, 100)) {
+        TF_Error("Tx stalled in Claim");
+        // release the guarding mutex again
+        assert_param(pdTRUE == xSemaphoreGive(mutTinyFrameTxHandle));
+        return false;
+    }
 
     return true;
 }

@@ -54,5 +88,7 @@ bool TF_ClaimTx(TinyFrame *tf)
 void TF_ReleaseTx(TinyFrame *tf)
 {
     (void) tf;
-    assert_param(osOK == osMutexRelease(mutTinyFrameTxHandle));
+    assert_param(pdTRUE == xSemaphoreGive(mutTinyFrameTxHandle));
+    // the last payload is sent asynchronously
 }
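Editor's aside on the padding arithmetic above: `len & 0x3F` is the length modulo 64, so adding `64 - (len & 0x3F)` when that remainder is nonzero rounds the chunk up to the next multiple of the 64-byte full-speed bulk endpoint. A minimal standalone sketch of just that calculation (the helper name pad_to_ep_size is illustrative, not part of the firmware):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative helper: round a transfer length up to the next multiple of 64,
     * mirroring the padding done in the new TF_WriteImpl before HAL_PCD_EP_Transmit().
     * The real code also zero-fills the padded tail of the buffer, which is safe
     * only because the send buffer capacity is itself a multiple of 64. */
    static uint32_t pad_to_ep_size(uint32_t len)
    {
        if (len & 0x3F) {                 /* length is not a multiple of 64 */
            len += 64 - (len & 0x3F);     /* add the missing part of the last packet */
        }
        return len;
    }

    int main(void)
    {
        printf("%u %u %u\n",
               (unsigned) pad_to_ep_size(1),
               (unsigned) pad_to_ep_size(64),
               (unsigned) pad_to_ep_size(100));
        /* prints: 64 64 128 */
        return 0;
    }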

TinyFrame/TinyFrame.c

@@ -870,7 +870,11 @@ static inline uint32_t _TF_FN TF_ComposeTail(uint8_t *outbuff, TF_CKSUM *cksum)
  */
 static bool _TF_FN TF_SendFrame_Begin(TinyFrame *tf, TF_Msg *msg, TF_Listener listener, TF_TICKS timeout)
 {
-    TF_TRY(TF_ClaimTx(tf));
+    bool suc = TF_ClaimTx(tf);
+    if (!suc) {
+        TF_Error("TF lock not free");
+        return false;
+    }
 
     tf->tx_pos = (uint32_t) TF_ComposeHead(tf, tf->sendbuf, msg); // frame ID is incremented here if it's not a response
     tf->tx_len = msg->len;

@@ -1031,10 +1035,10 @@ bool _TF_FN TF_Query_Multipart(TinyFrame *tf, TF_Msg *msg, TF_Listener listener,
     return TF_Query(tf, msg, listener, timeout);
 }
 
-void _TF_FN TF_Respond_Multipart(TinyFrame *tf, TF_Msg *msg)
+bool _TF_FN TF_Respond_Multipart(TinyFrame *tf, TF_Msg *msg)
 {
     msg->data = NULL;
-    TF_Respond(tf, msg);
+    return TF_Respond(tf, msg);
 }
 
 void _TF_FN TF_Multipart_Payload(TinyFrame *tf, const uint8_t *buff, uint32_t length)

TinyFrame/TinyFrame.h

@@ -369,7 +369,7 @@ bool TF_Query_Multipart(TinyFrame *tf, TF_Msg *msg, TF_Listener listener, TF_TIC
  * TF_Respond() with multipart payload.
  * msg.data is ignored and set to NULL
  */
-void TF_Respond_Multipart(TinyFrame *tf, TF_Msg *msg);
+bool TF_Respond_Multipart(TinyFrame *tf, TF_Msg *msg);
 
 /**
  * Send the payload for a started multipart frame. This can be called multiple times

USB/usbd_cdc_if.c

@@ -314,7 +314,7 @@ void USBD_CDC_TransmitDone(USBD_HandleTypeDef *pdev)
     assert_param(inIRQ());
 
     portBASE_TYPE taskWoken = pdFALSE;
-    assert_param(xSemaphoreGiveFromISR(semVcomTxReadyHandle, &taskWoken) == pdTRUE);
+    assert_param(pdTRUE == xSemaphoreGiveFromISR(semVcomTxReadyHandle, &taskWoken));
     portYIELD_FROM_ISR(taskWoken);
 }
 /* USER CODE END PRIVATE_FUNCTIONS_IMPLEMENTATION */

comm/messages.c

@@ -65,7 +65,7 @@ static void settings_bulkread_cb(BulkRead *bulk, uint32_t chunk, uint8_t *buffer
     if (buffer == NULL) {
         free_ck(bulk);
         iw_end();
-        dbg("INI read complete.");
+        // dbg("INI read complete.");
         return;
     }

@@ -81,7 +81,7 @@ static void settings_bulkread_cb(BulkRead *bulk, uint32_t chunk, uint8_t *buffer
  */
 static TF_Result lst_ini_export(TinyFrame *tf, TF_Msg *msg)
 {
-    dbg("Bulk read INI file");
+    // dbg("Bulk read INI file");
 
     BulkRead *bulk = malloc_ck(sizeof(BulkRead));
     assert_param(bulk != NULL);

@@ -114,7 +114,7 @@ static void settings_bulkwrite_cb(BulkWrite *bulk, const uint8_t *chunk, uint32_
     if (bulk->offset > 0) {
         settings_load_ini_end();
-        dbg("INI write complete");
+        // dbg("INI write complete");
     } else {
         dbg("INI write failed");
     }

@@ -131,7 +131,7 @@ static void settings_bulkwrite_cb(BulkWrite *bulk, const uint8_t *chunk, uint32_
  */
 static TF_Result lst_ini_import(TinyFrame *tf, TF_Msg *msg)
 {
-    dbg("Bulk write INI file");
+    // dbg("Bulk write INI file");
 
     BulkWrite *bulk = malloc_ck(sizeof(BulkWrite));
     assert_param(bulk);

platform/plat_compat.h

@@ -30,7 +30,7 @@
 #define BULK_READ_BUF_LEN 256 // Buffer for TF bulk reads
-#define UNIT_TMP_LEN 512 // Buffer for internal unit operations
+#define UNIT_TMP_LEN 256 // Buffer for internal unit operations
 #define FLASH_SAVE_BUF_LEN 128 // Malloc'd buffer for saving to flash

@@ -38,7 +38,7 @@
 #define RX_QUE_CAPACITY 16 // TinyFrame rx queue size (64 bytes each)
 #define TF_MAX_PAYLOAD_RX 512 // TF max Rx payload
-#define TF_SENDBUF_LEN 64 // TF transmit buffer (can be less than a full frame)
+#define TF_SENDBUF_LEN 512 // TF transmit buffer (can be less than a full frame)
 #define TF_MAX_ID_LST 4 // Frame ID listener count
 #define TF_MAX_TYPE_LST 6 // Frame Type listener count
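Editor's observation, not something stated in the commit: TF_SENDBUF_LEN = 512 is a multiple of the 64-byte endpoint size, which is what keeps the zero-padding in the new TF_WriteImpl inside the transmit buffer. A hedged sketch of a compile-time guard one could add (not present in the sources):

    #include <assert.h>

    #define TF_SENDBUF_LEN 512   /* value set by this commit in plat_compat.h */

    /* Hypothetical guard (not in the original code): the padding in TF_WriteImpl
     * extends each chunk to the next 64-byte boundary, so the send buffer length
     * must stay a multiple of the USB FS bulk endpoint size or the memset could
     * write past the end of the buffer. */
    static_assert((TF_SENDBUF_LEN % 64) == 0,
                  "TF_SENDBUF_LEN must be a multiple of the 64-byte endpoint size");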

units/adc/_adc_core.c

@@ -40,7 +40,8 @@ static void UADC_JobSendBlockChunk(Job *job)
         .len = (TF_LEN) (1 /*seq*/ + count * sizeof(uint16_t)),
         .type = type,
     };
-    TF_Respond_Multipart(comm, &msg);
+    assert_param(true == TF_Respond_Multipart(comm, &msg));
     TF_Multipart_Payload(comm, &priv->stream_serial, 1);
     TF_Multipart_Payload(comm, (uint8_t *) (priv->dma_buffer + start), count * sizeof(uint16_t));
     TF_Multipart_Close(comm);

@@ -171,7 +172,7 @@ static void handle_httc(Unit *unit, bool tc)
     const bool m_fixcpt = priv->opmode == ADC_OPMODE_BLCAP;
 
     if (ht) {
-        end = (priv->buf_itemcount / 2);
+        end = (priv->buf_itemcount >> 1); // div2
     }
     else {
         end = priv->buf_itemcount;

@@ -234,7 +235,7 @@ static void handle_httc(Unit *unit, bool tc)
             priv->stream_startpos = 0;
         }
         else {
-            priv->stream_startpos = priv->buf_itemcount / 2;
+            priv->stream_startpos = priv->buf_itemcount >> 1; // div2
         }
     }

@@ -289,7 +290,7 @@ void UADC_DMA_Handler(void *arg)
     const bool m_stream = priv->opmode == ADC_OPMODE_STREAM;
     const bool m_fixcpt = priv->opmode == ADC_OPMODE_BLCAP;
 
     if (m_trigd || m_stream || m_fixcpt) {
-        const uint32_t half = (uint32_t) (priv->buf_itemcount / 2);
+        const uint32_t half = (uint32_t) (priv->buf_itemcount >> 1); // div2
 
         if (ht && tc) {
             // dual event interrupt - may happen if we missed both and they were pending after
             // interrupts became enabled again (this can happen due to the EOS or other higher prio irq's)

@@ -336,8 +337,10 @@ void UADC_ADC_EOS_Handler(void *arg)
     if (priv->opmode == ADC_OPMODE_UNINIT) return;
 
     // Wait for the DMA to complete copying the last sample
-    uint32_t dmapos;
-    hw_wait_while((dmapos = DMA_POS(priv)) % priv->nb_channels != 0, 100); // XXX this could be changed to reading it from the DR instead
+    uint32_t dmapos = DMA_POS(priv);
+    if ((DMA_POS(priv) % priv->nb_channels) != 0) {
+        hw_wait_while((dmapos = DMA_POS(priv)) % priv->nb_channels != 0, 100); // XXX this could be changed to reading it from the DR instead
+    }
 
     uint32_t sample_pos;
     if (dmapos == 0) {