Refactor packet queueing and use it for both xmit and recv.

This commit is contained in:
Kristian Høgsberg 2007-04-11 18:22:36 -04:00
parent d9fd708f82
commit 194edcc9cd
2 changed files with 54 additions and 129 deletions

View File

@ -123,9 +123,9 @@ struct raw1394_handle {
int irq_interval; int irq_interval;
int packet_phase; int packet_phase;
int packet_count; int packet_count;
int packet_index;
int buf_packets; int buf_packets;
int max_packet_size; int max_packet_size;
int packet_header_index;
int prebuffer; int prebuffer;
int start_on_cycle; int start_on_cycle;
enum raw1394_iso_dma_recv_mode recv_mode; enum raw1394_iso_dma_recv_mode recv_mode;

View File

@ -30,74 +30,70 @@
#include "juju.h" #include "juju.h"
static enum raw1394_iso_disposition static int
queue_xmit_packets(raw1394handle_t handle) queue_packet(raw1394handle_t handle,
unsigned int length, unsigned int header_length,
unsigned char tag, unsigned char sy)
{ {
struct fw_cdev_iso_packet *p = handle->iso.packets;
struct fw_cdev_queue_iso queue_iso; struct fw_cdev_queue_iso queue_iso;
enum raw1394_iso_disposition d; struct fw_cdev_iso_packet *p;
unsigned int len, dropped; int err;
unsigned char tag, sy, *first_payload;
int cycle, i;
first_payload = handle->iso.head; p = &handle->iso.packets[handle->iso.packet_index];
d = RAW1394_ISO_OK; p->payload_length = length;
for (i = 0; i < handle->iso.irq_interval; i++) {
cycle = -1;
dropped = 0;
if (handle->iso.head + handle->iso.max_packet_size >
handle->iso.buffer_end) {
handle->iso.head = handle->iso.buffer;
break;
}
d = handle->iso.xmit_handler(handle, handle->iso.head,
&len, &tag, &sy, cycle, dropped);
if (d != RAW1394_ISO_OK)
break;
p->payload_length = len;
p->interrupt = p->interrupt =
handle->iso.packet_phase == handle->iso.irq_interval - 1; handle->iso.packet_phase == handle->iso.irq_interval - 1;
p->skip = 0; p->skip = 0;
p->tag = tag; p->tag = tag;
p->sy = sy; p->sy = sy;
p->header_length = 0; p->header_length = header_length;
handle->iso.head += len; handle->iso.head += length;
handle->iso.packet_count++; handle->iso.packet_count++;
handle->iso.packet_phase++; handle->iso.packet_phase++;
handle->iso.packet_index++;
if (handle->iso.packet_phase == handle->iso.irq_interval) if (handle->iso.packet_phase == handle->iso.irq_interval)
handle->iso.packet_phase = 0; handle->iso.packet_phase = 0;
}
if (handle->iso.head + handle->iso.max_packet_size > handle->iso.buffer_end)
handle->iso.head = handle->iso.buffer;
/* Queue the packets in the kernel if we filled up the packets
* array or wrapped the payload buffer. */
if (handle->iso.packet_index == handle->iso.irq_interval ||
handle->iso.head == handle->iso.buffer) {
queue_iso.packets = ptr_to_u64(handle->iso.packets); queue_iso.packets = ptr_to_u64(handle->iso.packets);
queue_iso.size = i * sizeof handle->iso.packets[0]; queue_iso.size = handle->iso.packet_index * sizeof handle->iso.packets[0];
queue_iso.data = ptr_to_u64(first_payload); queue_iso.data = ptr_to_u64(handle->iso.first_payload);
handle->iso.packet_index = 0;
handle->iso.first_payload = handle->iso.head;
len = ioctl(handle->iso.fd, FW_CDEV_IOC_QUEUE_ISO, &queue_iso); err = ioctl(handle->iso.fd, FW_CDEV_IOC_QUEUE_ISO, &queue_iso);
if (len < 0) if (err < 0)
return RAW1394_ISO_ERROR; return -1;
}
return d;
} }
static int static int
flush_xmit_packets(raw1394handle_t handle, int limit) queue_xmit_packets(raw1394handle_t handle, int limit)
{ {
enum raw1394_iso_disposition d; enum raw1394_iso_disposition d;
unsigned char tag, sy;
int len, cycle, dropped;
if (handle->iso.xmit_handler == NULL) if (handle->iso.xmit_handler == NULL)
return 0; return 0;
if (limit < handle->iso.irq_interval) while (handle->iso.packet_count < limit) {
limit = handle->iso.irq_interval;
d = handle->iso.xmit_handler(handle, handle->iso.head,
&len, &tag, &sy, cycle, dropped);
while (handle->iso.packet_count + handle->iso.irq_interval <= limit) {
d = queue_xmit_packets(handle);
switch (d) { switch (d) {
case RAW1394_ISO_OK:
queue_packet(handle, len, 0, tag, sy);
break;
case RAW1394_ISO_DEFER: case RAW1394_ISO_DEFER:
case RAW1394_ISO_AGAIN: case RAW1394_ISO_AGAIN:
default: default:
@ -125,7 +121,7 @@ int raw1394_iso_xmit_start(raw1394handle_t handle, int start_on_cycle,
handle->iso.prebuffer = prebuffer_packets; handle->iso.prebuffer = prebuffer_packets;
handle->iso.start_on_cycle = start_on_cycle; handle->iso.start_on_cycle = start_on_cycle;
flush_xmit_packets(handle, prebuffer_packets); queue_xmit_packets(handle, prebuffer_packets);
if (handle->iso.prebuffer <= handle->iso.packet_count) { if (handle->iso.prebuffer <= handle->iso.packet_count) {
start_iso.cycle = start_on_cycle; start_iso.cycle = start_on_cycle;
@ -136,48 +132,14 @@ int raw1394_iso_xmit_start(raw1394handle_t handle, int start_on_cycle,
return retval; return retval;
} }
return flush_xmit_packets(handle, handle->iso.buf_packets); return queue_xmit_packets(handle, handle->iso.buf_packets);
} }
static int static int
queue_recv_packets(raw1394handle_t handle) queue_recv_packets(raw1394handle_t handle)
{ {
struct fw_cdev_queue_iso queue_iso; while (handle->iso.packet_count <= handle->iso.buf_packets)
struct fw_cdev_iso_packet *p = handle->iso.packets; queue_packet(handle, handle->iso.max_packet_size, 4, 0, 0);
unsigned int len;
unsigned char *first_payload;
int i;
first_payload = handle->iso.head;
for (i = 0; i < handle->iso.irq_interval; i++, p++) {
if (handle->iso.head + handle->iso.max_packet_size >
handle->iso.buffer_end) {
handle->iso.head = handle->iso.buffer;
break;
}
p->payload_length = handle->iso.max_packet_size;
p->interrupt = handle->iso.packet_phase == handle->iso.irq_interval - 1;
p->skip = 0;
p->tag = 0;
p->sy = 0;
p->header_length = 4;
handle->iso.head += handle->iso.max_packet_size;
handle->iso.packet_count++;
handle->iso.packet_phase++;
if (handle->iso.packet_phase == handle->iso.irq_interval)
handle->iso.packet_phase = 0;
}
queue_iso.packets = ptr_to_u64(handle->iso.packets);
queue_iso.size = i * sizeof handle->iso.packets[0];
queue_iso.data = ptr_to_u64(first_payload);
len = ioctl(handle->iso.fd, FW_CDEV_IOC_QUEUE_ISO, &queue_iso);
if (len < 0)
return -1;
return 0; return 0;
} }
@ -232,8 +194,6 @@ flush_recv_packets(raw1394handle_t handle,
return 0; return 0;
} }
while (handle->iso.packet_count + handle->iso.irq_interval <=
handle->iso.buf_packets)
queue_recv_packets(handle); queue_recv_packets(handle);
return 0; return 0;
@ -244,8 +204,6 @@ int raw1394_iso_recv_start(raw1394handle_t handle, int start_on_cycle,
{ {
struct fw_cdev_start_iso start_iso; struct fw_cdev_start_iso start_iso;
while (handle->iso.packet_count + handle->iso.irq_interval <=
handle->iso.buf_packets)
queue_recv_packets(handle); queue_recv_packets(handle);
start_iso.cycle = start_on_cycle; start_iso.cycle = start_on_cycle;
@ -274,7 +232,7 @@ static int handle_iso_event(raw1394handle_t handle,
switch (handle->iso.type) { switch (handle->iso.type) {
case FW_CDEV_ISO_CONTEXT_TRANSMIT: case FW_CDEV_ISO_CONTEXT_TRANSMIT:
handle->iso.packet_count -= handle->iso.irq_interval; handle->iso.packet_count -= handle->iso.irq_interval;
return flush_xmit_packets(handle, handle->iso.buf_packets); return queue_xmit_packets(handle, handle->iso.buf_packets);
case FW_CDEV_ISO_CONTEXT_RECEIVE: case FW_CDEV_ISO_CONTEXT_RECEIVE:
return flush_recv_packets(handle, interrupt); return flush_recv_packets(handle, interrupt);
default: default:
@ -301,42 +259,9 @@ int raw1394_iso_xmit_write(raw1394handle_t handle, unsigned char *data,
handle->iso.buf_packets) handle->iso.buf_packets)
raw1394_loop_iterate(handle); raw1394_loop_iterate(handle);
p = &handle->iso.packets[handle->iso.packet_header_index];
p->payload_length = len;
p->interrupt =
handle->iso.packet_phase == handle->iso.irq_interval - 1;
p->skip = 0;
p->tag = tag;
p->sy = sy;
p->header_length = 0;
memcpy(handle->iso.head, data, len); memcpy(handle->iso.head, data, len);
if (queue_packet(handle, len, 0, tag, sy) < 0)
handle->iso.head += len;
handle->iso.packet_count++;
handle->iso.packet_phase++;
handle->iso.packet_header_index++;
if (handle->iso.packet_phase == handle->iso.irq_interval)
handle->iso.packet_phase = 0;
if (handle->iso.head + handle->iso.max_packet_size > handle->iso.buffer_end)
handle->iso.head = handle->iso.buffer;
/* Queue the packets in the kernel if we filled up the packets
* array or wrapped the payload buffer. */
if (handle->iso.packet_header_index == handle->iso.irq_interval ||
handle->iso.head == handle->iso.buffer) {
queue_iso.packets = ptr_to_u64(handle->iso.packets);
queue_iso.size = handle->iso.packet_header_index * sizeof handle->iso.packets[0];
queue_iso.data = ptr_to_u64(handle->iso.first_payload);
handle->iso.packet_header_index = 0;
handle->iso.first_payload = handle->iso.head;
len = ioctl(handle->iso.fd, FW_CDEV_IOC_QUEUE_ISO, &queue_iso);
if (len < 0)
return -1; return -1;
}
/* Start the streaming if it's not already running and if /* Start the streaming if it's not already running and if
* we've buffered up enough packets. */ * we've buffered up enough packets. */