mirror of https://github.com/enjoy-digital/litex.git (synced 2025-01-04 09:52:26 -05:00)

Remove code that will be moved into MiSoC or other packages.

parent 5253b0c06e
commit dec2e23fc7
38 changed files with 0 additions and 3961 deletions
@@ -1,20 +0,0 @@
from migen.fhdl.std import *
from migen.genlib.complex import *
from migen.fhdl import verilog


class Example(Module):
    def __init__(self):
        w = Complex(32, 42)
        A = SignalC(16)
        B = SignalC(16)
        Bw = SignalC(16)
        C = SignalC(16)
        D = SignalC(16)
        self.comb += Bw.eq(B*w)
        self.sync += [
            C.eq(A + Bw),
            D.eq(A - Bw)
        ]

print(verilog.convert(Example()))
@@ -1,37 +0,0 @@
from migen.fhdl import verilog
from migen.fhdl.std import *
from migen.genlib.mhamgen import HammingGenerator, HammingChecker


# Instantiates Hamming code generator and checker modules back
# to back. Also creates an intermediate bus between generator
# and checker and injects a single-bit error on the bus, to
# demonstrate the correction.
class gen_check(Module):
    def __init__(self, width=8):
        # Save module parameters and instantiate generator and checker
        self.width = width
        hg = HammingGenerator(self.width)
        hc = HammingChecker(self.width, correct=True)
        self.submodules += hg
        self.submodules += hc

        # Create the intermediate bus and inject a single-bit error on
        # the bus. Position of the error bit is controllable by the
        # error_bit input.
        data = Signal(width)
        error_bit = Signal(bits_for(width))
        self.comb += data.eq(hg.data_in ^ (1 << error_bit))
        self.comb += hc.code_in.eq(hg.code_out)
        self.comb += hc.data_in.eq(data)

        # Call out I/O necessary for testing the generator/checker
        self.io = set()
        self.io.add(hg.data_in)
        self.io.add(hc.enable)
        self.io.add(error_bit)
        self.io.add(hc.code_out)
        self.io.add(hc.data_out)

gc = gen_check()
print(verilog.convert(gc, gc.io, name="gen_check"))
@@ -1,29 +0,0 @@
from migen.fhdl.std import *
from migen.fhdl import verilog
from migen.genlib.cdc import MultiReg
from migen.bank import description, csrgen


class Example(Module):
    def __init__(self, ninputs=32, noutputs=32):
        r_o = description.CSRStorage(noutputs, atomic_write=True)
        r_i = description.CSRStatus(ninputs)

        self.submodules.bank = csrgen.Bank([r_o, r_i])
        self.gpio_in = Signal(ninputs)
        self.gpio_out = Signal(ninputs)

        ###

        gpio_in_s = Signal(ninputs)
        self.specials += MultiReg(self.gpio_in, gpio_in_s)
        self.comb += [
            self.gpio_out.eq(r_o.storage),
            r_i.status.eq(gpio_in_s)
        ]

example = Example()
i = example.bank.bus
v = verilog.convert(example, {i.dat_r, i.adr, i.we, i.dat_w,
                              example.gpio_in, example.gpio_out})
print(v)
@@ -1,105 +0,0 @@
from random import Random

from migen.fhdl.std import *
from migen.flow.network import *
from migen.flow.transactions import *
from migen.actorlib import dma_wishbone
from migen.actorlib.sim import *
from migen.bus import wishbone
from migen.sim.generic import run_simulation


class MyModel:
    def read(self, address):
        return address + 4


class MyModelWB(MyModel, wishbone.TargetModel):
    def __init__(self):
        self.prng = Random(763627)

    def can_ack(self, bus):
        return self.prng.randrange(0, 2)


def adrgen_gen():
    for i in range(10):
        print("Address: " + hex(i))
        yield Token("address", {"a": i})


class SimAdrGen(SimActor):
    def __init__(self, nbits):
        self.address = Source([("a", nbits)])
        SimActor.__init__(self, adrgen_gen())


def dumper_gen():
    while True:
        t = Token("data", idle_wait=True)
        yield t
        print("Received: " + hex(t.value["d"]))


class SimDumper(SimActor):
    def __init__(self):
        self.data = Sink([("d", 32)])
        SimActor.__init__(self, dumper_gen())


def trgen_gen():
    for i in range(10):
        a = i
        d = i+10
        print("Address: " + hex(a) + " Data: " + hex(d))
        yield Token("address_data", {"a": a, "d": d})


class SimTrGen(SimActor):
    def __init__(self, a_nbits):
        self.address_data = Source([("a", a_nbits), ("d", 32)])
        SimActor.__init__(self, trgen_gen())


class TBWishbone(Module):
    def __init__(self, master):
        self.submodules.peripheral = wishbone.Target(MyModelWB())
        self.submodules.tap = wishbone.Tap(self.peripheral.bus)
        self.submodules.interconnect = wishbone.InterconnectPointToPoint(master.bus,
                                                                         self.peripheral.bus)


class TBWishboneReader(TBWishbone):
    def __init__(self):
        self.adrgen = SimAdrGen(30)
        self.reader = dma_wishbone.Reader()
        self.dumper = SimDumper()
        g = DataFlowGraph()
        g.add_connection(self.adrgen, self.reader)
        g.add_connection(self.reader, self.dumper)
        self.submodules.comp = CompositeActor(g)
        TBWishbone.__init__(self, self.reader)


class TBWishboneWriter(TBWishbone):
    def __init__(self):
        self.trgen = SimTrGen(30)
        self.writer = dma_wishbone.Writer()
        g = DataFlowGraph()
        g.add_connection(self.trgen, self.writer)
        self.submodules.comp = CompositeActor(g)
        TBWishbone.__init__(self, self.writer)


def test_wb_reader():
    print("*** Testing Wishbone reader")
    run_simulation(TBWishboneReader(), 100)


def test_wb_writer():
    print("*** Testing Wishbone writer")
    run_simulation(TBWishboneWriter(), 100)

if __name__ == "__main__":
    test_wb_reader()
    test_wb_writer()
@@ -1,41 +0,0 @@
from migen.flow.network import *
from migen.flow.transactions import *
from migen.actorlib import misc
from migen.actorlib.sim import *
from migen.sim.generic import run_simulation


def source_gen():
    for i in range(10):
        v = i + 5
        print("==> " + str(v))
        yield Token("source", {"maximum": v})


class SimSource(SimActor):
    def __init__(self):
        self.source = Source([("maximum", 32)])
        SimActor.__init__(self, source_gen())


def sink_gen():
    while True:
        t = Token("sink")
        yield t
        print(t.value["value"])


class SimSink(SimActor):
    def __init__(self):
        self.sink = Sink([("value", 32)])
        SimActor.__init__(self, sink_gen())

if __name__ == "__main__":
    source = SimSource()
    loop = misc.IntSequence(32)
    sink = SimSink()
    g = DataFlowGraph()
    g.add_connection(source, loop)
    g.add_connection(loop, sink)
    comp = CompositeActor(g)
    run_simulation(comp, ncycles=500)
@@ -1,73 +0,0 @@
from itertools import count

import networkx as nx
import matplotlib.pyplot as plt

from migen.flow.network import *
from migen.flow.transactions import *
from migen.actorlib import structuring
from migen.actorlib.sim import *
from migen.flow import perftools
from migen.sim.generic import run_simulation

pack_factor = 5
base_layout = [("value", 32)]
packed_layout = structuring.pack_layout(base_layout, pack_factor)
rawbits_layout = [("value", 32*pack_factor)]


def source_gen():
    for i in count(0):
        yield Token("source", {"value": i})


class SimSource(SimActor):
    def __init__(self):
        self.source = Source(base_layout)
        SimActor.__init__(self, source_gen())


def sink_gen():
    while True:
        t = Token("sink")
        yield t
        print(t.value["value"])


class SimSink(SimActor):
    def __init__(self):
        self.sink = Sink(base_layout)
        SimActor.__init__(self, sink_gen())


class TB(Module):
    def __init__(self):
        source = SimSource()
        sink = SimSink()

        # A tortuous way of passing integer tokens.
        packer = structuring.Pack(base_layout, pack_factor)
        to_raw = structuring.Cast(packed_layout, rawbits_layout)
        from_raw = structuring.Cast(rawbits_layout, packed_layout)
        unpacker = structuring.Unpack(pack_factor, base_layout)

        self.g = DataFlowGraph()
        self.g.add_connection(source, packer)
        self.g.add_connection(packer, to_raw)
        self.g.add_connection(to_raw, from_raw)
        self.g.add_connection(from_raw, unpacker)
        self.g.add_connection(unpacker, sink)
        self.submodules.comp = CompositeActor(self.g)
        self.submodules.reporter = perftools.DFGReporter(self.g)

if __name__ == "__main__":
    tb = TB()
    run_simulation(tb, ncycles=1000)

    g = nx.MultiDiGraph()
    for u, v, edge in tb.g.edges_iter():
        g.add_edge(u, v, **edge)
    g_layout = nx.spectral_layout(g)
    nx.draw(g, g_layout)
    nx.draw_networkx_edge_labels(g, g_layout, tb.reporter.get_edge_labels())
    plt.show()
@@ -1,56 +0,0 @@
from migen.fhdl.std import *
from migen.bus import wishbone
from migen.flow.actor import *


class Reader(Module):
    def __init__(self):
        self.bus = wishbone.Interface()
        self.address = Sink([("a", 30)])
        self.data = Source([("d", 32)])
        self.busy = Signal()

        ###

        bus_stb = Signal()
        data_reg_loaded = Signal()
        data_reg = Signal(32)

        self.comb += [
            self.busy.eq(data_reg_loaded),
            self.bus.we.eq(0),
            bus_stb.eq(self.address.stb & (~data_reg_loaded | self.data.ack)),
            self.bus.cyc.eq(bus_stb),
            self.bus.stb.eq(bus_stb),
            self.bus.adr.eq(self.address.a),
            self.address.ack.eq(self.bus.ack),
            self.data.stb.eq(data_reg_loaded),
            self.data.d.eq(data_reg)
        ]
        self.sync += [
            If(self.data.ack, data_reg_loaded.eq(0)),
            If(self.bus.ack,
                data_reg_loaded.eq(1),
                data_reg.eq(self.bus.dat_r)
            )
        ]


class Writer(Module):
    def __init__(self):
        self.bus = wishbone.Interface()
        self.address_data = Sink([("a", 30), ("d", 32)])
        self.busy = Signal()

        ###

        self.comb += [
            self.busy.eq(0),
            self.bus.we.eq(1),
            self.bus.cyc.eq(self.address_data.stb),
            self.bus.stb.eq(self.address_data.stb),
            self.bus.adr.eq(self.address_data.a),
            self.bus.sel.eq(0xf),
            self.bus.dat_w.eq(self.address_data.d),
            self.address_data.ack.eq(self.bus.ack)
        ]
@@ -1,57 +0,0 @@
from migen.fhdl.std import *
from migen.flow.actor import *
from migen.genlib import fifo


class _FIFOActor(Module):
    def __init__(self, fifo_class, layout, depth):
        self.sink = Sink(layout)
        self.source = Source(layout)
        self.busy = Signal()

        ###

        description = self.sink.description
        fifo_layout = [
            ("payload", description.payload_layout),
            # Note: can be optimized by passing parameters
            # in another fifo. We will only have one
            # data per packet.
            ("param", description.param_layout)
        ]
        if description.packetized:
            fifo_layout += [("sop", 1), ("eop", 1)]

        self.submodules.fifo = fifo_class(fifo_layout, depth)

        self.comb += [
            self.sink.ack.eq(self.fifo.writable),
            self.fifo.we.eq(self.sink.stb),
            self.fifo.din.payload.eq(self.sink.payload),
            self.fifo.din.param.eq(self.sink.param),

            self.source.stb.eq(self.fifo.readable),
            self.source.payload.eq(self.fifo.dout.payload),
            self.source.param.eq(self.fifo.dout.param),
            self.fifo.re.eq(self.source.ack)
        ]
        if description.packetized:
            self.comb += [
                self.fifo.din.sop.eq(self.sink.sop),
                self.fifo.din.eop.eq(self.sink.eop),
                self.source.sop.eq(self.fifo.dout.sop),
                self.source.eop.eq(self.fifo.dout.eop)
            ]


class SyncFIFO(_FIFOActor):
    def __init__(self, layout, depth, buffered=False):
        _FIFOActor.__init__(
            self,
            fifo.SyncFIFOBuffered if buffered else fifo.SyncFIFO,
            layout, depth)


class AsyncFIFO(_FIFOActor):
    def __init__(self, layout, depth):
        _FIFOActor.__init__(self, fifo.AsyncFIFO, layout, depth)
@@ -1,96 +0,0 @@
from migen.fhdl.std import *
from migen.genlib.record import *
from migen.genlib.fsm import *
from migen.flow.actor import *
from migen.flow.plumbing import Buffer


# Generates integers from start to maximum-1
class IntSequence(Module):
    def __init__(self, nbits, offsetbits=0, step=1):
        parameters_layout = [("maximum", nbits)]
        if offsetbits:
            parameters_layout.append(("offset", offsetbits))

        self.parameters = Sink(parameters_layout)
        self.source = Source([("value", max(nbits, offsetbits))])
        self.busy = Signal()

        ###

        load = Signal()
        ce = Signal()
        last = Signal()

        maximum = Signal(nbits)
        if offsetbits:
            offset = Signal(offsetbits)
        counter = Signal(nbits)

        if step > 1:
            self.comb += last.eq(counter + step >= maximum)
        else:
            self.comb += last.eq(counter + 1 == maximum)
        self.sync += [
            If(load,
                counter.eq(0),
                maximum.eq(self.parameters.maximum),
                offset.eq(self.parameters.offset) if offsetbits else None
            ).Elif(ce,
                If(last,
                    counter.eq(0)
                ).Else(
                    counter.eq(counter + step)
                )
            )
        ]
        if offsetbits:
            self.comb += self.source.value.eq(counter + offset)
        else:
            self.comb += self.source.value.eq(counter)

        fsm = FSM()
        self.submodules += fsm
        fsm.act("IDLE",
            load.eq(1),
            self.parameters.ack.eq(1),
            If(self.parameters.stb, NextState("ACTIVE"))
        )
        fsm.act("ACTIVE",
            self.busy.eq(1),
            self.source.stb.eq(1),
            If(self.source.ack,
                ce.eq(1),
                If(last, NextState("IDLE"))
            )
        )


# Add buffers on Endpoints (can be used to improve timings)
class BufferizeEndpoints(ModuleTransformer):
    def __init__(self, *names):
        self.names = names

    def transform_instance(self, submodule):
        endpoints = get_endpoints(submodule)
        sinks = {}
        sources = {}
        for name, endpoint in endpoints.items():
            if not self.names or name in self.names:
                if isinstance(endpoint, Sink):
                    sinks.update({name: endpoint})
                elif isinstance(endpoint, Source):
                    sources.update({name: endpoint})

        # add buffer on sinks
        for name, sink in sinks.items():
            buf = Buffer(sink.description)
            submodule.submodules += buf
            setattr(submodule, name, buf.d)
            submodule.comb += Record.connect(buf.q, sink)

        # add buffer on sources
        for name, source in sources.items():
            buf = Buffer(source.description)
            submodule.submodules += buf
            submodule.comb += Record.connect(source, buf.d)
            setattr(submodule, name, buf.q)
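For context, a hedged sketch of applying BufferizeEndpoints as a ModuleTransformer; the Reader actor is the one from the dma_wishbone file removed earlier in this commit, and buffering only its "data" endpoint is an arbitrary choice for the example:

reader = dma_wishbone.Reader()
reader = BufferizeEndpoints("data")(reader)  # insert a Buffer on the "data" Source only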
@@ -1,365 +0,0 @@
from migen.fhdl.std import *
from migen.genlib.roundrobin import *
from migen.genlib.record import *
from migen.flow.actor import *
from migen.actorlib.fifo import SyncFIFO
from migen.genlib.fsm import FSM, NextState
from migen.genlib.misc import reverse_bytes, Counter


class Status(Module):
    def __init__(self, endpoint):
        self.sop = sop = Signal()
        self.eop = eop = Signal()
        self.ongoing = Signal()

        ongoing = Signal()
        self.comb += \
            If(endpoint.stb,
                sop.eq(endpoint.sop),
                eop.eq(endpoint.eop & endpoint.ack)
            )
        self.sync += ongoing.eq((sop | ongoing) & ~eop)
        self.comb += self.ongoing.eq((sop | ongoing) & ~eop)


class Arbiter(Module):
    def __init__(self, masters, slave):
        if len(masters) == 0:
            pass
        elif len(masters) == 1:
            self.grant = Signal()
            self.comb += Record.connect(masters.pop(), slave)
        else:
            self.submodules.rr = RoundRobin(len(masters))
            self.grant = self.rr.grant
            cases = {}
            for i, master in enumerate(masters):
                status = Status(master)
                self.submodules += status
                self.comb += self.rr.request[i].eq(status.ongoing)
                cases[i] = [Record.connect(master, slave)]
            self.comb += Case(self.grant, cases)


class Dispatcher(Module):
    def __init__(self, master, slaves, one_hot=False):
        if len(slaves) == 0:
            self.sel = Signal()
        elif len(slaves) == 1:
            self.comb += Record.connect(master, slaves.pop())
            self.sel = Signal()
        else:
            if one_hot:
                self.sel = Signal(len(slaves))
            else:
                self.sel = Signal(max=len(slaves))

            # # #

            status = Status(master)
            self.submodules += status

            sel = Signal.like(self.sel)
            sel_ongoing = Signal.like(self.sel)
            self.sync += \
                If(status.sop,
                    sel_ongoing.eq(self.sel)
                )
            self.comb += \
                If(status.sop,
                    sel.eq(self.sel)
                ).Else(
                    sel.eq(sel_ongoing)
                )
            cases = {}
            for i, slave in enumerate(slaves):
                if one_hot:
                    idx = 2**i
                else:
                    idx = i
                cases[idx] = [Record.connect(master, slave)]
            cases["default"] = [master.ack.eq(1)]
            self.comb += Case(sel, cases)


class HeaderField:
    def __init__(self, byte, offset, width):
        self.byte = byte
        self.offset = offset
        self.width = width


class Header:
    def __init__(self, fields, length, swap_field_bytes=True):
        self.fields = fields
        self.length = length
        self.swap_field_bytes = swap_field_bytes

    def get_layout(self):
        layout = []
        for k, v in sorted(self.fields.items()):
            layout.append((k, v.width))
        return layout

    def get_field(self, obj, name, width):
        if "_lsb" in name:
            field = getattr(obj, name.replace("_lsb", ""))[:width]
        elif "_msb" in name:
            field = getattr(obj, name.replace("_msb", ""))[width:2*width]
        else:
            field = getattr(obj, name)
        return field

    def encode(self, obj, signal):
        r = []
        for k, v in sorted(self.fields.items()):
            start = v.byte*8+v.offset
            end = start+v.width
            field = self.get_field(obj, k, v.width)
            if self.swap_field_bytes:
                field = reverse_bytes(field)
            r.append(signal[start:end].eq(field))
        return r

    def decode(self, signal, obj):
        r = []
        for k, v in sorted(self.fields.items()):
            start = v.byte*8+v.offset
            end = start+v.width
            field = self.get_field(obj, k, v.width)
            if self.swap_field_bytes:
                r.append(field.eq(reverse_bytes(signal[start:end])))
            else:
                r.append(field.eq(signal[start:end]))
        return r


class Packetizer(Module):
    def __init__(self, sink_description, source_description, header):
        self.sink = sink = Sink(sink_description)
        self.source = source = Source(source_description)
        self.header = Signal(header.length*8)

        # # #

        dw = flen(self.sink.data)

        header_reg = Signal(header.length*8)
        header_words = (header.length*8)//dw
        load = Signal()
        shift = Signal()
        counter = Counter(max=max(header_words, 2))
        self.submodules += counter

        self.comb += header.encode(sink, self.header)
        if header_words == 1:
            self.sync += [
                If(load,
                    header_reg.eq(self.header)
                )
            ]
        else:
            self.sync += [
                If(load,
                    header_reg.eq(self.header)
                ).Elif(shift,
                    header_reg.eq(Cat(header_reg[dw:], Signal(dw)))
                )
            ]

        fsm = FSM(reset_state="IDLE")
        self.submodules += fsm

        if header_words == 1:
            idle_next_state = "COPY"
        else:
            idle_next_state = "SEND_HEADER"

        fsm.act("IDLE",
            sink.ack.eq(1),
            counter.reset.eq(1),
            If(sink.stb & sink.sop,
                sink.ack.eq(0),
                source.stb.eq(1),
                source.sop.eq(1),
                source.eop.eq(0),
                source.data.eq(self.header[:dw]),
                If(source.stb & source.ack,
                    load.eq(1),
                    NextState(idle_next_state)
                )
            )
        )
        if header_words != 1:
            fsm.act("SEND_HEADER",
                source.stb.eq(1),
                source.sop.eq(0),
                source.eop.eq(0),
                source.data.eq(header_reg[dw:2*dw]),
                If(source.stb & source.ack,
                    shift.eq(1),
                    counter.ce.eq(1),
                    If(counter.value == header_words-2,
                        NextState("COPY")
                    )
                )
            )
        fsm.act("COPY",
            source.stb.eq(sink.stb),
            source.sop.eq(0),
            source.eop.eq(sink.eop),
            source.data.eq(sink.data),
            source.error.eq(sink.error),
            If(source.stb & source.ack,
                sink.ack.eq(1),
                If(source.eop,
                    NextState("IDLE")
                )
            )
        )


class Depacketizer(Module):
    def __init__(self, sink_description, source_description, header):
        self.sink = sink = Sink(sink_description)
        self.source = source = Source(source_description)
        self.header = Signal(header.length*8)

        # # #

        dw = flen(sink.data)

        header_words = (header.length*8)//dw

        shift = Signal()
        counter = Counter(max=max(header_words, 2))
        self.submodules += counter

        if header_words == 1:
            self.sync += \
                If(shift,
                    self.header.eq(sink.data)
                )
        else:
            self.sync += \
                If(shift,
                    self.header.eq(Cat(self.header[dw:], sink.data))
                )

        fsm = FSM(reset_state="IDLE")
        self.submodules += fsm

        if header_words == 1:
            idle_next_state = "COPY"
        else:
            idle_next_state = "RECEIVE_HEADER"

        fsm.act("IDLE",
            sink.ack.eq(1),
            counter.reset.eq(1),
            If(sink.stb,
                shift.eq(1),
                NextState(idle_next_state)
            )
        )
        if header_words != 1:
            fsm.act("RECEIVE_HEADER",
                sink.ack.eq(1),
                If(sink.stb,
                    counter.ce.eq(1),
                    shift.eq(1),
                    If(counter.value == header_words-2,
                        NextState("COPY")
                    )
                )
            )
        no_payload = Signal()
        self.sync += \
            If(fsm.before_entering("COPY"),
                source.sop.eq(1),
                no_payload.eq(sink.eop)
            ).Elif(source.stb & source.ack,
                source.sop.eq(0)
            )

        if hasattr(sink, "error"):
            self.comb += source.error.eq(sink.error)
        self.comb += [
            source.eop.eq(sink.eop | no_payload),
            source.data.eq(sink.data),
            header.decode(self.header, source)
        ]
        fsm.act("COPY",
            sink.ack.eq(source.ack),
            source.stb.eq(sink.stb | no_payload),
            If(source.stb & source.ack & source.eop,
                NextState("IDLE")
            )
        )


class Buffer(Module):
    def __init__(self, description, data_depth, cmd_depth=4, almost_full=None):
        self.sink = sink = Sink(description)
        self.source = source = Source(description)

        # # #

        sink_status = Status(self.sink)
        source_status = Status(self.source)
        self.submodules += sink_status, source_status

        # store incoming packets
        # cmds
        def cmd_description():
            layout = [("error", 1)]
            return EndpointDescription(layout)
        cmd_fifo = SyncFIFO(cmd_description(), cmd_depth)
        self.submodules += cmd_fifo
        self.comb += cmd_fifo.sink.stb.eq(sink_status.eop)
        if hasattr(sink, "error"):
            self.comb += cmd_fifo.sink.error.eq(sink.error)

        # data
        data_fifo = SyncFIFO(description, data_depth, buffered=True)
        self.submodules += data_fifo
        self.comb += [
            Record.connect(self.sink, data_fifo.sink),
            data_fifo.sink.stb.eq(self.sink.stb & cmd_fifo.sink.ack),
            self.sink.ack.eq(data_fifo.sink.ack & cmd_fifo.sink.ack),
        ]

        # output packets
        self.fsm = fsm = FSM(reset_state="IDLE")
        self.submodules += fsm
        fsm.act("IDLE",
            If(cmd_fifo.source.stb,
                NextState("SEEK_SOP")
            )
        )
        fsm.act("SEEK_SOP",
            If(~data_fifo.source.sop,
                data_fifo.source.ack.eq(1)
            ).Else(
                NextState("OUTPUT")
            )
        )
        if hasattr(source, "error"):
            source_error = self.source.error
        else:
            source_error = Signal()

        fsm.act("OUTPUT",
            Record.connect(data_fifo.source, self.source),
            source_error.eq(cmd_fifo.source.error),
            If(source_status.eop,
                cmd_fifo.source.ack.eq(1),
                NextState("IDLE")
            )
        )

        # compute almost full
        if almost_full is not None:
            self.almost_full = Signal()
            self.comb += self.almost_full.eq(data_fifo.fifo.level > almost_full)
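For orientation, a minimal sketch of how a header could be declared for the Packetizer/Depacketizer above; the field names, widths, the 32-bit datapath and the use of EndpointDescription are illustrative assumptions, not taken from this commit:

# Hypothetical 64-bit header carrying two 16-bit fields, on a 32-bit datapath.
my_header = Header(
    {"length":      HeaderField(byte=0, offset=0, width=16),
     "destination": HeaderField(byte=2, offset=0, width=16)},
    length=8)  # total header length in bytes

phy_description = EndpointDescription([("data", 32), ("error", 1)], packetized=True)
user_description = EndpointDescription(my_header.get_layout() + [("data", 32), ("error", 1)],
    packetized=True)

packetizer = Packetizer(user_description, phy_description, my_header)
depacketizer = Depacketizer(phy_description, user_description, my_header)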
@@ -1,122 +0,0 @@
from migen.fhdl.std import *
from migen.flow.actor import *
from migen.flow.transactions import *
from migen.util.misc import xdir


def _sim_multiread(sim, obj):
    if isinstance(obj, Signal):
        return sim.rd(obj)
    else:
        r = {}
        for k, v in xdir(obj, True):
            rd = _sim_multiread(sim, v)
            if isinstance(rd, int) or rd:
                r[k] = rd
        return r


def _sim_multiwrite(sim, obj, value):
    if isinstance(obj, Signal):
        sim.wr(obj, value)
    else:
        for k, v in value.items():
            _sim_multiwrite(sim, getattr(obj, k), v)


# Generators yield None or a tuple of Tokens.
# Tokens for Sink endpoints are pulled and the "value" field filled in.
# Tokens for Source endpoints are pushed according to their "value" field.
#
# NB: the possibility to push several tokens at once is important to interact
# with actors that only accept a group of tokens when all of them are available.
class TokenExchanger(Module):
    def __init__(self, generator, actor):
        self.generator = generator
        self.actor = actor
        self.active = set()
        self.busy = True
        self.done = False

    def _process_transactions(self, selfp):
        completed = set()
        for token in self.active:
            ep = getattr(self.actor, token.endpoint)
            if isinstance(ep, Sink):
                if selfp.simulator.rd(ep.ack) and selfp.simulator.rd(ep.stb):
                    token.value = _sim_multiread(selfp.simulator, ep.payload)
                    completed.add(token)
                    selfp.simulator.wr(ep.ack, 0)
            elif isinstance(ep, Source):
                if selfp.simulator.rd(ep.ack) and selfp.simulator.rd(ep.stb):
                    completed.add(token)
                    selfp.simulator.wr(ep.stb, 0)
            else:
                raise TypeError
        self.active -= completed
        if not self.active:
            self.busy = True

    def _update_control_signals(self, selfp):
        for token in self.active:
            ep = getattr(self.actor, token.endpoint)
            if isinstance(ep, Sink):
                selfp.simulator.wr(ep.ack, 1)
            elif isinstance(ep, Source):
                _sim_multiwrite(selfp.simulator, ep.payload, token.value)
                selfp.simulator.wr(ep.stb, 1)
            else:
                raise TypeError

    def _next_transactions(self):
        try:
            transactions = next(self.generator)
        except StopIteration:
            self.busy = False
            self.done = True
            raise StopSimulation
        if isinstance(transactions, Token):
            self.active = {transactions}
        elif isinstance(transactions, (tuple, list, set)):
            self.active = set(transactions)
        elif transactions is None:
            self.active = set()
        else:
            raise TypeError
        if self.active and all(transaction.idle_wait for transaction in self.active):
            self.busy = False

    def do_simulation(self, selfp):
        if self.active:
            self._process_transactions(selfp)
        if not self.active:
            self._next_transactions()
            self._update_control_signals(selfp)
    do_simulation.passive = True


class SimActor(Module):
    def __init__(self, generator):
        self.busy = Signal()
        self.submodules.token_exchanger = TokenExchanger(generator, self)

    def do_simulation(self, selfp):
        selfp.busy = self.token_exchanger.busy
    do_simulation.passive = True


def _dumper_gen(prefix):
    while True:
        t = Token("result")
        yield t
        if len(t.value) > 1:
            s = str(t.value)
        else:
            s = str(list(t.value.values())[0])
        print(prefix + s)


class Dumper(SimActor):
    def __init__(self, layout, prefix=""):
        self.result = Sink(layout)
        SimActor.__init__(self, _dumper_gen(prefix))
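To make the token protocol described in the comments above concrete, a short hedged sketch (the endpoint and field names are invented for the example):

# Hypothetical generator for a SimActor owning a Source named "source": each
# yield pushes one token; yielding a tuple would push several tokens in the
# same exchange, for actors that need all of their inputs at once.
def count_gen():
    for i in range(4):
        yield Token("source", {"value": i})

class SimCounter(SimActor):
    def __init__(self):
        self.source = Source([("value", 32)])
        SimActor.__init__(self, count_gen())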
@@ -1,184 +0,0 @@
# Simple Processor Interface

from migen.fhdl.std import *
from migen.bank.description import *
from migen.flow.actor import *
from migen.flow.network import *
from migen.flow import plumbing
from migen.actorlib import misc


# layout is a list of tuples, either:
# - (name, nbits, [reset value], [alignment bits])
# - (name, sublayout)

def _convert_layout(layout):
    r = []
    for element in layout:
        if isinstance(element[1], list):
            r.append((element[0], _convert_layout(element[1])))
        else:
            r.append((element[0], element[1]))
    return r

(MODE_EXTERNAL, MODE_SINGLE_SHOT, MODE_CONTINUOUS) = range(3)


class SingleGenerator(Module, AutoCSR):
    def __init__(self, layout, mode):
        self.source = Source(_convert_layout(layout))
        self.busy = Signal()

        self.comb += self.busy.eq(self.source.stb)

        if mode == MODE_EXTERNAL:
            self.trigger = Signal()
            trigger = self.trigger
        elif mode == MODE_SINGLE_SHOT:
            self._shoot = CSR()
            trigger = self._shoot.re
        elif mode == MODE_CONTINUOUS:
            self._enable = CSRStorage()
            trigger = self._enable.storage
        else:
            raise ValueError
        self.sync += If(self.source.ack | ~self.source.stb, self.source.stb.eq(trigger))

        self._create_csrs(layout, self.source.payload, mode != MODE_SINGLE_SHOT)

    def _create_csrs(self, layout, target, atomic, prefix=""):
        for element in layout:
            if isinstance(element[1], list):
                self._create_csrs(element[1], atomic,
                    getattr(target, element[0]),
                    element[0] + "_")
            else:
                name = element[0]
                nbits = element[1]
                if len(element) > 2:
                    reset = element[2]
                else:
                    reset = 0
                if len(element) > 3:
                    alignment = element[3]
                else:
                    alignment = 0
                regname = prefix + name
                reg = CSRStorage(nbits + alignment, reset=reset, atomic_write=atomic,
                    alignment_bits=alignment, name=regname)
                setattr(self, "_"+regname, reg)
                self.sync += If(self.source.ack | ~self.source.stb,
                    getattr(target, name).eq(reg.storage))


class Collector(Module, AutoCSR):
    def __init__(self, layout, depth=1024):
        self.sink = Sink(layout)
        self.busy = Signal()
        dw = sum(len(s) for s in self.sink.payload.flatten())

        self._wa = CSRStorage(bits_for(depth-1), write_from_dev=True)
        self._wc = CSRStorage(bits_for(depth), write_from_dev=True, atomic_write=True)
        self._ra = CSRStorage(bits_for(depth-1))
        self._rd = CSRStatus(dw)

        ###

        mem = Memory(dw, depth)
        self.specials += mem
        wp = mem.get_port(write_capable=True)
        rp = mem.get_port()
        self.specials += wp, rp

        self.comb += [
            self.busy.eq(0),

            If(self._wc.r != 0,
                self.sink.ack.eq(1),
                If(self.sink.stb,
                    self._wa.we.eq(1),
                    self._wc.we.eq(1),
                    wp.we.eq(1)
                )
            ),
            self._wa.dat_w.eq(self._wa.storage + 1),
            self._wc.dat_w.eq(self._wc.storage - 1),

            wp.adr.eq(self._wa.storage),
            wp.dat_w.eq(self.sink.payload.raw_bits()),

            rp.adr.eq(self._ra.storage),
            self._rd.status.eq(rp.dat_r)
        ]


class _DMAController(Module):
    def __init__(self, bus_accessor, bus_aw, bus_dw, mode, base_reset=0, length_reset=0):
        self.alignment_bits = bits_for(bus_dw//8) - 1
        layout = [
            ("length", bus_aw + self.alignment_bits, length_reset, self.alignment_bits),
            ("base", bus_aw + self.alignment_bits, base_reset, self.alignment_bits)
        ]
        self.generator = SingleGenerator(layout, mode)
        self._busy = CSRStatus()

        self.length = self.generator._length.storage
        self.base = self.generator._base.storage
        if hasattr(self.generator, "trigger"):
            self.trigger = self.generator.trigger

    def get_csrs(self):
        return self.generator.get_csrs() + [self._busy]


class DMAReadController(_DMAController):
    def __init__(self, bus_accessor, *args, **kwargs):
        bus_aw = flen(bus_accessor.address.a)
        bus_dw = flen(bus_accessor.data.d)
        _DMAController.__init__(self, bus_accessor, bus_aw, bus_dw, *args, **kwargs)

        g = DataFlowGraph()
        g.add_pipeline(self.generator,
            misc.IntSequence(bus_aw, bus_aw),
            AbstractActor(plumbing.Buffer),
            bus_accessor,
            AbstractActor(plumbing.Buffer))
        comp_actor = CompositeActor(g)
        self.submodules += comp_actor

        self.data = comp_actor.q
        self.busy = comp_actor.busy
        self.comb += self._busy.status.eq(self.busy)


class DMAWriteController(_DMAController):
    def __init__(self, bus_accessor, *args, ack_when_inactive=False, **kwargs):
        bus_aw = flen(bus_accessor.address_data.a)
        bus_dw = flen(bus_accessor.address_data.d)
        _DMAController.__init__(self, bus_accessor, bus_aw, bus_dw, *args, **kwargs)

        g = DataFlowGraph()
        adr_buffer = AbstractActor(plumbing.Buffer)
        int_sequence = misc.IntSequence(bus_aw, bus_aw)
        g.add_pipeline(self.generator,
            int_sequence,
            adr_buffer)
        g.add_connection(adr_buffer, bus_accessor, sink_subr=["a"])
        g.add_connection(AbstractActor(plumbing.Buffer), bus_accessor, sink_subr=["d"])
        comp_actor = CompositeActor(g)
        self.submodules += comp_actor

        if ack_when_inactive:
            demultiplexer = plumbing.Demultiplexer([("d", bus_dw)], 2)
            self.comb += [
                demultiplexer.sel.eq(~adr_buffer.busy),
                demultiplexer.source0.connect(comp_actor.d),
                demultiplexer.source1.ack.eq(1),
            ]
            self.submodules += demultiplexer
            self.data = demultiplexer.sink
        else:
            self.data = comp_actor.d

        self.busy = comp_actor.busy
        self.comb += self._busy.status.eq(self.busy)
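To illustrate the layout convention documented at the top of this file, a hedged sketch (register names, widths and reset values are made up for the example):

# Hypothetical descriptor: a 16-bit "period" register resetting to 100, and a
# 26-bit "address" register aligned to 2 bits (i.e. stored word-aligned).
example_layout = [
    ("period", 16, 100),
    ("address", 26, 0, 2)
]
gen = SingleGenerator(example_layout, MODE_SINGLE_SHOT)
# This creates gen._period and gen._address as CSRStorage registers plus a
# _shoot CSR; writing _shoot emits one token carrying both fields on gen.source.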
@@ -1,269 +0,0 @@
from copy import copy

from migen.fhdl.std import *
from migen.genlib.record import *
from migen.flow.actor import *


def _rawbits_layout(l):
    if isinstance(l, int):
        return [("rawbits", l)]
    else:
        return l


class Cast(CombinatorialActor):
    def __init__(self, layout_from, layout_to, reverse_from=False, reverse_to=False):
        self.sink = Sink(_rawbits_layout(layout_from))
        self.source = Source(_rawbits_layout(layout_to))
        CombinatorialActor.__init__(self)

        ###

        sigs_from = self.sink.payload.flatten()
        if reverse_from:
            sigs_from = list(reversed(sigs_from))
        sigs_to = self.source.payload.flatten()
        if reverse_to:
            sigs_to = list(reversed(sigs_to))
        if sum(flen(s) for s in sigs_from) != sum(flen(s) for s in sigs_to):
            raise TypeError
        self.comb += Cat(*sigs_to).eq(Cat(*sigs_from))


def pack_layout(l, n):
    return [("chunk"+str(i), l) for i in range(n)]


class Unpack(Module):
    def __init__(self, n, layout_to, reverse=False):
        self.source = source = Source(layout_to)
        description_from = copy(source.description)
        description_from.payload_layout = pack_layout(description_from.payload_layout, n)
        self.sink = sink = Sink(description_from)

        self.busy = Signal()

        ###

        mux = Signal(max=n)
        first = Signal()
        last = Signal()
        self.comb += [
            first.eq(mux == 0),
            last.eq(mux == (n-1)),
            source.stb.eq(sink.stb),
            sink.ack.eq(last & source.ack)
        ]
        self.sync += [
            If(source.stb & source.ack,
                If(last,
                    mux.eq(0)
                ).Else(
                    mux.eq(mux + 1)
                )
            )
        ]
        cases = {}
        for i in range(n):
            chunk = n-i-1 if reverse else i
            cases[i] = [source.payload.raw_bits().eq(getattr(sink.payload, "chunk"+str(chunk)).raw_bits())]
        self.comb += Case(mux, cases).makedefault()

        for f in description_from.param_layout:
            src = getattr(self.sink, f[0])
            dst = getattr(self.source, f[0])
            self.comb += dst.eq(src)

        if description_from.packetized:
            self.comb += [
                source.sop.eq(sink.sop & first),
                source.eop.eq(sink.eop & last)
            ]


class Pack(Module):
    def __init__(self, layout_from, n, reverse=False):
        self.sink = sink = Sink(layout_from)
        description_to = copy(sink.description)
        description_to.payload_layout = pack_layout(description_to.payload_layout, n)
        self.source = source = Source(description_to)
        self.busy = Signal()

        ###

        demux = Signal(max=n)

        load_part = Signal()
        strobe_all = Signal()
        cases = {}
        for i in range(n):
            chunk = n-i-1 if reverse else i
            cases[i] = [getattr(source.payload, "chunk"+str(chunk)).raw_bits().eq(sink.payload.raw_bits())]
        self.comb += [
            self.busy.eq(strobe_all),
            sink.ack.eq(~strobe_all | source.ack),
            source.stb.eq(strobe_all),
            load_part.eq(sink.stb & sink.ack)
        ]

        for f in description_to.param_layout:
            src = getattr(self.sink, f[0])
            dst = getattr(self.source, f[0])
            self.comb += dst.eq(src)

        if description_to.packetized:
            demux_last = ((demux == (n - 1)) | sink.eop)
        else:
            demux_last = (demux == (n - 1))

        self.sync += [
            If(source.ack, strobe_all.eq(0)),
            If(load_part,
                Case(demux, cases),
                If(demux_last,
                    demux.eq(0),
                    strobe_all.eq(1)
                ).Else(
                    demux.eq(demux + 1)
                )
            )
        ]

        if description_to.packetized:
            self.sync += [
                If(source.stb & source.ack,
                    source.sop.eq(sink.sop),
                    source.eop.eq(sink.eop),
                ).Elif(sink.stb & sink.ack,
                    source.sop.eq(sink.sop | source.sop),
                    source.eop.eq(sink.eop | source.eop)
                )
            ]


class Chunkerize(CombinatorialActor):
    def __init__(self, layout_from, layout_to, n, reverse=False):
        self.sink = Sink(layout_from)
        if isinstance(layout_to, EndpointDescription):
            layout_to = copy(layout_to)
            layout_to.payload_layout = pack_layout(layout_to.payload_layout, n)
        else:
            layout_to = pack_layout(layout_to, n)
        self.source = Source(layout_to)
        CombinatorialActor.__init__(self)

        ###

        for i in range(n):
            chunk = n-i-1 if reverse else i
            for f in self.sink.description.payload_layout:
                src = getattr(self.sink, f[0])
                dst = getattr(getattr(self.source, "chunk"+str(chunk)), f[0])
                self.comb += dst.eq(src[i*flen(src)//n:(i+1)*flen(src)//n])

        for f in self.sink.description.param_layout:
            src = getattr(self.sink, f[0])
            dst = getattr(self.source, f[0])
            self.comb += dst.eq(src)


class Unchunkerize(CombinatorialActor):
    def __init__(self, layout_from, n, layout_to, reverse=False):
        if isinstance(layout_from, EndpointDescription):
            fields = layout_from.payload_layout
            layout_from = copy(layout_from)
            layout_from.payload_layout = pack_layout(layout_from.payload_layout, n)
        else:
            fields = layout_from
            layout_from = pack_layout(layout_from, n)
        self.sink = Sink(layout_from)
        self.source = Source(layout_to)
        CombinatorialActor.__init__(self)

        ###

        for i in range(n):
            chunk = n-i-1 if reverse else i
            for f in fields:
                src = getattr(getattr(self.sink, "chunk"+str(chunk)), f[0])
                dst = getattr(self.source, f[0])
                self.comb += dst[i*flen(dst)//n:(i+1)*flen(dst)//n].eq(src)

        for f in self.sink.description.param_layout:
            src = getattr(self.sink, f[0])
            dst = getattr(self.source, f[0])
            self.comb += dst.eq(src)


class Converter(Module):
    def __init__(self, layout_from, layout_to, reverse=False):
        self.sink = Sink(layout_from)
        self.source = Source(layout_to)
        self.busy = Signal()

        ###

        width_from = flen(self.sink.payload.raw_bits())
        width_to = flen(self.source.payload.raw_bits())

        # downconverter
        if width_from > width_to:
            if width_from % width_to:
                raise ValueError
            ratio = width_from//width_to
            self.submodules.chunkerize = Chunkerize(layout_from, layout_to, ratio, reverse)
            self.submodules.unpack = Unpack(ratio, layout_to)

            self.comb += [
                Record.connect(self.sink, self.chunkerize.sink),
                Record.connect(self.chunkerize.source, self.unpack.sink),
                Record.connect(self.unpack.source, self.source),
                self.busy.eq(self.unpack.busy)
            ]
        # upconverter
        elif width_to > width_from:
            if width_to % width_from:
                raise ValueError
            ratio = width_to//width_from
            self.submodules.pack = Pack(layout_from, ratio)
            self.submodules.unchunkerize = Unchunkerize(layout_from, ratio, layout_to, reverse)

            self.comb += [
                Record.connect(self.sink, self.pack.sink),
                Record.connect(self.pack.source, self.unchunkerize.sink),
                Record.connect(self.unchunkerize.source, self.source),
                self.busy.eq(self.pack.busy)
            ]
        # direct connection
        else:
            self.comb += Record.connect(self.sink, self.source)


class Pipeline(Module):
    def __init__(self, *modules):
        self.busy = Signal()
        n = len(modules)
        m = modules[0]
        # expose sink of first module
        # if available
        if hasattr(m, "sink"):
            self.sink = m.sink
        if hasattr(m, "busy"):
            busy = m.busy
        else:
            busy = 0
        for i in range(1, n):
            m_n = modules[i]
            if hasattr(m_n, "busy"):
                busy_n = m_n.busy
            else:
                busy_n = 0
            self.comb += m.source.connect(m_n.sink)
            m = m_n
            busy = busy | busy_n
        # expose source of last module
        # if available
        if hasattr(m, "source"):
            self.source = m.source
        self.comb += self.busy.eq(busy)
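For orientation, a hedged sketch of the Converter above used as a width downconverter (the 32-to-8-bit layouts are illustrative):

# Hypothetical 32-bit to 8-bit downconverter: each 32-bit input token is split
# into four 8-bit output tokens (Chunkerize followed by Unpack internally).
conv = Converter([("data", 32)], [("data", 8)])
# conv.sink accepts [("data", 32)] tokens; conv.source produces [("data", 8)].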
@@ -1,25 +0,0 @@
from migen.fhdl.std import Module, bits_for
from migen.bank.description import CSR


class GenericBank(Module):
    def __init__(self, description, busword):
        # Turn description into simple CSRs and claim ownership of compound CSR modules
        self.simple_csrs = []
        for c in description:
            if isinstance(c, CSR):
                self.simple_csrs.append(c)
            else:
                c.finalize(busword)
                self.simple_csrs += c.get_simple_csrs()
                self.submodules += c
        self.decode_bits = bits_for(len(self.simple_csrs)-1)


def get_offset(description, name, busword):
    offset = 0
    for c in description:
        if c.name == name:
            return offset
        offset += (c.size + busword - 1)//busword
    raise KeyError("CSR not found: "+name)
@@ -1,82 +0,0 @@
from migen.util.misc import xdir
from migen.fhdl.std import *
from migen.bus import csr
from migen.bank.bank import GenericBank


class Bank(GenericBank):
    def __init__(self, description, address=0, bus=None):
        if bus is None:
            bus = csr.Interface()
        self.bus = bus

        ###

        GenericBank.__init__(self, description, flen(self.bus.dat_w))

        sel = Signal()
        self.comb += sel.eq(self.bus.adr[9:] == address)

        for i, c in enumerate(self.simple_csrs):
            self.comb += [
                c.r.eq(self.bus.dat_w[:c.size]),
                c.re.eq(sel & \
                    self.bus.we & \
                    (self.bus.adr[:self.decode_bits] == i))
            ]

        brcases = dict((i, self.bus.dat_r.eq(c.w)) for i, c in enumerate(self.simple_csrs))
        self.sync += [
            self.bus.dat_r.eq(0),
            If(sel, Case(self.bus.adr[:self.decode_bits], brcases))
        ]


# address_map(name, memory) returns the CSR offset at which to map
# the CSR object (register bank or memory).
# If memory=None, the object is the register bank of object source.name.
# Otherwise, it is a memory object belonging to source.name.
# address_map is called exactly once for each object at each call to
# scan(), so it can have side effects.
class BankArray(Module):
    def __init__(self, source, address_map, *ifargs, **ifkwargs):
        self.source = source
        self.address_map = address_map
        self.scan(ifargs, ifkwargs)

    def scan(self, ifargs, ifkwargs):
        self.banks = []
        self.srams = []
        for name, obj in xdir(self.source, True):
            if hasattr(obj, "get_csrs"):
                csrs = obj.get_csrs()
            else:
                csrs = []
            if hasattr(obj, "get_memories"):
                memories = obj.get_memories()
                for memory in memories:
                    mapaddr = self.address_map(name, memory)
                    if mapaddr is None:
                        continue
                    sram_bus = csr.Interface(*ifargs, **ifkwargs)
                    mmap = csr.SRAM(memory, mapaddr, bus=sram_bus)
                    self.submodules += mmap
                    csrs += mmap.get_csrs()
                    self.srams.append((name, memory, mapaddr, mmap))
            if csrs:
                mapaddr = self.address_map(name, None)
                if mapaddr is None:
                    continue
                bank_bus = csr.Interface(*ifargs, **ifkwargs)
                rmap = Bank(csrs, mapaddr, bus=bank_bus)
                self.submodules += rmap
                self.banks.append((name, csrs, mapaddr, rmap))

    def get_rmaps(self):
        return [rmap for name, csrs, mapaddr, rmap in self.banks]

    def get_mmaps(self):
        return [mmap for name, memory, mapaddr, mmap in self.srams]

    def get_buses(self):
        return [i.bus for i in self.get_rmaps() + self.get_mmaps()]
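A minimal sketch of an address_map callback matching the contract described in the comment above; the peripheral names, the fixed table and the soc object are illustrative assumptions:

# Hypothetical mapping: fixed CSR bank numbers per peripheral, memories unmapped.
_csr_map = {"uart": 0, "timer0": 1}

def example_address_map(name, memory):
    if memory is not None:
        return None              # do not expose any memory over CSR
    return _csr_map.get(name)

csr_banks = BankArray(soc, example_address_map, data_width=8)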
@@ -1,147 +0,0 @@
from migen.util.misc import xdir
from migen.fhdl.std import *
from migen.fhdl.tracer import get_obj_var_name


class _CSRBase(HUID):
    def __init__(self, size, name):
        HUID.__init__(self)
        self.name = get_obj_var_name(name)
        if self.name is None:
            raise ValueError("Cannot extract CSR name from code, need to specify.")
        self.size = size


class CSR(_CSRBase):
    def __init__(self, size=1, name=None):
        _CSRBase.__init__(self, size, name)
        self.re = Signal(name=self.name + "_re")
        self.r = Signal(self.size, name=self.name + "_r")
        self.w = Signal(self.size, name=self.name + "_w")


class _CompoundCSR(_CSRBase, Module):
    def __init__(self, size, name):
        _CSRBase.__init__(self, size, name)
        self.simple_csrs = []

    def get_simple_csrs(self):
        if not self.finalized:
            raise FinalizeError
        return self.simple_csrs

    def do_finalize(self, busword):
        raise NotImplementedError


class CSRStatus(_CompoundCSR):
    def __init__(self, size=1, reset=0, name=None):
        _CompoundCSR.__init__(self, size, name)
        self.status = Signal(self.size, reset=reset)

    def do_finalize(self, busword):
        nwords = (self.size + busword - 1)//busword
        for i in reversed(range(nwords)):
            nbits = min(self.size - i*busword, busword)
            sc = CSR(nbits, self.name + str(i) if nwords > 1 else self.name)
            self.comb += sc.w.eq(self.status[i*busword:i*busword+nbits])
            self.simple_csrs.append(sc)


class CSRStorage(_CompoundCSR):
    def __init__(self, size=1, reset=0, atomic_write=False, write_from_dev=False, alignment_bits=0, name=None):
        _CompoundCSR.__init__(self, size, name)
        self.alignment_bits = alignment_bits
        self.storage_full = Signal(self.size, reset=reset)
        self.storage = Signal(self.size - self.alignment_bits, reset=reset >> alignment_bits)
        self.comb += self.storage.eq(self.storage_full[self.alignment_bits:])
        self.atomic_write = atomic_write
        self.re = Signal()
        if write_from_dev:
            self.we = Signal()
            self.dat_w = Signal(self.size - self.alignment_bits)
            self.sync += If(self.we, self.storage_full.eq(self.dat_w << self.alignment_bits))

    def do_finalize(self, busword):
        nwords = (self.size + busword - 1)//busword
        if nwords > 1 and self.atomic_write:
            backstore = Signal(self.size - busword, name=self.name + "_backstore")
        for i in reversed(range(nwords)):
            nbits = min(self.size - i*busword, busword)
            sc = CSR(nbits, self.name + str(i) if nwords else self.name)
            self.simple_csrs.append(sc)
            lo = i*busword
            hi = lo+nbits
            # read
            if lo >= self.alignment_bits:
                self.comb += sc.w.eq(self.storage_full[lo:hi])
            elif hi > self.alignment_bits:
                self.comb += sc.w.eq(Cat(Replicate(0, hi - self.alignment_bits),
                    self.storage_full[self.alignment_bits:hi]))
            else:
                self.comb += sc.w.eq(0)
            # write
            if nwords > 1 and self.atomic_write:
                if i:
                    self.sync += If(sc.re, backstore[lo-busword:hi-busword].eq(sc.r))
                else:
                    self.sync += If(sc.re, self.storage_full.eq(Cat(sc.r, backstore)))
            else:
                self.sync += If(sc.re, self.storage_full[lo:hi].eq(sc.r))
        self.sync += self.re.eq(sc.re)


def csrprefix(prefix, csrs, done):
    for csr in csrs:
        if csr.huid not in done:
            csr.name = prefix + csr.name
            done.add(csr.huid)


def memprefix(prefix, memories, done):
    for memory in memories:
        if memory.huid not in done:
            memory.name_override = prefix + memory.name_override
            done.add(memory.huid)


class AutoCSR:
    def get_memories(self):
        try:
            exclude = self.autocsr_exclude
        except AttributeError:
            exclude = {}
        try:
            prefixed = self.__prefixed
        except AttributeError:
            prefixed = self.__prefixed = set()
        r = []
        for k, v in xdir(self, True):
            if k not in exclude:
                if isinstance(v, Memory):
                    r.append(v)
                elif hasattr(v, "get_memories") and callable(v.get_memories):
                    memories = v.get_memories()
                    memprefix(k + "_", memories, prefixed)
                    r += memories
        return sorted(r, key=lambda x: x.huid)

    def get_csrs(self):
        try:
            exclude = self.autocsr_exclude
        except AttributeError:
            exclude = {}
        try:
            prefixed = self.__prefixed
        except AttributeError:
            prefixed = self.__prefixed = set()
        r = []
        for k, v in xdir(self, True):
            if k not in exclude:
                if isinstance(v, _CSRBase):
                    r.append(v)
                elif hasattr(v, "get_csrs") and callable(v.get_csrs):
                    csrs = v.get_csrs()
                    csrprefix(k + "_", csrs, prefixed)
                    r += csrs
        return sorted(r, key=lambda x: x.huid)
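For context, a hedged sketch of a peripheral built from CSRStorage/CSRStatus with AutoCSR, so a BankArray can collect its registers automatically (the GPIO naming and pads are illustrative):

class ExampleGPIO(Module, AutoCSR):
    def __init__(self, pads_in, pads_out):
        self._out = CSRStorage(flen(pads_out))   # software-writable outputs
        self._in = CSRStatus(flen(pads_in))      # software-readable inputs
        self.comb += [
            pads_out.eq(self._out.storage),
            self._in.status.eq(pads_in)
        ]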
@@ -1,83 +0,0 @@
from migen.util.misc import xdir
from migen.fhdl.std import *
from migen.bank.description import *
from migen.genlib.misc import optree


class _EventSource(HUID):
    def __init__(self):
        HUID.__init__(self)
        self.status = Signal()   # value in the status register
        self.pending = Signal()  # value in the pending register + assert irq if unmasked
        self.trigger = Signal()  # trigger signal interface to the user design
        self.clear = Signal()    # clearing attempt by W1C to pending register, ignored by some event sources


# set on a positive trigger pulse
class EventSourcePulse(Module, _EventSource):
    def __init__(self):
        _EventSource.__init__(self)
        self.comb += self.status.eq(0)
        self.sync += [
            If(self.clear, self.pending.eq(0)),
            If(self.trigger, self.pending.eq(1))
        ]


# set on the falling edge of the trigger, status = trigger
class EventSourceProcess(Module, _EventSource):
    def __init__(self):
        _EventSource.__init__(self)
        self.comb += self.status.eq(self.trigger)
        old_trigger = Signal()
        self.sync += [
            If(self.clear, self.pending.eq(0)),
            old_trigger.eq(self.trigger),
            If(~self.trigger & old_trigger, self.pending.eq(1))
        ]


# all status set by external trigger
class EventSourceLevel(Module, _EventSource):
    def __init__(self):
        _EventSource.__init__(self)
        self.comb += [
            self.status.eq(self.trigger),
            self.pending.eq(self.trigger)
        ]


class EventManager(Module, AutoCSR):
    def __init__(self):
        self.irq = Signal()

    def do_finalize(self):
        sources_u = [v for k, v in xdir(self, True) if isinstance(v, _EventSource)]
        sources = sorted(sources_u, key=lambda x: x.huid)
        n = len(sources)
        self.status = CSR(n)
        self.pending = CSR(n)
        self.enable = CSRStorage(n)

        for i, source in enumerate(sources):
            self.comb += [
                self.status.w[i].eq(source.status),
                If(self.pending.re & self.pending.r[i], source.clear.eq(1)),
                self.pending.w[i].eq(source.pending)
            ]

        irqs = [self.pending.w[i] & self.enable.storage[i] for i in range(n)]
        self.comb += self.irq.eq(optree("|", irqs))

    def __setattr__(self, name, value):
        object.__setattr__(self, name, value)
        if isinstance(value, _EventSource):
            if self.finalized:
                raise FinalizeError
            self.submodules += value


class SharedIRQ(Module):
    def __init__(self, *event_managers):
        self.irq = Signal()
        self.comb += self.irq.eq(optree("|", [ev.irq for ev in event_managers]))
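A hedged sketch of the usual EventManager wiring; the rx_idle event, the busy signal and the explicit finalize call are assumptions for the example, not taken from this commit:

class ExampleCore(Module, AutoCSR):
    def __init__(self):
        self.submodules.ev = EventManager()
        self.ev.rx_idle = EventSourceProcess()  # pending set on the falling edge of trigger
        self.ev.finalize()

        rx_busy = Signal()
        self.comb += self.ev.rx_idle.trigger.eq(rx_busy)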
@@ -1,27 +0,0 @@
from migen.fhdl.std import *
from migen.bus import wishbone
from migen.bank.bank import GenericBank


class Bank(GenericBank):
    def __init__(self, description, bus=None):
        if bus is None:
            bus = wishbone.Interface()
        self.bus = bus

        ###

        GenericBank.__init__(self, description, flen(self.bus.dat_w))

        for i, c in enumerate(self.simple_csrs):
            self.comb += [
                c.r.eq(self.bus.dat_w[:c.size]),
                c.re.eq(self.bus.cyc & self.bus.stb & ~self.bus.ack & self.bus.we & \
                    (self.bus.adr[:self.decode_bits] == i))
            ]

        brcases = dict((i, self.bus.dat_r.eq(c.w)) for i, c in enumerate(self.simple_csrs))
        self.sync += [
            Case(self.bus.adr[:self.decode_bits], brcases),
            If(bus.ack, bus.ack.eq(0)).Elif(bus.cyc & bus.stb, bus.ack.eq(1))
        ]
134  migen/bus/csr.py
|
@ -1,134 +0,0 @@
|
|||
from migen.fhdl.std import *
|
||||
from migen.bus.transactions import *
|
||||
from migen.bank.description import CSRStorage
|
||||
from migen.genlib.record import *
|
||||
from migen.genlib.misc import chooser
|
||||
|
||||
_layout = [
|
||||
("adr", "address_width", DIR_M_TO_S),
|
||||
("we", 1, DIR_M_TO_S),
|
||||
("dat_w", "data_width", DIR_M_TO_S),
|
||||
("dat_r", "data_width", DIR_S_TO_M)
|
||||
]
|
||||
|
||||
|
||||
class Interface(Record):
|
||||
def __init__(self, data_width=8, address_width=14):
|
||||
Record.__init__(self, set_layout_parameters(_layout,
|
||||
data_width=data_width, address_width=address_width))
|
||||
|
||||
|
||||
class Interconnect(Module):
|
||||
def __init__(self, master, slaves):
|
||||
self.comb += master.connect(*slaves)
|
||||
|
||||
|
||||
class Initiator(Module):
|
||||
def __init__(self, generator, bus=None):
|
||||
self.generator = generator
|
||||
if bus is None:
|
||||
bus = Interface()
|
||||
self.bus = bus
|
||||
self.transaction = None
|
||||
self.read_data_ready = False
|
||||
self.done = False
|
||||
|
||||
def do_simulation(self, selfp):
|
||||
if not self.done:
|
||||
if self.transaction is not None:
|
||||
if isinstance(self.transaction, TRead):
|
||||
if self.read_data_ready:
|
||||
self.transaction.data = selfp.bus.dat_r
|
||||
self.transaction = None
|
||||
self.read_data_ready = False
|
||||
else:
|
||||
self.read_data_ready = True
|
||||
else:
|
||||
selfp.bus.we = 0
|
||||
self.transaction = None
|
||||
if self.transaction is None:
|
||||
try:
|
||||
self.transaction = next(self.generator)
|
||||
except StopIteration:
|
||||
self.transaction = None
|
||||
raise StopSimulation
|
||||
if self.transaction is not None:
|
||||
selfp.bus.adr = self.transaction.address
|
||||
if isinstance(self.transaction, TWrite):
|
||||
selfp.bus.we = 1
|
||||
selfp.bus.dat_w = self.transaction.data
|
||||
|
||||
|
||||
class SRAM(Module):
|
||||
def __init__(self, mem_or_size, address, read_only=None, init=None, bus=None):
|
||||
if bus is None:
|
||||
bus = Interface()
|
||||
self.bus = bus
|
||||
data_width = flen(self.bus.dat_w)
|
||||
if isinstance(mem_or_size, Memory):
|
||||
mem = mem_or_size
|
||||
else:
|
||||
mem = Memory(data_width, mem_or_size//(data_width//8), init=init)
|
||||
csrw_per_memw = (mem.width + data_width - 1)//data_width
|
||||
word_bits = log2_int(csrw_per_memw)
|
||||
page_bits = log2_int((mem.depth*csrw_per_memw + 511)//512, False)
|
||||
if page_bits:
|
||||
self._page = CSRStorage(page_bits, name=mem.name_override + "_page")
|
||||
else:
|
||||
self._page = None
|
||||
if read_only is None:
|
||||
if hasattr(mem, "bus_read_only"):
|
||||
read_only = mem.bus_read_only
|
||||
else:
|
||||
read_only = False
|
||||
|
||||
###
|
||||
|
||||
port = mem.get_port(write_capable=not read_only)
|
||||
self.specials += mem, port
|
||||
|
||||
sel = Signal()
|
||||
sel_r = Signal()
|
||||
self.sync += sel_r.eq(sel)
|
||||
self.comb += sel.eq(self.bus.adr[9:] == address)
|
||||
|
||||
if word_bits:
|
||||
word_index = Signal(word_bits)
|
||||
word_expanded = Signal(csrw_per_memw*data_width)
|
||||
self.sync += word_index.eq(self.bus.adr[:word_bits])
|
||||
self.comb += [
|
||||
word_expanded.eq(port.dat_r),
|
||||
If(sel_r,
|
||||
chooser(word_expanded, word_index, self.bus.dat_r, n=csrw_per_memw, reverse=True)
|
||||
)
|
||||
]
|
||||
if not read_only:
|
||||
wregs = []
|
||||
for i in range(csrw_per_memw-1):
|
||||
wreg = Signal(data_width)
|
||||
self.sync += If(sel & self.bus.we & (self.bus.adr[:word_bits] == i), wreg.eq(self.bus.dat_w))
|
||||
wregs.append(wreg)
|
||||
memword_chunks = [self.bus.dat_w] + list(reversed(wregs))
|
||||
self.comb += [
|
||||
port.we.eq(sel & self.bus.we & (self.bus.adr[:word_bits] == csrw_per_memw - 1)),
|
||||
port.dat_w.eq(Cat(*memword_chunks))
|
||||
]
|
||||
else:
|
||||
self.comb += If(sel_r, self.bus.dat_r.eq(port.dat_r))
|
||||
if not read_only:
|
||||
self.comb += [
|
||||
port.we.eq(sel & self.bus.we),
|
||||
port.dat_w.eq(self.bus.dat_w)
|
||||
]
|
||||
|
||||
if self._page is None:
|
||||
self.comb += port.adr.eq(self.bus.adr[word_bits:word_bits+flen(port.adr)])
|
||||
else:
|
||||
pv = self._page.storage
|
||||
self.comb += port.adr.eq(Cat(self.bus.adr[word_bits:word_bits+flen(port.adr)-flen(pv)], pv))
|
||||
|
||||
def get_csrs(self):
|
||||
if self._page is None:
|
||||
return []
|
||||
else:
|
||||
return [self._page]
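To make the paging arithmetic above concrete: with the default 8-bit CSR data path and a 32-bit wide, 1024-deep memory (illustrative numbers, not taken from this file), the derived constants work out as follows:

data_width = 8                       # CSR bus data width
mem_width, mem_depth = 32, 1024      # example memory
csrw_per_memw = (mem_width + data_width - 1)//data_width   # 4 CSR words per memory word
word_bits = 2                        # log2_int(4)
page_bits = 3                        # log2_int((1024*4 + 511)//512) = log2_int(8)
# a 3-bit _page CSRStorage then selects which 512-CSR-word window of the memory is visible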
|
|
@ -1,36 +0,0 @@
|
|||
from migen.fhdl.std import *
|
||||
from migen.bus.transactions import *
|
||||
|
||||
|
||||
def _byte_mask(orig, dat_w, sel):
|
||||
r = 0
|
||||
shift = 0
|
||||
while sel:
|
||||
if sel & 1:
|
||||
r |= (dat_w & 0xff) << shift
|
||||
else:
|
||||
r |= (orig & 0xff) << shift
|
||||
orig >>= 8
|
||||
dat_w >>= 8
|
||||
sel >>= 1
|
||||
shift += 8
|
||||
return r
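For example, with sel = 0b1010 only bytes 1 and 3 are taken from the write data; note that the loop stops once sel runs out of set bits, so bytes above the highest selected byte are dropped:

assert _byte_mask(0x11223344, 0xAABBCCDD, 0b1010) == 0xAA22CC44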
|
||||
|
||||
|
||||
class Initiator(Module):
|
||||
def __init__(self, generator, mem):
|
||||
self.generator = generator
|
||||
self.mem = mem
|
||||
|
||||
def do_simulation(self, selfp):
|
||||
try:
|
||||
transaction = next(self.generator)
|
||||
except StopIteration:
|
||||
transaction = None
|
||||
raise StopSimulation
|
||||
if isinstance(transaction, TRead):
|
||||
transaction.data = selfp.mem[transaction.address]
|
||||
elif isinstance(transaction, TWrite):
|
||||
d = selfp.mem[transaction.address]
|
||||
d_mask = _byte_mask(d, transaction.data, transaction.sel)
|
||||
selfp.mem[transaction.address] = d_mask
|
|
@ -1,24 +0,0 @@
from migen.fhdl.std import *


class Transaction:
    def __init__(self, address, data=0, sel=None, busname=None):
        self.address = address
        self.data = data
        if sel is None:
            bytes = (bits_for(data) + 7)//8
            sel = 2**bytes - 1
        self.sel = sel
        self.busname = busname
        self.latency = 0

    def __str__(self):
        return "<" + self.__class__.__name__ + " adr:" + hex(self.address) + " dat:" + hex(self.data) + ">"


class TRead(Transaction):
    pass


class TWrite(Transaction):
    pass
|
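When sel is not given, it is derived from the number of bytes needed to represent data; for instance:

t = TWrite(0x1000, 0x1234)   # bits_for(0x1234) = 13 -> 2 bytes -> sel = 0b11
print(t)                     # <TWrite adr:0x1000 dat:0x1234>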
@ -1,691 +0,0 @@
|
|||
from migen.fhdl.std import *
|
||||
from migen.genlib import roundrobin
|
||||
from migen.genlib.record import *
|
||||
from migen.genlib.misc import split, displacer, optree, chooser
|
||||
from migen.genlib.misc import FlipFlop, Counter
|
||||
from migen.genlib.fsm import FSM, NextState
|
||||
from migen.bus.transactions import *
|
||||
|
||||
_layout = [
|
||||
("adr", 30, DIR_M_TO_S),
|
||||
("dat_w", "data_width", DIR_M_TO_S),
|
||||
("dat_r", "data_width", DIR_S_TO_M),
|
||||
("sel", "sel_width", DIR_M_TO_S),
|
||||
("cyc", 1, DIR_M_TO_S),
|
||||
("stb", 1, DIR_M_TO_S),
|
||||
("ack", 1, DIR_S_TO_M),
|
||||
("we", 1, DIR_M_TO_S),
|
||||
("cti", 3, DIR_M_TO_S),
|
||||
("bte", 2, DIR_M_TO_S),
|
||||
("err", 1, DIR_S_TO_M)
|
||||
]
|
||||
|
||||
|
||||
class Interface(Record):
|
||||
def __init__(self, data_width=32):
|
||||
Record.__init__(self, set_layout_parameters(_layout,
|
||||
data_width=data_width,
|
||||
sel_width=data_width//8))
|
||||
|
||||
|
||||
class InterconnectPointToPoint(Module):
|
||||
def __init__(self, master, slave):
|
||||
self.comb += master.connect(slave)
|
||||
|
||||
|
||||
class Arbiter(Module):
|
||||
def __init__(self, masters, target):
|
||||
self.submodules.rr = roundrobin.RoundRobin(len(masters))
|
||||
|
||||
# mux master->slave signals
|
||||
for name, size, direction in _layout:
|
||||
if direction == DIR_M_TO_S:
|
||||
choices = Array(getattr(m, name) for m in masters)
|
||||
self.comb += getattr(target, name).eq(choices[self.rr.grant])
|
||||
|
||||
# connect slave->master signals
|
||||
for name, size, direction in _layout:
|
||||
if direction == DIR_S_TO_M:
|
||||
source = getattr(target, name)
|
||||
for i, m in enumerate(masters):
|
||||
dest = getattr(m, name)
|
||||
if name == "ack" or name == "err":
|
||||
self.comb += dest.eq(source & (self.rr.grant == i))
|
||||
else:
|
||||
self.comb += dest.eq(source)
|
||||
|
||||
# connect bus requests to round-robin selector
|
||||
reqs = [m.cyc for m in masters]
|
||||
self.comb += self.rr.request.eq(Cat(*reqs))
|
||||
|
||||
|
||||
class Decoder(Module):
|
||||
# slaves is a list of pairs:
|
||||
# 0) function that takes the address signal and returns a FHDL expression
|
||||
# that evaluates to 1 when the slave is selected and 0 otherwise.
|
||||
# 1) wishbone.Slave reference.
|
||||
# register adds flip-flops after the address comparators. Improves timing,
|
||||
# but breaks Wishbone combinatorial feedback.
|
||||
def __init__(self, master, slaves, register=False):
|
||||
ns = len(slaves)
|
||||
slave_sel = Signal(ns)
|
||||
slave_sel_r = Signal(ns)
|
||||
|
||||
# decode slave addresses
|
||||
self.comb += [slave_sel[i].eq(fun(master.adr))
|
||||
for i, (fun, bus) in enumerate(slaves)]
|
||||
if register:
|
||||
self.sync += slave_sel_r.eq(slave_sel)
|
||||
else:
|
||||
self.comb += slave_sel_r.eq(slave_sel)
|
||||
|
||||
# connect master->slaves signals except cyc
|
||||
for slave in slaves:
|
||||
for name, size, direction in _layout:
|
||||
if direction == DIR_M_TO_S and name != "cyc":
|
||||
self.comb += getattr(slave[1], name).eq(getattr(master, name))
|
||||
|
||||
# combine cyc with slave selection signals
|
||||
self.comb += [slave[1].cyc.eq(master.cyc & slave_sel[i])
|
||||
for i, slave in enumerate(slaves)]
|
||||
|
||||
# generate master ack (resp. err) by ORing all slave acks (resp. errs)
|
||||
self.comb += [
|
||||
master.ack.eq(optree("|", [slave[1].ack for slave in slaves])),
|
||||
master.err.eq(optree("|", [slave[1].err for slave in slaves]))
|
||||
]
|
||||
|
||||
# mux (1-hot) slave data return
|
||||
masked = [Replicate(slave_sel_r[i], flen(master.dat_r)) & slaves[i][1].dat_r for i in range(ns)]
|
||||
self.comb += master.dat_r.eq(optree("|", masked))
|
||||
|
||||
|
||||
class InterconnectShared(Module):
|
||||
def __init__(self, masters, slaves, register=False):
|
||||
shared = Interface()
|
||||
self.submodules += Arbiter(masters, shared)
|
||||
self.submodules += Decoder(shared, slaves, register)
|
||||
|
||||
|
||||
class Crossbar(Module):
|
||||
def __init__(self, masters, slaves, register=False):
|
||||
matches, busses = zip(*slaves)
|
||||
access = [[Interface() for j in slaves] for i in masters]
|
||||
# decode each master into its access row
|
||||
for row, master in zip(access, masters):
|
||||
row = list(zip(matches, row))
|
||||
self.submodules += Decoder(master, row, register)
|
||||
# arbitrate each access column onto its slave
|
||||
for column, bus in zip(zip(*access), busses):
|
||||
self.submodules += Arbiter(column, bus)
|
||||
|
||||
|
||||
class DownConverter(Module):
|
||||
"""DownConverter
|
||||
|
||||
This module splits Wishbone accesses from a master interface to a smaller
|
||||
slave interface.
|
||||
|
||||
Writes:
|
||||
Writes from the master are split into N writes to the slave. The access is
acked when the last access is acked by the slave.
|
||||
|
||||
Reads:
|
||||
Reads from the master are split into N reads to the slave. Read data from
the slave is cached before being presented, concatenated, on the last access.
|
||||
|
||||
TODO:
|
||||
Manage err signal? (Not implemented since we generally don't use it on Migen/MiSoC modules)
|
||||
"""
|
||||
def __init__(self, master, slave):
|
||||
dw_from = flen(master.dat_r)
|
||||
dw_to = flen(slave.dat_w)
|
||||
ratio = dw_from//dw_to
|
||||
|
||||
# # #
|
||||
|
||||
read = Signal()
|
||||
write = Signal()
|
||||
|
||||
counter = Counter(max=ratio)
|
||||
self.submodules += counter
|
||||
counter_done = Signal()
|
||||
self.comb += counter_done.eq(counter.value == ratio-1)
|
||||
|
||||
# Main FSM
|
||||
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
|
||||
fsm.act("IDLE",
|
||||
counter.reset.eq(1),
|
||||
If(master.stb & master.cyc,
|
||||
If(master.we,
|
||||
NextState("WRITE")
|
||||
).Else(
|
||||
NextState("READ")
|
||||
)
|
||||
)
|
||||
)
|
||||
fsm.act("WRITE",
|
||||
write.eq(1),
|
||||
slave.we.eq(1),
|
||||
slave.cyc.eq(1),
|
||||
If(master.stb & master.cyc,
|
||||
slave.stb.eq(1),
|
||||
If(slave.ack,
|
||||
counter.ce.eq(1),
|
||||
If(counter_done,
|
||||
master.ack.eq(1),
|
||||
NextState("IDLE")
|
||||
)
|
||||
)
|
||||
).Elif(~master.cyc,
|
||||
NextState("IDLE")
|
||||
)
|
||||
)
|
||||
fsm.act("READ",
|
||||
read.eq(1),
|
||||
slave.cyc.eq(1),
|
||||
If(master.stb & master.cyc,
|
||||
slave.stb.eq(1),
|
||||
If(slave.ack,
|
||||
counter.ce.eq(1),
|
||||
If(counter_done,
|
||||
master.ack.eq(1),
|
||||
NextState("IDLE")
|
||||
)
|
||||
)
|
||||
).Elif(~master.cyc,
|
||||
NextState("IDLE")
|
||||
)
|
||||
)
|
||||
|
||||
# Address
|
||||
self.comb += [
|
||||
If(counter_done,
|
||||
slave.cti.eq(7) # indicate end of burst
|
||||
).Else(
|
||||
slave.cti.eq(2)
|
||||
),
|
||||
slave.adr.eq(Cat(counter.value, master.adr))
|
||||
]
|
||||
|
||||
# Datapath
|
||||
cases = {}
|
||||
for i in range(ratio):
|
||||
cases[i] = [
|
||||
slave.sel.eq(master.sel[i*dw_to//8:(i+1)*dw_to//8]),
|
||||
slave.dat_w.eq(master.dat_w[i*dw_to:(i+1)*dw_to])
|
||||
]
|
||||
self.comb += Case(counter.value, cases)
|
||||
|
||||
|
||||
cached_data = Signal(dw_from)
|
||||
self.comb += master.dat_r.eq(Cat(cached_data[dw_to:], slave.dat_r))
|
||||
self.sync += \
|
||||
If(read & counter.ce,
|
||||
cached_data.eq(master.dat_r)
|
||||
)
|
||||
|
||||
|
||||
class UpConverter(Module):
|
||||
"""UpConverter
|
||||
|
||||
This module up-converts wishbone accesses and bursts from a master interface
to a wider slave interface. This allows efficient use of wishbone bursts.
|
||||
|
||||
Writes:
|
||||
Wishbone writes are cached before being written to the slave. Access to
the slave is done at the end of a burst or when the address reaches the end
of the burst addressing range.
|
||||
|
||||
Reads:
|
||||
The cache is refilled only at the beginning of each burst; subsequent
reads of the burst use the cached data.
|
||||
|
||||
TODO:
|
||||
Manage err signal? (Not implemented since we generally don't use it on Migen/MiSoC modules)
|
||||
"""
|
||||
def __init__(self, master, slave):
|
||||
dw_from = flen(master.dat_r)
|
||||
dw_to = flen(slave.dat_w)
|
||||
ratio = dw_to//dw_from
|
||||
ratiobits = log2_int(ratio)
|
||||
|
||||
# # #
|
||||
|
||||
write = Signal()
|
||||
evict = Signal()
|
||||
refill = Signal()
|
||||
read = Signal()
|
||||
|
||||
address = FlipFlop(30)
|
||||
self.submodules += address
|
||||
self.comb += address.d.eq(master.adr)
|
||||
|
||||
counter = Counter(max=ratio)
|
||||
self.submodules += counter
|
||||
counter_offset = Signal(max=ratio)
|
||||
counter_done = Signal()
|
||||
self.comb += [
|
||||
counter_offset.eq(address.q),
|
||||
counter_done.eq((counter.value + counter_offset) == ratio-1)
|
||||
]
|
||||
|
||||
cached_data = Signal(dw_to)
|
||||
cached_sel = Signal(dw_to//8)
|
||||
|
||||
end_of_burst = Signal()
|
||||
self.comb += end_of_burst.eq(~master.cyc |
|
||||
(master.stb & master.cyc & master.ack & ((master.cti == 7) | counter_done)))
|
||||
|
||||
|
||||
need_refill = FlipFlop(reset=1)
|
||||
self.submodules += need_refill
|
||||
self.comb += [
|
||||
need_refill.reset.eq(end_of_burst),
|
||||
need_refill.d.eq(0)
|
||||
]
|
||||
|
||||
# Main FSM
|
||||
self.submodules.fsm = fsm = FSM()
|
||||
fsm.act("IDLE",
|
||||
counter.reset.eq(1),
|
||||
If(master.stb & master.cyc,
|
||||
address.ce.eq(1),
|
||||
If(master.we,
|
||||
NextState("WRITE")
|
||||
).Else(
|
||||
If(need_refill.q,
|
||||
NextState("REFILL")
|
||||
).Else(
|
||||
NextState("READ")
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
fsm.act("WRITE",
|
||||
If(master.stb & master.cyc,
|
||||
write.eq(1),
|
||||
counter.ce.eq(1),
|
||||
master.ack.eq(1),
|
||||
If(counter_done,
|
||||
NextState("EVICT")
|
||||
)
|
||||
).Elif(~master.cyc,
|
||||
NextState("EVICT")
|
||||
)
|
||||
)
|
||||
fsm.act("EVICT",
|
||||
evict.eq(1),
|
||||
slave.stb.eq(1),
|
||||
slave.we.eq(1),
|
||||
slave.cyc.eq(1),
|
||||
slave.dat_w.eq(cached_data),
|
||||
slave.sel.eq(cached_sel),
|
||||
If(slave.ack,
|
||||
NextState("IDLE")
|
||||
)
|
||||
)
|
||||
fsm.act("REFILL",
|
||||
refill.eq(1),
|
||||
slave.stb.eq(1),
|
||||
slave.cyc.eq(1),
|
||||
If(slave.ack,
|
||||
need_refill.ce.eq(1),
|
||||
NextState("READ")
|
||||
)
|
||||
)
|
||||
fsm.act("READ",
|
||||
read.eq(1),
|
||||
If(master.stb & master.cyc,
|
||||
master.ack.eq(1)
|
||||
),
|
||||
NextState("IDLE")
|
||||
)
|
||||
|
||||
# Address
|
||||
self.comb += [
|
||||
slave.cti.eq(7), # we are not able to generate bursts since up-converting
|
||||
slave.adr.eq(address.q[ratiobits:])
|
||||
]
|
||||
|
||||
# Datapath
|
||||
cached_datas = [FlipFlop(dw_from) for i in range(ratio)]
|
||||
cached_sels = [FlipFlop(dw_from//8) for i in range(ratio)]
|
||||
self.submodules += cached_datas, cached_sels
|
||||
|
||||
cases = {}
|
||||
for i in range(ratio):
|
||||
write_sel = Signal()
|
||||
cases[i] = write_sel.eq(1)
|
||||
self.comb += [
|
||||
cached_sels[i].reset.eq(counter.reset),
|
||||
If(write,
|
||||
cached_datas[i].d.eq(master.dat_w),
|
||||
).Else(
|
||||
cached_datas[i].d.eq(slave.dat_r[dw_from*i:dw_from*(i+1)])
|
||||
),
|
||||
cached_sels[i].d.eq(master.sel),
|
||||
If((write & write_sel) | refill,
|
||||
cached_datas[i].ce.eq(1),
|
||||
cached_sels[i].ce.eq(1)
|
||||
)
|
||||
]
|
||||
self.comb += Case(counter.value + counter_offset, cases)
|
||||
|
||||
cases = {}
|
||||
for i in range(ratio):
|
||||
cases[i] = master.dat_r.eq(cached_datas[i].q)
|
||||
self.comb += Case(address.q[:ratiobits], cases)
|
||||
|
||||
self.comb += [
|
||||
cached_data.eq(Cat([cached_data.q for cached_data in cached_datas])),
|
||||
cached_sel.eq(Cat([cached_sel.q for cached_sel in cached_sels]))
|
||||
]
|
||||
|
||||
|
||||
class Converter(Module):
|
||||
"""Converter
|
||||
|
||||
This module is a wrapper for DownConverter and UpConverter.
|
||||
It should preferably be used rather than direct instantiations
|
||||
of specific converters.
|
||||
"""
|
||||
def __init__(self, master, slave):
|
||||
self.master = master
|
||||
self.slave = slave
|
||||
|
||||
# # #
|
||||
|
||||
dw_from = flen(master.dat_r)
|
||||
dw_to = flen(slave.dat_r)
|
||||
if dw_from > dw_to:
|
||||
downconverter = DownConverter(master, slave)
|
||||
self.submodules += downconverter
|
||||
elif dw_from < dw_to:
|
||||
upconverter = UpConverter(master, slave)
|
||||
self.submodules += upconverter
|
||||
else:
|
||||
Record.connect(master, slave)
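A short usage sketch: the wrapper picks the converter from the two data widths, so connecting a 32-bit master to an 8-bit slave instantiates a DownConverter with a ratio of 4 (the widths here are only an example):

master = Interface(data_width=32)
slave = Interface(data_width=8)
converter = Converter(master, slave)   # dw_from=32 > dw_to=8 -> DownConverter, ratio = 4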
|
||||
|
||||
|
||||
class Cache(Module):
|
||||
"""Cache
|
||||
|
||||
This module is a write-back wishbone cache that can be used as an L2 cache.
cachesize (in 32-bit words) is the size of the data store and must be a power of 2.
|
||||
"""
|
||||
def __init__(self, cachesize, master, slave):
|
||||
self.master = master
|
||||
self.slave = slave
|
||||
|
||||
###
|
||||
|
||||
dw_from = flen(master.dat_r)
|
||||
dw_to = flen(slave.dat_r)
|
||||
if dw_to > dw_from and (dw_to % dw_from) != 0:
|
||||
raise ValueError("Slave data width must be a multiple of {dw}".format(dw=dw_from))
|
||||
if dw_to < dw_from and (dw_from % dw_to) != 0:
|
||||
raise ValueError("Master data width must be a multiple of {dw}".format(dw=dw_to))
|
||||
|
||||
# Split address:
|
||||
# TAG | LINE NUMBER | LINE OFFSET
|
||||
offsetbits = log2_int(max(dw_to//dw_from, 1))
|
||||
addressbits = flen(slave.adr) + offsetbits
|
||||
linebits = log2_int(cachesize) - offsetbits
|
||||
tagbits = addressbits - linebits
|
||||
wordbits = log2_int(max(dw_from//dw_to, 1))
|
||||
adr_offset, adr_line, adr_tag = split(master.adr, offsetbits, linebits, tagbits)
|
||||
word = Signal(wordbits) if wordbits else None
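# Worked example (illustrative widths, not taken from this file):
# with a 32-bit master, a 64-bit slave and cachesize=512,
#   offsetbits  = log2_int(max(64//32, 1)) = 1
#   addressbits = 30 + 1 = 31
#   linebits    = log2_int(512) - 1 = 8
#   tagbits     = 31 - 8 = 23
#   wordbits    = log2_int(max(32//64, 1)) = 0, so word is None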
|
||||
|
||||
# Data memory
|
||||
data_mem = Memory(dw_to*2**wordbits, 2**linebits)
|
||||
data_port = data_mem.get_port(write_capable=True, we_granularity=8)
|
||||
self.specials += data_mem, data_port
|
||||
|
||||
write_from_slave = Signal()
|
||||
if adr_offset is None:
|
||||
adr_offset_r = None
|
||||
else:
|
||||
adr_offset_r = Signal(offsetbits)
|
||||
self.sync += adr_offset_r.eq(adr_offset)
|
||||
|
||||
self.comb += [
|
||||
data_port.adr.eq(adr_line),
|
||||
If(write_from_slave,
|
||||
displacer(slave.dat_r, word, data_port.dat_w),
|
||||
displacer(Replicate(1, dw_to//8), word, data_port.we)
|
||||
).Else(
|
||||
data_port.dat_w.eq(Replicate(master.dat_w, max(dw_to//dw_from, 1))),
|
||||
If(master.cyc & master.stb & master.we & master.ack,
|
||||
displacer(master.sel, adr_offset, data_port.we, 2**offsetbits, reverse=True)
|
||||
)
|
||||
),
|
||||
chooser(data_port.dat_r, word, slave.dat_w),
|
||||
slave.sel.eq(2**(dw_to//8)-1),
|
||||
chooser(data_port.dat_r, adr_offset_r, master.dat_r, reverse=True)
|
||||
]
|
||||
|
||||
|
||||
# Tag memory
|
||||
tag_layout = [("tag", tagbits), ("dirty", 1)]
|
||||
tag_mem = Memory(layout_len(tag_layout), 2**linebits)
|
||||
tag_port = tag_mem.get_port(write_capable=True)
|
||||
self.specials += tag_mem, tag_port
|
||||
tag_do = Record(tag_layout)
|
||||
tag_di = Record(tag_layout)
|
||||
self.comb += [
|
||||
tag_do.raw_bits().eq(tag_port.dat_r),
|
||||
tag_port.dat_w.eq(tag_di.raw_bits())
|
||||
]
|
||||
|
||||
self.comb += [
|
||||
tag_port.adr.eq(adr_line),
|
||||
tag_di.tag.eq(adr_tag)
|
||||
]
|
||||
if word is not None:
|
||||
self.comb += slave.adr.eq(Cat(word, adr_line, tag_do.tag))
|
||||
else:
|
||||
self.comb += slave.adr.eq(Cat(adr_line, tag_do.tag))
|
||||
|
||||
# slave word computation, word_clr and word_inc will be simplified
|
||||
# at synthesis when wordbits=0
|
||||
word_clr = Signal()
|
||||
word_inc = Signal()
|
||||
if word is not None:
|
||||
self.sync += \
|
||||
If(word_clr,
|
||||
word.eq(0),
|
||||
).Elif(word_inc,
|
||||
word.eq(word+1)
|
||||
)
|
||||
|
||||
def word_is_last(word):
|
||||
if word is not None:
|
||||
return word == 2**wordbits-1
|
||||
else:
|
||||
return 1
|
||||
|
||||
# Control FSM
|
||||
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
|
||||
fsm.act("IDLE",
|
||||
If(master.cyc & master.stb,
|
||||
NextState("TEST_HIT")
|
||||
)
|
||||
)
|
||||
fsm.act("TEST_HIT",
|
||||
word_clr.eq(1),
|
||||
If(tag_do.tag == adr_tag,
|
||||
master.ack.eq(1),
|
||||
If(master.we,
|
||||
tag_di.dirty.eq(1),
|
||||
tag_port.we.eq(1)
|
||||
),
|
||||
NextState("IDLE")
|
||||
).Else(
|
||||
If(tag_do.dirty,
|
||||
NextState("EVICT")
|
||||
).Else(
|
||||
NextState("REFILL_WRTAG")
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
fsm.act("EVICT",
|
||||
slave.stb.eq(1),
|
||||
slave.cyc.eq(1),
|
||||
slave.we.eq(1),
|
||||
If(slave.ack,
|
||||
word_inc.eq(1),
|
||||
If(word_is_last(word),
|
||||
NextState("REFILL_WRTAG")
|
||||
)
|
||||
)
|
||||
)
|
||||
fsm.act("REFILL_WRTAG",
|
||||
# Write the tag first to set the slave address
|
||||
tag_port.we.eq(1),
|
||||
word_clr.eq(1),
|
||||
NextState("REFILL")
|
||||
)
|
||||
fsm.act("REFILL",
|
||||
slave.stb.eq(1),
|
||||
slave.cyc.eq(1),
|
||||
slave.we.eq(0),
|
||||
If(slave.ack,
|
||||
write_from_slave.eq(1),
|
||||
word_inc.eq(1),
|
||||
If(word_is_last(word),
|
||||
NextState("TEST_HIT"),
|
||||
).Else(
|
||||
NextState("REFILL")
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class Tap(Module):
|
||||
def __init__(self, bus, handler=print):
|
||||
self.bus = bus
|
||||
self.handler = handler
|
||||
|
||||
def do_simulation(self, selfp):
|
||||
if selfp.bus.ack:
|
||||
assert(selfp.bus.cyc and selfp.bus.stb)
|
||||
if selfp.bus.we:
|
||||
transaction = TWrite(selfp.bus.adr,
|
||||
selfp.bus.dat_w,
|
||||
selfp.bus.sel)
|
||||
else:
|
||||
transaction = TRead(selfp.bus.adr,
|
||||
selfp.bus.dat_r)
|
||||
self.handler(transaction)
|
||||
do_simulation.passive = True
|
||||
|
||||
|
||||
class Initiator(Module):
|
||||
def __init__(self, generator, bus=None):
|
||||
self.generator = generator
|
||||
if bus is None:
|
||||
bus = Interface()
|
||||
self.bus = bus
|
||||
self.transaction_start = 0
|
||||
self.transaction = None
|
||||
|
||||
def do_simulation(self, selfp):
|
||||
if self.transaction is None or selfp.bus.ack:
|
||||
if self.transaction is not None:
|
||||
self.transaction.latency = selfp.simulator.cycle_counter - self.transaction_start - 1
|
||||
if isinstance(self.transaction, TRead):
|
||||
self.transaction.data = selfp.bus.dat_r
|
||||
try:
|
||||
self.transaction = next(self.generator)
|
||||
except StopIteration:
|
||||
selfp.bus.cyc = 0
|
||||
selfp.bus.stb = 0
|
||||
raise StopSimulation
|
||||
if self.transaction is not None:
|
||||
self.transaction_start = selfp.simulator.cycle_counter
|
||||
selfp.bus.cyc = 1
|
||||
selfp.bus.stb = 1
|
||||
selfp.bus.adr = self.transaction.address
|
||||
if isinstance(self.transaction, TWrite):
|
||||
selfp.bus.we = 1
|
||||
selfp.bus.sel = self.transaction.sel
|
||||
selfp.bus.dat_w = self.transaction.data
|
||||
else:
|
||||
selfp.bus.we = 0
|
||||
else:
|
||||
selfp.bus.cyc = 0
|
||||
selfp.bus.stb = 0
|
||||
|
||||
|
||||
class TargetModel:
|
||||
def read(self, address):
|
||||
return 0
|
||||
|
||||
def write(self, address, data, sel):
|
||||
pass
|
||||
|
||||
def can_ack(self, bus):
|
||||
return True
|
||||
|
||||
|
||||
class Target(Module):
|
||||
def __init__(self, model, bus=None):
|
||||
if bus is None:
|
||||
bus = Interface()
|
||||
self.bus = bus
|
||||
self.model = model
|
||||
|
||||
def do_simulation(self, selfp):
|
||||
bus = selfp.bus
|
||||
if not bus.ack:
|
||||
if self.model.can_ack(bus) and bus.cyc and bus.stb:
|
||||
if bus.we:
|
||||
self.model.write(bus.adr, bus.dat_w, bus.sel)
|
||||
else:
|
||||
bus.dat_r = self.model.read(bus.adr)
|
||||
bus.ack = 1
|
||||
else:
|
||||
bus.ack = 0
|
||||
do_simulation.passive = True
|
||||
|
||||
|
||||
class SRAM(Module):
|
||||
def __init__(self, mem_or_size, read_only=None, init=None, bus=None):
|
||||
if bus is None:
|
||||
bus = Interface()
|
||||
self.bus = bus
|
||||
bus_data_width = flen(self.bus.dat_r)
|
||||
if isinstance(mem_or_size, Memory):
|
||||
assert(mem_or_size.width <= bus_data_width)
|
||||
self.mem = mem_or_size
|
||||
else:
|
||||
self.mem = Memory(bus_data_width, mem_or_size//(bus_data_width//8), init=init)
|
||||
if read_only is None:
|
||||
if hasattr(self.mem, "bus_read_only"):
|
||||
read_only = self.mem.bus_read_only
|
||||
else:
|
||||
read_only = False
|
||||
|
||||
###
|
||||
|
||||
# memory
|
||||
port = self.mem.get_port(write_capable=not read_only, we_granularity=8)
|
||||
self.specials += self.mem, port
|
||||
# generate write enable signal
|
||||
if not read_only:
|
||||
self.comb += [port.we[i].eq(self.bus.cyc & self.bus.stb & self.bus.we & self.bus.sel[i])
|
||||
for i in range(4)]
|
||||
# address and data
|
||||
self.comb += [
|
||||
port.adr.eq(self.bus.adr[:flen(port.adr)]),
|
||||
self.bus.dat_r.eq(port.dat_r)
|
||||
]
|
||||
if not read_only:
|
||||
self.comb += port.dat_w.eq(self.bus.dat_w),
|
||||
# generate ack
|
||||
self.sync += [
|
||||
self.bus.ack.eq(0),
|
||||
If(self.bus.cyc & self.bus.stb & ~self.bus.ack, self.bus.ack.eq(1))
|
||||
]
|
|
@ -1,28 +0,0 @@
from migen.fhdl.std import *
from migen.bus import wishbone
from migen.bus import csr
from migen.genlib.misc import timeline


class WB2CSR(Module):
    def __init__(self, bus_wishbone=None, bus_csr=None):
        if bus_wishbone is None:
            bus_wishbone = wishbone.Interface()
        self.wishbone = bus_wishbone
        if bus_csr is None:
            bus_csr = csr.Interface()
        self.csr = bus_csr

        ###

        self.sync += [
            self.csr.we.eq(0),
            self.csr.dat_w.eq(self.wishbone.dat_w),
            self.csr.adr.eq(self.wishbone.adr),
            self.wishbone.dat_r.eq(self.csr.dat_r)
        ]
        self.sync += timeline(self.wishbone.cyc & self.wishbone.stb, [
            (1, [self.csr.we.eq(self.wishbone.we)]),
            (2, [self.wishbone.ack.eq(1)]),
            (3, [self.wishbone.ack.eq(0)])
        ])
|
@ -1,184 +0,0 @@
|
|||
from migen.util.misc import xdir
|
||||
from migen.fhdl.std import *
|
||||
from migen.genlib.misc import optree
|
||||
from migen.genlib.record import *
|
||||
|
||||
|
||||
def _make_m2s(layout):
|
||||
r = []
|
||||
for f in layout:
|
||||
if isinstance(f[1], (int, tuple)):
|
||||
r.append((f[0], f[1], DIR_M_TO_S))
|
||||
else:
|
||||
r.append((f[0], _make_m2s(f[1])))
|
||||
return r
|
||||
|
||||
|
||||
class EndpointDescription:
|
||||
def __init__(self, payload_layout, param_layout=[], packetized=False):
|
||||
self.payload_layout = payload_layout
|
||||
self.param_layout = param_layout
|
||||
self.packetized = packetized
|
||||
|
||||
def get_full_layout(self):
|
||||
reserved = {"stb", "ack", "payload", "param", "sop", "eop", "description"}
|
||||
attributed = set()
|
||||
for f in self.payload_layout + self.param_layout:
|
||||
if f[0] in attributed:
|
||||
raise ValueError(f[0] + " already attributed in payload or param layout")
|
||||
if f[0] in reserved:
|
||||
raise ValueError(f[0] + " cannot be used in endpoint layout")
|
||||
attributed.add(f[0])
|
||||
|
||||
full_layout = [
|
||||
("payload", _make_m2s(self.payload_layout)),
|
||||
("param", _make_m2s(self.param_layout)),
|
||||
("stb", 1, DIR_M_TO_S),
|
||||
("ack", 1, DIR_S_TO_M)
|
||||
]
|
||||
if self.packetized:
|
||||
full_layout += [
|
||||
("sop", 1, DIR_M_TO_S),
|
||||
("eop", 1, DIR_M_TO_S)
|
||||
]
|
||||
return full_layout
|
||||
|
||||
|
||||
class _Endpoint(Record):
|
||||
def __init__(self, description_or_layout):
|
||||
if isinstance(description_or_layout, EndpointDescription):
|
||||
self.description = description_or_layout
|
||||
else:
|
||||
self.description = EndpointDescription(description_or_layout)
|
||||
Record.__init__(self, self.description.get_full_layout())
|
||||
|
||||
def __getattr__(self, name):
|
||||
try:
|
||||
return getattr(object.__getattribute__(self, "payload"), name)
|
||||
except:
|
||||
return getattr(object.__getattribute__(self, "param"), name)
|
||||
|
||||
|
||||
class Source(_Endpoint):
|
||||
def connect(self, sink):
|
||||
return Record.connect(self, sink)
|
||||
|
||||
|
||||
class Sink(_Endpoint):
|
||||
def connect(self, source):
|
||||
return source.connect(self)
|
||||
|
||||
|
||||
def get_endpoints(obj, filt=_Endpoint):
|
||||
if hasattr(obj, "get_endpoints") and callable(obj.get_endpoints):
|
||||
return obj.get_endpoints(filt)
|
||||
r = dict()
|
||||
for k, v in xdir(obj, True):
|
||||
if isinstance(v, filt):
|
||||
r[k] = v
|
||||
return r
|
||||
|
||||
|
||||
def get_single_ep(obj, filt):
|
||||
eps = get_endpoints(obj, filt)
|
||||
if len(eps) != 1:
|
||||
raise ValueError("More than one endpoint")
|
||||
return list(eps.items())[0]
|
||||
|
||||
|
||||
class BinaryActor(Module):
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.busy = Signal()
|
||||
sink = get_single_ep(self, Sink)[1]
|
||||
source = get_single_ep(self, Source)[1]
|
||||
self.build_binary_control(sink, source, *args, **kwargs)
|
||||
|
||||
def build_binary_control(self, sink, source):
|
||||
raise NotImplementedError("Binary actor classes must overload build_binary_control")
|
||||
|
||||
|
||||
class CombinatorialActor(BinaryActor):
|
||||
def build_binary_control(self, sink, source):
|
||||
self.comb += [
|
||||
source.stb.eq(sink.stb),
|
||||
sink.ack.eq(source.ack),
|
||||
self.busy.eq(0)
|
||||
]
|
||||
if sink.description.packetized:
|
||||
self.comb += [
|
||||
source.sop.eq(sink.sop),
|
||||
source.eop.eq(sink.eop)
|
||||
]
|
||||
|
||||
|
||||
class SequentialActor(BinaryActor):
|
||||
def __init__(self, delay):
|
||||
self.trigger = Signal()
|
||||
BinaryActor.__init__(self, delay)
|
||||
|
||||
def build_binary_control(self, sink, source, delay):
|
||||
ready = Signal()
|
||||
timer = Signal(max=delay+1)
|
||||
self.comb += ready.eq(timer == 0)
|
||||
self.sync += If(self.trigger,
|
||||
timer.eq(delay)
|
||||
).Elif(~ready,
|
||||
timer.eq(timer - 1)
|
||||
)
|
||||
|
||||
mask = Signal()
|
||||
self.comb += [
|
||||
source.stb.eq(ready & mask),
|
||||
self.trigger.eq(sink.stb & (source.ack | ~mask) & ready),
|
||||
sink.ack.eq(self.trigger),
|
||||
self.busy.eq(~ready)
|
||||
]
|
||||
self.sync += [
|
||||
If(self.trigger, mask.eq(1)),
|
||||
If(source.stb & source.ack, mask.eq(0))
|
||||
]
|
||||
if sink.description.packetized:
|
||||
self.comb += [
|
||||
source.sop.eq(sink.sop),
|
||||
source.eop.eq(sink.eop)
|
||||
]
|
||||
|
||||
|
||||
class PipelinedActor(BinaryActor):
|
||||
def __init__(self, latency):
|
||||
self.pipe_ce = Signal()
|
||||
BinaryActor.__init__(self, latency)
|
||||
|
||||
def build_binary_control(self, sink, source, latency):
|
||||
busy = 0
|
||||
valid = sink.stb
|
||||
for i in range(latency):
|
||||
valid_n = Signal()
|
||||
self.sync += If(self.pipe_ce, valid_n.eq(valid))
|
||||
valid = valid_n
|
||||
busy = busy | valid
|
||||
|
||||
self.comb += [
|
||||
self.pipe_ce.eq(source.ack | ~valid),
|
||||
sink.ack.eq(self.pipe_ce),
|
||||
source.stb.eq(valid),
|
||||
self.busy.eq(busy)
|
||||
]
|
||||
if sink.description.packetized:
|
||||
sop = sink.stb & sink.sop
|
||||
eop = sink.stb & sink.eop
|
||||
for i in range(latency):
|
||||
sop_n = Signal()
|
||||
eop_n = Signal()
|
||||
self.sync += \
|
||||
If(self.pipe_ce,
|
||||
sop_n.eq(sop),
|
||||
eop_n.eq(eop)
|
||||
)
|
||||
sop = sop_n
|
||||
eop = eop_n
|
||||
|
||||
self.comb += [
|
||||
source.eop.eq(eop),
|
||||
source.sop.eq(sop)
|
||||
]
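As an illustration of the BinaryActor protocol above, a dataflow actor only declares its Sink and Source endpoints and lets the base class generate the handshake; this sketch (the Adder name and its payload fields are made up) adds two payload fields combinatorially:

class Adder(CombinatorialActor):
    def __init__(self, width):
        self.sink = Sink([("a", width), ("b", width)])
        self.source = Source([("sum", width + 1)])
        CombinatorialActor.__init__(self)
        # payload fields are reachable directly on the endpoints via __getattr__
        self.comb += self.source.sum.eq(self.sink.a + self.sink.b)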
|
|
@ -1,44 +0,0 @@
|
|||
from collections import defaultdict
|
||||
|
||||
from migen.fhdl.std import *
|
||||
from migen.flow.actor import *
|
||||
|
||||
|
||||
class EndpointSimHook(Module):
|
||||
def __init__(self, endpoint):
|
||||
self.endpoint = endpoint
|
||||
|
||||
def on_ack(self):
|
||||
pass
|
||||
|
||||
def on_nack(self):
|
||||
pass
|
||||
|
||||
def on_inactive(self):
|
||||
pass
|
||||
|
||||
def do_simulation(self, selfp):
|
||||
if selfp.endpoint.stb:
|
||||
if selfp.endpoint.ack:
|
||||
self.on_ack()
|
||||
else:
|
||||
self.on_nack()
|
||||
else:
|
||||
self.on_inactive()
|
||||
|
||||
|
||||
class DFGHook(Module):
|
||||
def __init__(self, dfg, create):
|
||||
assert(not dfg.is_abstract())
|
||||
self.nodepair_to_ep = defaultdict(dict)
|
||||
for hookn, (u, v, data) in enumerate(dfg.edges_iter(data=True)):
|
||||
ep_to_hook = self.nodepair_to_ep[(u, v)]
|
||||
ep = data["source"]
|
||||
h = create(u, ep, v)
|
||||
ep_to_hook[ep] = h
|
||||
setattr(self.submodules, "hook"+str(hookn), h)
|
||||
|
||||
def hooks_iter(self):
|
||||
for v1 in self.nodepair_to_ep.values():
|
||||
for v2 in v1.values():
|
||||
yield v2
|
|
@ -1,71 +0,0 @@
|
|||
from migen.fhdl.std import *
|
||||
from migen.bank.description import *
|
||||
from migen.flow.hooks import DFGHook
|
||||
|
||||
ISD_MAGIC = 0x6ab4
|
||||
|
||||
|
||||
class EndpointReporter(Module, AutoCSR):
|
||||
def __init__(self, endpoint, nbits):
|
||||
self.reset = Signal()
|
||||
self.freeze = Signal()
|
||||
|
||||
self._ack_count = CSRStatus(nbits)
|
||||
self._nack_count = CSRStatus(nbits)
|
||||
self._cur_status = CSRStatus(2)
|
||||
|
||||
###
|
||||
|
||||
stb = Signal()
|
||||
ack = Signal()
|
||||
self.comb += self._cur_status.status.eq(Cat(stb, ack))
|
||||
ack_count = Signal(nbits)
|
||||
nack_count = Signal(nbits)
|
||||
self.sync += [
|
||||
# register monitored signals
|
||||
stb.eq(endpoint.stb),
|
||||
ack.eq(endpoint.ack),
|
||||
# count operations
|
||||
If(self.reset,
|
||||
ack_count.eq(0),
|
||||
nack_count.eq(0)
|
||||
).Else(
|
||||
If(stb,
|
||||
If(ack,
|
||||
ack_count.eq(ack_count + 1)
|
||||
).Else(
|
||||
nack_count.eq(nack_count + 1)
|
||||
)
|
||||
)
|
||||
),
|
||||
If(~self.freeze,
|
||||
self._ack_count.status.eq(ack_count),
|
||||
self._nack_count.status.eq(nack_count)
|
||||
)
|
||||
]
|
||||
|
||||
|
||||
class DFGReporter(DFGHook, AutoCSR):
|
||||
def __init__(self, dfg, nbits):
|
||||
self._magic = CSRStatus(16)
|
||||
self._neps = CSRStatus(8)
|
||||
self._nbits = CSRStatus(8)
|
||||
self._freeze = CSRStorage()
|
||||
self._reset = CSR()
|
||||
|
||||
###
|
||||
|
||||
DFGHook.__init__(self, dfg,
|
||||
lambda u, ep, v: EndpointReporter(getattr(u, ep), nbits))
|
||||
hooks = list(self.hooks_iter())
|
||||
|
||||
self.comb += [
|
||||
self._magic.status.eq(ISD_MAGIC),
|
||||
self._neps.status.eq(len(hooks)),
|
||||
self._nbits.status.eq(nbits)
|
||||
]
|
||||
for h in hooks:
|
||||
self.comb += [
|
||||
h.freeze.eq(self._freeze.storage),
|
||||
h.reset.eq(self._reset.re)
|
||||
]
|
|
@ -1,330 +0,0 @@
|
|||
from collections import defaultdict
|
||||
|
||||
from migen.fhdl.std import *
|
||||
from migen.genlib.misc import optree
|
||||
from migen.flow.actor import *
|
||||
from migen.flow import plumbing
|
||||
|
||||
# Abstract actors mean that the actor class should be instantiated with the parameters
|
||||
# from the dictionary. They are needed to enable actor duplication or sharing during
|
||||
# elaboration, and automatic parametrization of plumbing actors.
|
||||
|
||||
|
||||
class AbstractActor:
|
||||
def __init__(self, actor_class, parameters=dict(), name=None):
|
||||
self.actor_class = actor_class
|
||||
self.parameters = parameters
|
||||
self.name = name
|
||||
self.busy = Signal()
|
||||
|
||||
def create_instance(self):
|
||||
return self.actor_class(**self.parameters)
|
||||
|
||||
def __repr__(self):
|
||||
r = "<abstract " + self.actor_class.__name__
|
||||
if self.name is not None:
|
||||
r += ": " + self.name
|
||||
r += ">"
|
||||
return r
|
||||
|
||||
|
||||
class MultiDiGraph:
|
||||
def __init__(self):
|
||||
self.edges = defaultdict(list)
|
||||
self.incoming = defaultdict(set)
|
||||
self.outgoing = defaultdict(set)
|
||||
self.nodes = set()
|
||||
|
||||
def add_edge(self, a, b, **edge):
|
||||
self.edges[(a, b)].append(edge)
|
||||
self.incoming[b].add(a)
|
||||
self.outgoing[a].add(b)
|
||||
self.nodes |= {a, b}
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.nodes)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.nodes)
|
||||
|
||||
def edges_iter(self, data=True):
|
||||
assert data
|
||||
for (a, b), edges in self.edges.items():
|
||||
for edge in edges:
|
||||
yield a, b, edge
|
||||
|
||||
def get_edge_data(self, a, b):
|
||||
return dict(enumerate(self.edges[(a, b)]))
|
||||
|
||||
def add_node(self, node):
|
||||
self.nodes.add(node)
|
||||
|
||||
def remove_node(self, node):
|
||||
for i in self.incoming.pop(node):
|
||||
del self.edges[(i, node)]
|
||||
self.outgoing[i].remove(node)
|
||||
for i in self.outgoing.pop(node):
|
||||
del self.edges[(node, i)]
|
||||
self.incoming[i].remove(node)
|
||||
self.nodes.remove(node)
|
||||
|
||||
def remove_edge(self, a, b, key):
|
||||
e = self.edges[(a, b)]
|
||||
del e[key]
|
||||
if not e:
|
||||
self.incoming[b].remove(a)
|
||||
self.outgoing[a].remove(b)
|
||||
|
||||
def in_edges(self, sink, data=True):
|
||||
assert data
|
||||
e = []
|
||||
for source in self.incoming[sink]:
|
||||
for edge in self.edges[(source, sink)]:
|
||||
e.append((source, sink, edge))
|
||||
return e
|
||||
|
||||
def out_edges(self, source, data=True):
|
||||
assert data
|
||||
e = []
|
||||
for sink in self.outgoing[source]:
|
||||
for edge in self.edges[(source, sink)]:
|
||||
e.append((source, sink, edge))
|
||||
return e
|
||||
|
||||
|
||||
# TODO: rewrite this without non-determinism
|
||||
class DataFlowGraph(MultiDiGraph):
|
||||
def __init__(self):
|
||||
MultiDiGraph.__init__(self)
|
||||
self.elaborated = False
|
||||
self.abstract_busy_signals = dict()
|
||||
|
||||
def add_connection(self, source_node, sink_node,
|
||||
source_ep=None, sink_ep=None, # default: assume nodes have 1 source/sink and use that one
|
||||
source_subr=None, sink_subr=None): # default: use whole record
|
||||
self.add_edge(source_node, sink_node,
|
||||
source=source_ep, sink=sink_ep,
|
||||
source_subr=source_subr, sink_subr=sink_subr)
|
||||
|
||||
def add_buffered_connection(self, source_node, sink_node,
|
||||
source_ep=None, sink_ep=None,
|
||||
source_subr=None, sink_subr=None):
|
||||
buf = AbstractActor(plumbing.Buffer)
|
||||
self.add_connection(source_node, buf, source_ep=source_ep, source_subr=source_subr)
|
||||
self.add_connection(buf, sink_node, sink_ep=sink_ep, sink_subr=sink_subr)
|
||||
|
||||
def add_pipeline(self, *nodes):
|
||||
for n1, n2 in zip(nodes, nodes[1:]):
|
||||
self.add_connection(n1, n2)
|
||||
|
||||
def del_connections(self, source_node, sink_node, data_requirements):
|
||||
edges_to_delete = []
|
||||
edge_data = self.get_edge_data(source_node, sink_node)
|
||||
if edge_data is None:
|
||||
# the two nodes are already completely disconnected
|
||||
return
|
||||
for key, data in edge_data.items():
|
||||
if all(k not in data_requirements or data_requirements[k] == v
|
||||
for k, v in data.items()):
|
||||
edges_to_delete.append(key)
|
||||
for key in edges_to_delete:
|
||||
self.remove_edge(source_node, sink_node, key)
|
||||
|
||||
def replace_actor(self, old, new):
|
||||
self.add_node(new)
|
||||
for xold, v, data in self.out_edges(old, data=True):
|
||||
self.add_edge(new, v, **data)
|
||||
for u, xold, data in self.in_edges(old, data=True):
|
||||
self.add_edge(u, new, **data)
|
||||
self.remove_node(old)
|
||||
|
||||
def instantiate(self, actor):
|
||||
inst = actor.create_instance()
|
||||
self.abstract_busy_signals[id(inst)] = actor.busy
|
||||
self.replace_actor(actor, inst)
|
||||
|
||||
# Returns a dictionary
|
||||
# source -> [sink1, ..., sinkn]
|
||||
# source element is a (node, endpoint) pair.
|
||||
# sink elements are (node, endpoint, source subrecord, sink subrecord) 4-tuples.
|
||||
def _source_to_sinks(self):
|
||||
d = dict()
|
||||
for u, v, data in self.edges_iter(data=True):
|
||||
el_src = (u, data["source"])
|
||||
el_dst = (v, data["sink"], data["source_subr"], data["sink_subr"])
|
||||
if el_src in d:
|
||||
d[el_src].append(el_dst)
|
||||
else:
|
||||
d[el_src] = [el_dst]
|
||||
return d
|
||||
|
||||
# Returns a dictionary
|
||||
# sink -> [source1, ... sourcen]
|
||||
# sink element is a (node, endpoint) pair.
|
||||
# source elements are (node, endpoint, sink subrecord, source subrecord) 4-tuples.
|
||||
def _sink_to_sources(self):
|
||||
d = dict()
|
||||
for u, v, data in self.edges_iter(data=True):
|
||||
el_src = (u, data["source"], data["sink_subr"], data["source_subr"])
|
||||
el_dst = (v, data["sink"])
|
||||
if el_dst in d:
|
||||
d[el_dst].append(el_src)
|
||||
else:
|
||||
d[el_dst] = [el_src]
|
||||
return d
|
||||
|
||||
# List sources that feed more than one sink.
|
||||
def _list_divergences(self):
|
||||
d = self._source_to_sinks()
|
||||
return dict((k, v) for k, v in d.items() if len(v) > 1)
|
||||
|
||||
# A graph is abstract if any of these conditions is met:
|
||||
# (1) A node is an abstract actor.
|
||||
# (2) A subrecord is used.
|
||||
# (3) A single source feeds more than one sink.
|
||||
# NB: It is not allowed for a single sink to be fed by more than one source
|
||||
# (except with subrecords, i.e. when a combinator is used)
|
||||
def is_abstract(self):
|
||||
return any(isinstance(x, AbstractActor) for x in self) \
|
||||
or any(d["source_subr"] is not None or d["sink_subr"] is not None
|
||||
for u, v, d in self.edges_iter(data=True)) \
|
||||
or bool(self._list_divergences())
|
||||
|
||||
def _eliminate_subrecords_and_divergences(self):
|
||||
# Insert combinators.
|
||||
for (dst_node, dst_endpoint), sources in self._sink_to_sources().items():
|
||||
if len(sources) > 1 or sources[0][2] is not None:
|
||||
# build combinator
|
||||
# "layout" is filled in during instantiation
|
||||
subrecords = [dst_subrecord for src_node, src_endpoint, dst_subrecord, src_subrecord in sources]
|
||||
combinator = AbstractActor(plumbing.Combinator, {"subrecords": subrecords})
|
||||
# disconnect source1 -> sink ... sourcen -> sink
|
||||
# connect source1 -> combinator_sink1 ... sourcen -> combinator_sinkn
|
||||
for n, (src_node, src_endpoint, dst_subrecord, src_subrecord) in enumerate(sources):
|
||||
self.del_connections(src_node, dst_node,
|
||||
{"source": src_endpoint, "sink": dst_endpoint})
|
||||
self.add_connection(src_node, combinator,
|
||||
src_endpoint, "sink{0}".format(n), source_subr=src_subrecord)
|
||||
# connect combinator_source -> sink
|
||||
self.add_connection(combinator, dst_node, "source", dst_endpoint)
|
||||
# Insert splitters.
|
||||
for (src_node, src_endpoint), sinks in self._source_to_sinks().items():
|
||||
if len(sinks) > 1 or sinks[0][2] is not None:
|
||||
subrecords = [src_subrecord for dst_node, dst_endpoint, src_subrecord, dst_subrecord in sinks]
|
||||
splitter = AbstractActor(plumbing.Splitter, {"subrecords": subrecords})
|
||||
# disconnect source -> sink1 ... source -> sinkn
|
||||
# connect splitter_source1 -> sink1 ... splitter_sourcen -> sinkn
|
||||
for n, (dst_node, dst_endpoint, src_subrecord, dst_subrecord) in enumerate(sinks):
|
||||
self.del_connections(src_node, dst_node,
|
||||
{"source": src_endpoint, "sink": dst_endpoint})
|
||||
self.add_connection(splitter, dst_node,
|
||||
"source{0}".format(n), dst_endpoint)
|
||||
# connect source -> splitter_sink
|
||||
self.add_connection(src_node, splitter, src_endpoint, "sink")
|
||||
|
||||
def _infer_plumbing_layout(self):
|
||||
while True:
|
||||
ap = [a for a in self if isinstance(a, AbstractActor) and a.actor_class in plumbing.actors]
|
||||
if not ap:
|
||||
break
|
||||
for a in ap:
|
||||
in_edges = self.in_edges(a, data=True)
|
||||
out_edges = self.out_edges(a, data=True)
|
||||
if a.actor_class in plumbing.layout_sink and len(in_edges) == 1:
|
||||
other, me, data = in_edges[0]
|
||||
if isinstance(other, AbstractActor):
|
||||
continue
|
||||
other_ep = data["source"]
|
||||
if other_ep is None:
|
||||
other_ep = get_single_ep(other, Source)[1]
|
||||
else:
|
||||
other_ep = getattr(other, other_ep)
|
||||
elif a.actor_class in plumbing.layout_source and len(out_edges) == 1:
|
||||
me, other, data = out_edges[0]
|
||||
if isinstance(other, AbstractActor):
|
||||
continue
|
||||
other_ep = data["sink"]
|
||||
if other_ep is None:
|
||||
other_ep = get_single_ep(other, Sink)[1]
|
||||
else:
|
||||
other_ep = getattr(other, other_ep)
|
||||
else:
|
||||
raise AssertionError
|
||||
layout = other_ep.payload.layout
|
||||
a.parameters["layout"] = layout
|
||||
self.instantiate(a)
|
||||
|
||||
def _instantiate_actors(self):
|
||||
# 1. instantiate all abstract non-plumbing actors
|
||||
for actor in list(self):
|
||||
if isinstance(actor, AbstractActor) and actor.actor_class not in plumbing.actors:
|
||||
self.instantiate(actor)
|
||||
# 2. infer plumbing layout and instantiate plumbing
|
||||
self._infer_plumbing_layout()
|
||||
# 3. resolve default eps
|
||||
for u, v, d in self.edges_iter(data=True):
|
||||
if d["source"] is None:
|
||||
d["source"] = get_single_ep(u, Source)[0]
|
||||
if d["sink"] is None:
|
||||
d["sink"] = get_single_ep(v, Sink)[0]
|
||||
|
||||
# Elaboration turns an abstract DFG into a physical one.
|
||||
# Pass 1: eliminate subrecords and divergences
|
||||
# by inserting Combinator/Splitter actors
|
||||
# Pass 2: run optimizer (e.g. share and duplicate actors)
|
||||
# Pass 3: instantiate all abstract actors and explicit "None" endpoints
|
||||
def elaborate(self, optimizer=None):
|
||||
if self.elaborated:
|
||||
return
|
||||
self.elaborated = True
|
||||
|
||||
self._eliminate_subrecords_and_divergences()
|
||||
if optimizer is not None:
|
||||
optimizer(self)
|
||||
self._instantiate_actors()
|
||||
|
||||
|
||||
class CompositeActor(Module):
|
||||
def __init__(self, dfg):
|
||||
dfg.elaborate()
|
||||
|
||||
# expose unconnected endpoints
|
||||
uc_eps_by_node = dict((node, get_endpoints(node)) for node in dfg)
|
||||
for u, v, d in dfg.edges_iter(data=True):
|
||||
uc_eps_u = uc_eps_by_node[u]
|
||||
source = d["source"]
|
||||
try:
|
||||
del uc_eps_u[source]
|
||||
except KeyError:
|
||||
pass
|
||||
uc_eps_v = uc_eps_by_node[v]
|
||||
sink = d["sink"]
|
||||
try:
|
||||
del uc_eps_v[sink]
|
||||
except KeyError:
|
||||
pass
|
||||
for node, uc_eps in uc_eps_by_node.items():
|
||||
for k, v in uc_eps.items():
|
||||
assert(not hasattr(self, k))
|
||||
setattr(self, k, v)
|
||||
|
||||
# connect abstract busy signals
|
||||
for node in dfg:
|
||||
try:
|
||||
abstract_busy_signal = dfg.abstract_busy_signals[id(node)]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
self.comb += abstract_busy_signal.eq(node.busy)
|
||||
|
||||
# generate busy signal
|
||||
self.busy = Signal()
|
||||
self.comb += self.busy.eq(optree("|", [node.busy for node in dfg]))
|
||||
|
||||
# claim ownership of sub-actors and establish connections
|
||||
for node in dfg:
|
||||
self.submodules += node
|
||||
for u, v, d in dfg.edges_iter(data=True):
|
||||
ep_src = getattr(u, d["source"])
|
||||
ep_dst = getattr(v, d["sink"])
|
||||
self.comb += ep_src.connect_flat(ep_dst)
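Putting the pieces together, a graph is built from (possibly abstract) actors and then wrapped in a CompositeActor, which elaborates it and owns the actors as submodules; a minimal sketch, where source_actor and sink_actor stand for any concrete actors with a single Source/Sink:

g = DataFlowGraph()
g.add_pipeline(source_actor, AbstractActor(plumbing.Buffer), sink_actor)
composite = CompositeActor(g)   # elaborates the graph and connects the endpoints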
|
|
@ -1,54 +0,0 @@
|
|||
from migen.flow.hooks import *
|
||||
|
||||
|
||||
class EndpointReporter(EndpointSimHook):
|
||||
def __init__(self, endpoint):
|
||||
EndpointSimHook.__init__(self, endpoint)
|
||||
self.reset()
|
||||
|
||||
def reset(self):
|
||||
self.inactive = 0
|
||||
self.ack = 0
|
||||
self.nack = 0
|
||||
|
||||
# Total number of cycles per token (inverse token rate)
|
||||
def cpt(self):
|
||||
return (self.inactive + self.nack + self.ack)/self.ack
|
||||
|
||||
# Inactivity cycles per token (slack)
|
||||
def ipt(self):
|
||||
return self.inactive/self.ack
|
||||
|
||||
# NAK cycles per token (backpressure)
|
||||
def npt(self):
|
||||
return self.nack/self.ack
|
||||
|
||||
def report_str(self):
|
||||
if self.ack:
|
||||
return "C/T={:.2f}\nI/T={:.2f}\nN/T={:.2f}".format(self.cpt(), self.ipt(), self.npt())
|
||||
else:
|
||||
return "N/A"
|
||||
|
||||
def on_ack(self):
|
||||
self.ack += 1
|
||||
|
||||
def on_nack(self):
|
||||
self.nack += 1
|
||||
|
||||
def on_inactive(self):
|
||||
self.inactive += 1
|
||||
|
||||
|
||||
class DFGReporter(DFGHook):
|
||||
def __init__(self, dfg):
|
||||
DFGHook.__init__(self, dfg, lambda u, ep, v: EndpointReporter(getattr(u, ep)))
|
||||
|
||||
def get_edge_labels(self):
|
||||
d = dict()
|
||||
for (u, v), eps in self.nodepair_to_ep.items():
|
||||
if len(eps) == 1:
|
||||
d[(u, v)] = list(eps.values())[0].report_str()
|
||||
else:
|
||||
d[(u, v)] = "\n".join(ep + ":\n" + reporter.report_str()
                      for ep, reporter in eps.items())
|
||||
return d
|
|
@ -1,107 +0,0 @@
|
|||
from migen.fhdl.std import *
|
||||
from migen.flow.actor import *
|
||||
from migen.genlib.record import *
|
||||
from migen.genlib.misc import optree
|
||||
|
||||
|
||||
class Buffer(PipelinedActor):
|
||||
def __init__(self, layout):
|
||||
self.d = Sink(layout)
|
||||
self.q = Source(layout)
|
||||
PipelinedActor.__init__(self, 1)
|
||||
self.sync += \
|
||||
If(self.pipe_ce,
|
||||
self.q.payload.eq(self.d.payload),
|
||||
self.q.param.eq(self.d.param)
|
||||
)
|
||||
|
||||
|
||||
class Combinator(Module):
|
||||
def __init__(self, layout, subrecords):
|
||||
self.source = Source(layout)
|
||||
sinks = []
|
||||
for n, r in enumerate(subrecords):
|
||||
s = Sink(layout_partial(layout, *r))
|
||||
setattr(self, "sink"+str(n), s)
|
||||
sinks.append(s)
|
||||
self.busy = Signal()
|
||||
|
||||
###
|
||||
|
||||
self.comb += [
|
||||
self.busy.eq(0),
|
||||
self.source.stb.eq(optree("&", [sink.stb for sink in sinks]))
|
||||
]
|
||||
self.comb += [sink.ack.eq(self.source.ack & self.source.stb) for sink in sinks]
|
||||
self.comb += [self.source.payload.eq(sink.payload) for sink in sinks]
|
||||
self.comb += [self.source.param.eq(sink.param) for sink in sinks]
|
||||
|
||||
|
||||
class Splitter(Module):
|
||||
def __init__(self, layout, subrecords):
|
||||
self.sink = Sink(layout)
|
||||
sources = []
|
||||
for n, r in enumerate(subrecords):
|
||||
s = Source(layout_partial(layout, *r))
|
||||
setattr(self, "source"+str(n), s)
|
||||
sources.append(s)
|
||||
self.busy = Signal()
|
||||
|
||||
###
|
||||
|
||||
self.comb += [source.payload.eq(self.sink.payload) for source in sources]
|
||||
self.comb += [source.param.eq(self.sink.param) for source in sources]
|
||||
already_acked = Signal(len(sources))
|
||||
self.sync += If(self.sink.stb,
|
||||
already_acked.eq(already_acked | Cat(*[s.ack for s in sources])),
|
||||
If(self.sink.ack, already_acked.eq(0))
|
||||
)
|
||||
self.comb += self.sink.ack.eq(optree("&",
|
||||
[s.ack | already_acked[n] for n, s in enumerate(sources)]))
|
||||
for n, s in enumerate(sources):
|
||||
self.comb += s.stb.eq(self.sink.stb & ~already_acked[n])
|
||||
|
||||
|
||||
class Multiplexer(Module):
|
||||
def __init__(self, layout, n):
|
||||
self.source = Source(layout)
|
||||
sinks = []
|
||||
for i in range(n):
|
||||
sink = Sink(layout)
|
||||
setattr(self, "sink"+str(i), sink)
|
||||
sinks.append(sink)
|
||||
self.busy = Signal()
|
||||
self.sel = Signal(max=n)
|
||||
|
||||
###
|
||||
|
||||
cases = {}
|
||||
for i, sink in enumerate(sinks):
|
||||
cases[i] = Record.connect(sink, self.source)
|
||||
self.comb += Case(self.sel, cases)
|
||||
|
||||
|
||||
class Demultiplexer(Module):
|
||||
def __init__(self, layout, n):
|
||||
self.sink = Sink(layout)
|
||||
sources = []
|
||||
for i in range(n):
|
||||
source = Source(layout)
|
||||
setattr(self, "source"+str(i), source)
|
||||
sources.append(source)
|
||||
self.busy = Signal()
|
||||
self.sel = Signal(max=n)
|
||||
|
||||
###
|
||||
|
||||
cases = {}
|
||||
for i, source in enumerate(sources):
|
||||
cases[i] = Record.connect(self.sink, source)
|
||||
self.comb += Case(self.sel, cases)
|
||||
|
||||
# Actors whose layout should be inferred from what their single sink is connected to.
|
||||
layout_sink = {Buffer, Splitter}
|
||||
# Actors whose layout should be inferred from what their single source is connected to.
|
||||
layout_source = {Buffer, Combinator}
|
||||
# All actors.
|
||||
actors = layout_sink | layout_source
|
|
@ -1,5 +0,0 @@
class Token:
    def __init__(self, endpoint, value=None, idle_wait=False):
        self.endpoint = endpoint
        self.value = value
        self.idle_wait = idle_wait
@ -1,54 +0,0 @@
|
|||
from migen.fhdl.std import *
|
||||
|
||||
|
||||
class Complex:
|
||||
def __init__(self, real, imag):
|
||||
self.real = real
|
||||
self.imag = imag
|
||||
|
||||
def __neg__(self):
|
||||
return Complex(-self.real, -self.imag)
|
||||
|
||||
def __add__(self, other):
|
||||
if isinstance(other, Complex):
|
||||
return Complex(self.real + other.real, self.imag + other.imag)
|
||||
else:
|
||||
return Complex(self.real + other, self.imag)
|
||||
__radd__ = __add__
|
||||
def __sub__(self, other):
|
||||
if isinstance(other, Complex):
|
||||
return Complex(self.real - other.real, self.imag - other.imag)
|
||||
else:
|
||||
return Complex(self.real - other, self.imag)
|
||||
def __rsub__(self, other):
|
||||
if isinstance(other, Complex):
|
||||
return Complex(other.real - self.real, other.imag - self.imag)
|
||||
else:
|
||||
return Complex(other - self.real, -self.imag)
|
||||
def __mul__(self, other):
|
||||
if isinstance(other, Complex):
|
||||
return Complex(self.real*other.real - self.imag*other.imag,
|
||||
self.real*other.imag + self.imag*other.real)
|
||||
else:
|
||||
return Complex(self.real*other, self.imag*other)
|
||||
__rmul__ = __mul__
|
||||
|
||||
def __lshift__(self, other):
|
||||
return Complex(self.real << other, self.imag << other)
|
||||
def __rshift__(self, other):
|
||||
return Complex(self.real >> other, self.imag >> other)
|
||||
|
||||
def __repr__(self):
|
||||
return repr(self.real) + " + " + repr(self.imag) + "j"
|
||||
|
||||
def eq(self, r):
|
||||
if isinstance(r, Complex):
|
||||
return self.real.eq(r.real), self.imag.eq(r.imag)
|
||||
else:
|
||||
return self.real.eq(r), self.imag.eq(0)
|
||||
|
||||
|
||||
def SignalC(*args, **kwargs):
|
||||
real = Signal(*args, **kwargs)
|
||||
imag = Signal(*args, **kwargs)
|
||||
return Complex(real, imag)
|
|
@ -1,232 +0,0 @@
|
|||
#!/usr/bin/env python3

# Copyright (c) 2014 Guy Hutchison

# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:

# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import migen
import operator
from migen.fhdl.std import *
from migen.fhdl.verilog import convert


# Join two lists a and b, such that redundant terms are removed
def join_lists(a, b):
    z = []
    for x in a+b:
        if x not in z:
            z.append(x)
        else:
            z.remove(x)
    return z


def join_operator(list, op):
    if len(list) == 0:
        return []
    elif len(list) == 1:
        return list[0]
    elif len(list) == 2:
        return op(list[0], list[1])
    else:
        return op(list[0], join_operator(list[1:], op))


def calc_code_bits(data_bits):
    m = 1
    c = 0

    while c < data_bits:
        m += 1
        c = 2**m - m - 1
    return m


# build_seq() is used to create the selection of bits which need
# to be checked for a particular parity (check) bit.
def build_seq(bnum, out_width):
    tmp = []

    ptr = 0
    cur = 0
    skip = 2**bnum-1
    if skip == 0:
        check = 2**bnum
    else:
        check = 0
    while cur < out_width:
        if check > 0:
            if (cur != 2**bnum-1):
                tmp.append(cur)
                ptr += 1
            check -= 1
            if check == 0:
                skip = 2**bnum
        else:
            skip -= 1
            if skip == 0:
                check = 2**bnum
        cur += 1

    return tmp


# build_bits() is used for the generator portion; it combines the
# bit sequences for all input and parity bits which are used and
# removes redundant terms.
def build_bits(in_width, gen_parity=True):
    pnum = 1
    innum = 0
    blist = []
    num_code_bits = calc_code_bits(in_width)
    out_width = in_width + num_code_bits
    v = [list()] * out_width
    code_bit_list = []

    for b in range(out_width):
        if (b+1) == pnum:
            pnum = 2*pnum
        else:
            v[b] = [innum]
            innum += 1

    for b in range(num_code_bits):
        vindex = 2**b-1
        blist = build_seq(b, out_width)
        for bli in blist:
            v[vindex] = join_lists(v[vindex], v[bli])
        code_bit_list.append(v[vindex])

    # Calculate parity bit
    if gen_parity:
        pbit = []
        for b in v:
            pbit = join_lists(pbit, b)
        code_bit_list.append(pbit)
    return code_bit_list


# xor_tree() takes a signal and a list of bit positions to take from
# the signal, and generates a balanced xor tree as output.
def xor_tree(in_signal, in_bits):
    if len(in_bits) == 0:
        print("ERROR: in_bits must be > 0")
    elif len(in_bits) == 1:
        return in_signal[in_bits[0]]
    elif len(in_bits) == 2:
        return in_signal[in_bits[0]] ^ in_signal[in_bits[1]]
    elif len(in_bits) == 3:
        return in_signal[in_bits[0]] ^ in_signal[in_bits[1]] ^ in_signal[in_bits[2]]
    else:
        split = int(len(in_bits)/2)
        return xor_tree(in_signal, in_bits[0:split]) ^ xor_tree(in_signal, in_bits[split:])
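These helpers are plain Python, so they can be sanity-checked without elaborating any hardware. A hedged check for 4 data bits: calc_code_bits() should report 3 check bits, and build_bits() should reproduce the usual Hamming(7,4) coverage sets plus a reduced overall-parity set. The expected lists below were derived by hand from the code above and are illustrative, not authoritative:

# --- illustrative sketch, not from the removed files ---
assert calc_code_bits(4) == 3   # 3 check bits cover up to 2**3 - 3 - 1 = 4 data bits
print(build_bits(4))
# hand-derived expectation: [[0, 1, 3], [0, 2, 3], [1, 2, 3], [0, 1, 2]]
# i.e. p0 = d0^d1^d3, p1 = d0^d2^d3, p2 = d1^d2^d3, plus the overall-parity term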


# Base class for Hamming code generator/checker.


# Hamming code generator class

# The class constructor takes a single required input, which is the number of
# bits of the input data. The module creates a single output, which is a set
# of code check bits and a parity bit.

# This generator and its corresponding checker will only generate a single-
# error correct, double-error detect code. If double-error detection is
# not desired, the most-significant code_out bit can be left unconnected.

# If generated as a top-level module, contains its suggested module name
# in self.name and list of ports in self.ports
class HammingGenerator(Module):
    def __init__(self, input_size):
        self.input_size = input_size
        self.data_in = Signal(input_size)
        self.code_out = Signal(calc_code_bits(input_size)+1)

        xor_bits = build_bits(self.input_size)
        for b in range(len(xor_bits)):
            self.comb += self.code_out[b].eq(xor_tree(self.data_in, xor_bits[b]))


# Hamming code checker class

# Constructor takes two parameters:
#   input_size (bits of data bus, not counting check bits)
#   correct (boolean, True if output data should be corrected)

# If used as a check/correct module, the module creates an
# enable input which can dynamically turn off error correction
# for debug.

# If double-bit detection is not desired, the most-significant
# code_in bit can be tied to 0, and the dberr output port left
# unconnected.

# If generated as a top-level module, contains its suggested module name
# in self.name and list of ports in self.ports
class HammingChecker(Module):
    def __init__(self, input_size, correct=True, gen_parity=True):
        self.input_size = input_size
        self.correct = correct
        self.data_in = Signal(input_size)
        self.code_bits = calc_code_bits(input_size)
        self.code_in = Signal(self.code_bits+1)
        self.code_out = Signal(self.code_bits)
        self.sberr = Signal()
        if gen_parity:
            self.dberr = Signal()

        # vector of which interleaved bit position represents a particular
        # data bit, used for error correction
        dbits = []

        # Create interleaved vector of code bits and data bits with code bits
        # in power-of-two positions
        pnum = 0
        dnum = 0
        self.par_vec = Signal(input_size+self.code_bits)
        for b in range(input_size+calc_code_bits(input_size)):
            if b+1 == 2**pnum:
                self.comb += self.par_vec[b].eq(self.code_in[pnum])
                pnum += 1
            else:
                self.comb += self.par_vec[b].eq(self.data_in[dnum])
                dbits.append(b)
                dnum += 1

        if correct:
            self.enable = Signal()
            self.correct_out = Signal(input_size)
            self.data_out = Signal(input_size, name='data_out')
            for b in range(input_size):
                self.comb += self.correct_out[b].eq((self.code_out == (dbits[b]+1)) ^ self.data_in[b])
            self.comb += If(self.enable, self.data_out.eq(self.correct_out)).Else(self.data_out.eq(self.data_in))

        self.comb += self.sberr.eq(self.code_out != 0)
        if gen_parity:
            parity = Signal()
            self.comb += parity.eq(xor_tree(self.data_in, range(input_size)) ^ xor_tree(self.code_in, range(self.code_bits+1)))
            self.comb += self.dberr.eq(~parity)

        for b in range(calc_code_bits(self.input_size)):
            bits = [2**b-1]
            bits += build_seq(b, self.input_size+calc_code_bits(self.input_size))
            self.comb += self.code_out[b].eq(xor_tree(self.par_vec, bits))
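Both classes are ordinary migen Modules and can be converted standalone with the convert imported at the top of this file; a minimal sketch, where the 8-bit width, port sets and module names are arbitrary choices for illustration:

# --- illustrative sketch, not from the removed files ---
hg = HammingGenerator(8)
print(convert(hg, {hg.data_in, hg.code_out}, name="hamming_gen_8"))

hc = HammingChecker(8, correct=True)
print(convert(hc, {hc.enable, hc.data_in, hc.code_in,
                   hc.data_out, hc.sberr, hc.dberr}, name="hamming_check_8"))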
|
@ -1,78 +0,0 @@
|
|||
from migen.fhdl.std import *


class ReorderSlot:
    def __init__(self, tag_width, data_width):
        self.wait_data = Signal()
        self.has_data = Signal()
        self.tag = Signal(tag_width)
        self.data = Signal(data_width)


class ReorderBuffer(Module):
    def __init__(self, tag_width, data_width, depth):
        # issue
        self.can_issue = Signal()
        self.issue = Signal()
        self.tag_issue = Signal(tag_width)

        # call
        self.call = Signal()
        self.tag_call = Signal(tag_width)
        self.data_call = Signal(data_width)

        # readback
        self.can_read = Signal()
        self.read = Signal()
        self.data_read = Signal(data_width)

        ###

        empty_count = Signal(max=depth+1, reset=depth)
        produce = Signal(max=depth)
        consume = Signal(max=depth)
        slots = Array(ReorderSlot(tag_width, data_width)
                      for n in range(depth))

        # issue
        self.comb += self.can_issue.eq(empty_count != 0)
        self.sync += If(self.issue & self.can_issue,
                empty_count.eq(empty_count - 1),
                If(produce == depth - 1,
                    produce.eq(0)
                ).Else(
                    produce.eq(produce + 1)
                ),
                slots[produce].wait_data.eq(1),
                slots[produce].tag.eq(self.tag_issue)
            )

        # call
        for n, slot in enumerate(slots):
            self.sync += If(self.call & slot.wait_data & (self.tag_call == slot.tag),
                    slot.wait_data.eq(0),
                    slot.has_data.eq(1),
                    slot.data.eq(self.data_call)
                )

        # readback
        self.comb += [
            self.can_read.eq(slots[consume].has_data),
            self.data_read.eq(slots[consume].data)
        ]
        self.sync += [
            If(self.read & self.can_read,
                empty_count.eq(empty_count + 1),
                If(consume == depth - 1,
                    consume.eq(0)
                ).Else(
                    consume.eq(consume + 1)
                ),
                slots[consume].has_data.eq(0)
            )
        ]

        # do not touch empty count when issuing and reading at the same time
        self.sync += If(self.issue & self.can_issue & self.read & self.can_read,
                empty_count.eq(empty_count)
            )
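The three interfaces form a small protocol: issue reserves the next slot in order and stamps it with a tag, call completes whichever waiting slot carries the matching tag (possibly out of order), and readback returns data strictly in issue order. A hedged pure-Python reference model of that protocol, useful when writing a testbench; it is not derived from any file in this commit:

# --- illustrative sketch, not from the removed files ---
from collections import deque

class ReorderBufferModel:
    def __init__(self, depth):
        self.depth = depth
        self.fifo = deque()     # tags in issue order
        self.results = {}       # tag -> data, filled out of order

    def issue(self, tag):
        assert len(self.fifo) < self.depth, "buffer full"
        self.fifo.append(tag)

    def call(self, tag, data):
        assert tag in self.fifo and tag not in self.results
        self.results[tag] = data

    def read(self):
        assert self.fifo and self.fifo[0] in self.results, "oldest result not ready"
        return self.results.pop(self.fifo.popleft())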
|
@ -1,71 +0,0 @@
|
|||
import unittest

from migen.fhdl.std import *
from migen.flow.actor import *
from migen.flow.transactions import *
from migen.flow.network import *
from migen.actorlib.sim import *

from migen.test.support import SimCase, SimBench


def source_gen(sent):
    for i in range(10):
        yield Token("source", {"value": i})
        sent.append(i)


class SimSource(SimActor):
    def __init__(self):
        self.source = Source([("value", 32)])
        self.sent = []
        SimActor.__init__(self, source_gen(self.sent))


def sink_gen(received):
    while True:
        t = Token("sink")
        yield t
        received.append(t.value["value"])


class SimSink(SimActor):
    def __init__(self):
        self.sink = Sink([("value", 32)])
        self.received = []
        SimActor.__init__(self, sink_gen(self.received))


class SourceSinkCase(SimCase, unittest.TestCase):
    class TestBench(SimBench):
        def __init__(self):
            self.source = SimSource()
            self.sink = SimSink()
            g = DataFlowGraph()
            g.add_connection(self.source, self.sink)
            self.submodules.comp = CompositeActor(g)

        def do_simulation(self, selfp):
            if self.source.token_exchanger.done:
                raise StopSimulation

    def test_equal(self):
        self.run_with(lambda tb, tbp: None)
        self.assertEqual(self.tb.source.sent, self.tb.sink.received)


class SourceSinkDirectCase(SimCase, unittest.TestCase):
    class TestBench(SimBench):
        def __init__(self):
            self.source = SimSource()
            self.sink = SimSink()
            self.submodules += self.source, self.sink
            self.comb += self.sink.sink.connect(self.source.source)

        def do_simulation(self, selfp):
            if self.source.token_exchanger.done:
                raise StopSimulation

    def test_equal(self):
        self.run_with(lambda tb, tbp: None)
        self.assertEqual(self.tb.source.sent, self.tb.sink.received)
|