Merge branch 'master' into naxriscv-merge

# Conflicts:
#	litex/soc/cores/cpu/naxriscv/core.py
Dolu1990 2022-09-30 13:37:03 +02:00
commit c9f669d4ec
20 changed files with 317 additions and 170 deletions


@ -337,6 +337,35 @@ design.create("{2}", "{3}", "./../gateware", overwrite=True)
cmd += "\n#---------- END PLL {} ---------\n\n".format(name)
return cmd
def generate_jtag(self, block, verbose=True):
name = block["name"]
id = block["id"]
pins = block["pins"]
def get_pin_name(pin):
return pin.backtrace[-1][0]
cmds = []
cmds.append(f"# ---------- JTAG {id} ---------")
cmds.append(f'jtag = design.create_block("jtag_soc", block_type="JTAG")')
cmds.append(f'design.assign_resource(jtag, "JTAG_USER{id}", "JTAG")')
cmds.append(f'jtag_config = {{')
cmds.append(f' "CAPTURE" : "{get_pin_name(pins.CAPTURE)}",')
cmds.append(f' "DRCK" : "{get_pin_name(pins.DRCK.backtrace)}",')
cmds.append(f' "RESET" : "{get_pin_name(pins.RESET.backtrace)}",')
cmds.append(f' "RUNTEST" : "{get_pin_name(pins.RUNTEST.backtrace)}",')
cmds.append(f' "SEL" : "{get_pin_name(pins.SEL.backtrace)}",')
cmds.append(f' "SHIFT" : "{get_pin_name(pins.SHIFT.backtrace)}",')
cmds.append(f' "TCK" : "{get_pin_name(pins.TCK.backtrace)}",')
cmds.append(f' "TDI" : "{get_pin_name(pins.TDI.backtrace)}",')
cmds.append(f' "TMS" : "{get_pin_name(pins.TMS.backtrace)}",')
cmds.append(f' "UPDATE" : "{get_pin_name(pins.UPDATE.backtrace)}",')
cmds.append(f' "TDO" : "{get_pin_name(pins.TDO.backtrace)}"')
cmds.append(f'}}')
cmds.append(f'design.set_property("jtag_soc", jtag_config, block_type="JTAG")')
cmds.append(f"# ---------- END JTAG {id} ---------\n")
return "\n".join(cmds)
def generate(self, partnumber):
output = ""
for block in self.blocks:
@ -351,6 +380,8 @@ design.create("{2}", "{3}", "./../gateware", overwrite=True)
output += self.generate_mipi_tx(block)
if block["type"] == "MIPI_RX_LANE":
output += self.generate_mipi_rx(block)
if block["type"] == "JTAG":
output += self.generate_jtag(block)
return output
def footer(self):
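For context, generate_jtag() consumes the block dictionary that the EfinixJTAG helper added later in this commit registers with the interface writer. A minimal sketch, where "writer" and "pins" are placeholder names (pins would normally come from platform.request()):

# Hypothetical illustration only.
block = {
    "type" : "JTAG",
    "name" : "jtag_1",
    "id"   : 1,
    "pins" : pins,   # Record with CAPTURE, DRCK, RESET, ..., TDO subsignals.
}
writer.blocks.append(block)
# generate() dispatches on block["type"] == "JTAG" and emits the Efinity API commands:
script = writer.generate_jtag(block)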


@ -17,7 +17,7 @@ from migen.fhdl.structure import _Fragment
from litex.build.generic_platform import *
from litex.build import tools
from litex.build.lattice import common
from litex.build.yosys_nextpnr_toolchain import YosysNextPNRToolchain
from litex.build.yosys_nextpnr_toolchain import YosysNextPNRToolchain, yosys_nextpnr_args, yosys_nextpnr_argdict
# LatticeIceStormToolchain -------------------------------------------------------------------------
@ -107,13 +107,9 @@ class LatticeIceStormToolchain(YosysNextPNRToolchain):
def icestorm_args(parser):
toolchain_group = parser.add_argument_group(title="Toolchain options")
toolchain_group.add_argument("--nextpnr-timingstrict", action="store_true", help="Make the build fail when Timing is not met.")
toolchain_group.add_argument("--nextpnr-ignoreloops", action="store_true", help="Use strict Timing mode (Build will fail when Timings are not met).")
toolchain_group.add_argument("--nextpnr-seed", default=1, type=int, help="Set Nextpnr's seed.")
yosys_nextpnr_args(toolchain_group)
def icestorm_argdict(args):
return {
"timingstrict": args.nextpnr_timingstrict,
"ignoreloops": args.nextpnr_ignoreloops,
"seed": args.nextpnr_seed,
**yosys_nextpnr_argdict(args),
}
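A usage sketch of the refactored helpers in a typical target/build script; the argparse part is runnable, while the final build call and the builder object are illustrative only:

import argparse
from litex.build.lattice.icestorm import icestorm_args, icestorm_argdict

parser = argparse.ArgumentParser(description="iCE40 build example")
icestorm_args(parser)            # Now pulls in the shared --yosys-* / --nextpnr-* options.
args = parser.parse_args()

# The returned dict maps onto YosysNextPNRToolchain.build() keyword arguments.
builder.build(**icestorm_argdict(args))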


@ -17,7 +17,7 @@ from litex.build.generic_platform import *
from litex.build import tools
from litex.build.lattice import common
from litex.build.lattice.radiant import _format_constraint, _format_ldc, _build_pdc
from litex.build.yosys_nextpnr_toolchain import YosysNextPNRToolchain
from litex.build.yosys_nextpnr_toolchain import YosysNextPNRToolchain, yosys_nextpnr_args, yosys_nextpnr_argdict
import math
@ -63,19 +63,11 @@ class LatticeOxideToolchain(YosysNextPNRToolchain):
def oxide_args(parser):
toolchain_group = parser.add_argument_group(title="Toolchain options")
toolchain_group.add_argument("--yosys-nowidelut", action="store_true", help="Use Yosys's nowidelut mode.")
toolchain_group.add_argument("--yosys-abc9", action="store_true", help="Use Yosys's abc9 mode.")
toolchain_group.add_argument("--nextpnr-timingstrict", action="store_true", help="Use strict Timing mode (Build will fail when Timings are not met).")
toolchain_group.add_argument("--nextpnr-ignoreloops", action="store_true", help="Ignore combinatorial loops in Timing Analysis.")
toolchain_group.add_argument("--nextpnr-seed", default=1, type=int, help="Set Nextpnr's seed.")
toolchain_group.add_argument("--nexus-es-device", action="store_true", help="Use Nexus-ES1 part.")
yosys_nextpnr_args(toolchain_group)
toolchain_group.add_argument("--nexus-es-device", action="store_true", help="Use Nexus-ES1 part.")
def oxide_argdict(args):
return {
"nowidelut": args.yosys_nowidelut,
"abc9": args.yosys_abc9,
"timingstrict": args.nextpnr_timingstrict,
"ignoreloops": args.nextpnr_ignoreloops,
"seed": args.nextpnr_seed,
**yosys_nextpnr_argdict(args),
"es_device": args.nexus_es_device,
}


@ -17,7 +17,7 @@ from migen.fhdl.structure import _Fragment
from litex.build.generic_platform import *
from litex.build import tools
from litex.build.lattice import common
from litex.build.yosys_nextpnr_toolchain import YosysNextPNRToolchain
from litex.build.yosys_nextpnr_toolchain import YosysNextPNRToolchain, yosys_nextpnr_args, yosys_nextpnr_argdict
# LatticeTrellisToolchain --------------------------------------------------------------------------
@ -151,11 +151,7 @@ class LatticeTrellisToolchain(YosysNextPNRToolchain):
def trellis_args(parser):
toolchain_group = parser.add_argument_group(title="Toolchain options")
toolchain_group.add_argument("--yosys-nowidelut", action="store_true", help="Use Yosys's nowidelut mode.")
toolchain_group.add_argument("--yosys-abc9", action="store_true", help="Use Yosys's abc9 mode.")
toolchain_group.add_argument("--nextpnr-timingstrict", action="store_true", help="Use strict Timing mode (Build will fail when Timings are not met).")
toolchain_group.add_argument("--nextpnr-ignoreloops", action="store_true", help="Ignore combinatorial loops in Timing Analysis.")
toolchain_group.add_argument("--nextpnr-seed", default=1, type=int, help="Set Nextpnr's seed.")
yosys_nextpnr_args(toolchain_group)
toolchain_group.add_argument("--ecppack-bootaddr", default=0, help="Set boot address for next image.")
toolchain_group.add_argument("--ecppack-spimode", default=None, help="Set slave SPI programming mode.")
toolchain_group.add_argument("--ecppack-freq", default=None, help="Set SPI MCLK frequency.")
@ -163,13 +159,9 @@ def trellis_args(parser):
def trellis_argdict(args):
return {
"nowidelut": args.yosys_nowidelut,
"abc9": args.yosys_abc9,
"timingstrict": args.nextpnr_timingstrict,
"ignoreloops": args.nextpnr_ignoreloops,
**yosys_nextpnr_argdict(args),
"bootaddr": args.ecppack_bootaddr,
"spimode": args.ecppack_spimode,
"freq": float(args.ecppack_freq) if args.ecppack_freq is not None else None,
"compress": args.ecppack_compress,
"seed": args.nextpnr_seed,
}


@ -92,3 +92,15 @@ class NextPNRWrapper():
return base_cmd
else:
raise ValueError("Invalid target type")
def nextpnr_args(parser):
parser.add_argument("--nextpnr-timingstrict", action="store_true", help="Use strict Timing mode (Build will fail when Timings are not met).")
parser.add_argument("--nextpnr-ignoreloops", action="store_true", help="Ignore combinatorial loops in Timing Analysis.")
parser.add_argument("--nextpnr-seed", default=1, type=int, help="Set Nextpnr's seed.")
def nextpnr_argdict(args):
return {
"timingstrict": args.nextpnr_timingstrict,
"ignoreloops": args.nextpnr_ignoreloops,
"seed": args.nextpnr_seed,
}


@ -99,20 +99,14 @@ class XilinxVivadoToolchain(GenericToolchain):
def __init__(self):
super().__init__()
self.bitstream_commands = []
self.additional_commands = []
self.pre_synthesis_commands = XilinxVivadoCommands()
self.pre_placement_commands = XilinxVivadoCommands()
self.pre_routing_commands = XilinxVivadoCommands()
self.incremental_implementation = False
self.vivado_synth_directive = "default"
self.opt_directive = "default"
self.vivado_place_directive = "default"
self.vivado_post_place_phys_opt_directive = None
self.vivado_route_directive = "default"
self.vivado_post_route_phys_opt_directive = "default"
self._synth_mode = "vivado"
self._enable_xpm = False
self.bitstream_commands = []
self.additional_commands = []
self.pre_synthesis_commands = XilinxVivadoCommands()
self.pre_placement_commands = XilinxVivadoCommands()
self.pre_routing_commands = XilinxVivadoCommands()
self.incremental_implementation = False
self._synth_mode = "vivado"
self._enable_xpm = False
def finalize(self):
# Convert clocks and false path to platform commands
@ -120,13 +114,28 @@ class XilinxVivadoToolchain(GenericToolchain):
self._build_false_path_constraints()
def build(self, platform, fragment,
synth_mode = "vivado",
enable_xpm = False,
synth_mode = "vivado",
enable_xpm = False,
vivado_synth_directive = "default",
opt_directive = "default",
vivado_place_directive = "default",
vivado_post_place_phys_opt_directive = None,
vivado_route_directive = "default",
vivado_post_route_phys_opt_directive = "default",
vivado_max_threads = None,
**kwargs):
self._synth_mode = synth_mode
self._enable_xpm = enable_xpm
self.vivado_synth_directive = vivado_synth_directive
self.opt_directive = opt_directive
self.vivado_place_directive = vivado_place_directive
self.vivado_post_place_phys_opt_directive = vivado_post_place_phys_opt_directive
self.vivado_route_directive = vivado_route_directive
self.vivado_post_route_phys_opt_directive = vivado_post_route_phys_opt_directive
self.vivado_max_threads = vivado_max_threads
return GenericToolchain.build(self, platform, fragment, **kwargs)
# Constraints (.xdc) ---------------------------------------------------------------------------
@ -211,6 +220,9 @@ class XilinxVivadoToolchain(GenericToolchain):
tcl.append(f"create_project -force -name {self._build_name} -part {self.platform.device}")
tcl.append("set_msg_config -id {Common 17-55} -new_severity {Warning}")
if self.vivado_max_threads:
tcl.append(f"set_param general.maxThreads {self.vivado_max_threads}")
# Enable Xilinx Parameterized Macros
if self._enable_xpm:
tcl.append("\n# Enable Xilinx Parameterized Macros\n")
@ -375,7 +387,23 @@ class XilinxVivadoToolchain(GenericToolchain):
def vivado_build_args(parser):
toolchain_group = parser.add_argument_group(title="Toolchain options")
toolchain_group.add_argument("--synth-mode", default="vivado", help="Synthesis mode (vivado or yosys).")
toolchain_group.add_argument("--synth-mode", default="vivado", help="Synthesis mode (vivado or yosys).")
toolchain_group.add_argument("--vivado-synth-directive", default="default", help="Specify synthesis directive.")
toolchain_group.add_argument("--vivado-opt-directive", default="default", help="Specify opt directive.")
toolchain_group.add_argument("--vivado-place-directive", default="default", help="Specify place directive.")
toolchain_group.add_argument("--vivado-post-place-phys-opt-directive", default=None, help="Specify phys opt directive.")
toolchain_group.add_argument("--vivado-route-directive", default="default", help="Specify route directive.")
toolchain_group.add_argument("--vivado-post-route-phys-opt-directive", default="default", help="Specify phys opt directive.")
toolchain_group.add_argument("--vivado-max-threads", default=None, help="Limit the max threads vivado is allowed to use.")
def vivado_build_argdict(args):
return {"synth_mode": args.synth_mode}
return {
"synth_mode" : args.synth_mode,
"vivado_synth_directive" : args.vivado_synth_directive,
"opt_directive" : args.vivado_opt_directive,
"vivado_place_directive" : args.vivado_place_directive,
"vivado_post_place_phys_opt_directive" : args.vivado_post_place_phys_opt_directive,
"vivado_route_directive" : args.vivado_route_directive,
"vivado_post_route_phys_opt_directive" : args.vivado_post_route_phys_opt_directive,
"vivado_max_threads" : args.vivado_max_threads
}
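A sketch of how the expanded argdict flows from the command line into the toolchain; the directive value here is just an example (any directive accepted by Vivado can be passed):

import argparse
from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict

parser = argparse.ArgumentParser()
vivado_build_args(parser)
args = parser.parse_args(["--vivado-max-threads", "8", "--vivado-synth-directive", "AreaOptimized_high"])

# Keys map 1:1 onto XilinxVivadoToolchain.build() keyword arguments.
print(vivado_build_argdict(args)["vivado_max_threads"])   # "8"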


@ -12,8 +12,8 @@ from shutil import which
from litex.build import tools
from litex.build.generic_toolchain import GenericToolchain
from litex.build.nextpnr_wrapper import NextPNRWrapper
from litex.build.yosys_wrapper import YosysWrapper
from litex.build.nextpnr_wrapper import NextPNRWrapper, nextpnr_args, nextpnr_argdict
from litex.build.yosys_wrapper import YosysWrapper, yosys_args, yosys_argdict
# YosysNextPNRToolchain ----------------------------------------------------------------------------
@ -86,6 +86,7 @@ class YosysNextPNRToolchain(GenericToolchain):
def build(self, platform, fragment,
nowidelut = False,
abc9 = False,
flow3 = False,
timingstrict = False,
ignoreloops = False,
seed = 1,
@ -100,6 +101,8 @@ class YosysNextPNRToolchain(GenericToolchain):
than native for the target (Yosys)
abc9 : str
use new ABC9 flow (Yosys)
flow3 : str
use ABC9 with flow3 (Yosys)
timingstrict : list
check timing failures (nextpnr)
ignoreloops : str
@ -110,6 +113,9 @@ class YosysNextPNRToolchain(GenericToolchain):
self._nowidelut = nowidelut
self._abc9 = abc9
if flow3:
self._abc9 = True
self._yosys_cmds.append("scratchpad -copy abc9.script.flow3 abc9.script")
self.timingstrict = timingstrict
self.ignoreloops = ignoreloops
self.seed = seed
@ -210,3 +216,13 @@ class YosysNextPNRToolchain(GenericToolchain):
def build_io_constraints(self):
raise NotImplementedError("GenericToolchain.build_io_constraints must be overloaded.")
def yosys_nextpnr_args(parser):
yosys_args(parser)
nextpnr_args(parser)
def yosys_nextpnr_argdict(args):
return {
**yosys_argdict(args),
**nextpnr_argdict(args),
}
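A minimal sketch of what the combined helpers expose, based on the yosys_args/nextpnr_args definitions added elsewhere in this commit:

import argparse
from litex.build.yosys_nextpnr_toolchain import yosys_nextpnr_args, yosys_nextpnr_argdict

parser = argparse.ArgumentParser()
yosys_nextpnr_args(parser)      # Registers --yosys-nowidelut/-abc9/-flow3 and --nextpnr-timingstrict/-ignoreloops/-seed.
args = parser.parse_args([])    # Defaults only.
print(yosys_nextpnr_argdict(args))
# {'nowidelut': False, 'abc9': False, 'flow3': False, 'timingstrict': False, 'ignoreloops': False, 'seed': 1}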


@ -137,3 +137,15 @@ class YosysWrapper():
return base_cmd
else:
raise ValueError("Invalid script type")
def yosys_args(parser):
parser.add_argument("--yosys-nowidelut", action="store_true", help="Use Yosys's nowidelut mode.")
parser.add_argument("--yosys-abc9", action="store_true", help="Use Yosys's abc9 mode.")
parser.add_argument("--yosys-flow3", action="store_true", help="Use Yosys's abc9 mode with the flow3 script.")
def yosys_argdict(args):
return {
"nowidelut": args.yosys_nowidelut,
"abc9": args.yosys_abc9,
"flow3": args.yosys_flow3,
}


@ -98,12 +98,16 @@ class NaxRiscv(CPU):
cpu_group.add_argument("--xlen", default=32, help="Specify the RISC-V data width.")
cpu_group.add_argument("--with-jtag-tap", action="store_true", help="Add a embedded JTAG tap for debugging")
cpu_group.add_argument("--with-jtag-instruction", action="store_true", help="Add a JTAG instruction port which implement tunneling for debugging (TAP not included)")
cpu_group.add_argument("--update-repo", default="recommended", choices=["latest","wipe+latest","recommended","wipe+recommended","no"], help="Specify how the NaxRiscv & SpinalHDL repo should be updated (latest: update to HEAD, recommended: Update to known compatible version, no: Don't update, wipe+*: Do clean&reset before checkout)")
cpu_group.add_argument("--no-netlist-cache", action="store_true", help="Always (re-)build the netlist")
@staticmethod
def args_read(args):
print(args)
NaxRiscv.jtag_tap = args.with_jtag_tap
NaxRiscv.jtag_instruction = args.with_jtag_instruction
NaxRiscv.update_repo = args.update_repo
NaxRiscv.no_netlist_cache = args.no_netlist_cache
if args.scala_file:
NaxRiscv.scala_files = args.scala_file
if args.scala_args:
@ -218,8 +222,11 @@ class NaxRiscv(CPU):
options = dir
), shell=True)
# Use specific SHA1 (Optional).
print(f"Updating {name} Git repository...")
os.chdir(os.path.join(dir))
os.system(f"cd {dir} && git checkout {branch} && git pull && git checkout {hash}")
wipe_cmd = "&& git clean --force -d -x && git reset --hard" if "wipe" in NaxRiscv.update_repo else ""
checkout_cmd = f"&& git checkout {hash}" if hash is not None else ""
subprocess.check_call(f"cd {dir} {wipe_cmd} && git checkout {branch} && git pull {checkout_cmd}", shell=True)
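For example, with update_repo="wipe+recommended" and the pinned NaxRiscv hash used below, the composed command is (with <dir> as a placeholder):

cd <dir> && git clean --force -d -x && git reset --hard && git checkout main && git pull && git checkout 518d2713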
# Netlist Generation.
@staticmethod
@ -228,8 +235,9 @@ class NaxRiscv(CPU):
ndir = os.path.join(vdir, "ext", "NaxRiscv")
sdir = os.path.join(vdir, "ext", "SpinalHDL")
NaxRiscv.git_setup("NaxRiscv", ndir, "https://github.com/SpinalHDL/NaxRiscv.git" , "main", "518d2713")
NaxRiscv.git_setup("SpinalHDL", sdir, "https://github.com/SpinalHDL/SpinalHDL.git", "dev" , "56400992")
if NaxRiscv.update_repo != "no":
NaxRiscv.git_setup("NaxRiscv", ndir, "https://github.com/SpinalHDL/NaxRiscv.git" , "main", "518d2713" if NaxRiscv.update_repo=="recommended" else None)
NaxRiscv.git_setup("SpinalHDL", sdir, "https://github.com/SpinalHDL/SpinalHDL.git", "dev" , "56400992" if NaxRiscv.update_repo=="recommended" else None)
gen_args = []
gen_args.append(f"--netlist-name={NaxRiscv.netlist_name}")
@ -259,7 +267,7 @@ class NaxRiscv(CPU):
def add_sources(self, platform):
vdir = get_data_mod("cpu", "naxriscv").data_location
print(f"NaxRiscv netlist : {self.netlist_name}")
if not os.path.exists(os.path.join(vdir, self.netlist_name + ".v")):
if NaxRiscv.no_netlist_cache or not os.path.exists(os.path.join(vdir, self.netlist_name + ".v")):
self.generate_netlist(self.reset_address)
# Add RAM.


@ -51,9 +51,9 @@ CPU_VARIANTS = {
"linuxq": "freechips.rocketchip.system.LitexLinuxQConfig",
"linux2q": "freechips.rocketchip.system.LitexLinux2QConfig",
"full": "freechips.rocketchip.system.LitexFullConfig",
"fulld": "freechips.rocketchip.system.LitexFullConfig",
"fulld": "freechips.rocketchip.system.LitexFullDConfig",
"full4d": "freechips.rocketchip.system.LitexFull4DConfig",
"fullq": "freechips.rocketchip.system.LitexFullConfig",
"fullq": "freechips.rocketchip.system.LitexFullQConfig",
"full4q": "freechips.rocketchip.system.LitexFull4QConfig",
}


@ -155,7 +155,7 @@ class WishboneDMAWriter(Module, AutoCSR):
if with_csr:
self.add_csr()
def add_csr(self, default_base=0, default_length=0, default_enable=0, default_loop=0):
def add_csr(self, default_base=0, default_length=0, default_enable=0, default_loop=0, ready_on_idle=1):
self._sink = self.sink
self.sink = stream.Endpoint([("data", self.bus.data_width)])
@ -182,13 +182,13 @@ class WishboneDMAWriter(Module, AutoCSR):
self.submodules += fsm
self.comb += fsm.reset.eq(~self._enable.storage)
fsm.act("IDLE",
self.sink.ready.eq(1),
self.sink.ready.eq(ready_on_idle),
NextValue(offset, 0),
NextState("RUN"),
)
fsm.act("RUN",
self._sink.valid.eq(self.sink.valid),
self._sink.last.eq(offset == (length - 1)),
self._sink.last.eq(self.sink.last | (offset + 1 == length)),
self._sink.address.eq(base + offset),
self._sink.data.eq(self.sink.data),
self.sink.ready.eq(self._sink.ready),


@ -12,6 +12,7 @@
from migen import *
from migen.genlib.cdc import AsyncResetSynchronizer, MultiReg
from litex.build.generic_platform import *
from litex.soc.interconnect import stream
# JTAG TAP FSM -------------------------------------------------------------------------------------
@ -491,3 +492,55 @@ class JTAGPHY(Module):
NextState("XFER-READY")
)
)
# Efinix / TRION -----------------------------------------------------------------------------------
class EfinixJTAG(Module):
# id refers to the JTAG_USER{id} resource.
def __init__(self, platform, id=1):
self.name = f"jtag_{id}"
self.platform = platform
self.id = id
_io = [
(self.name, 0,
Subsignal("CAPTURE", Pins(1)),
Subsignal("DRCK", Pins(1)),
Subsignal("RESET", Pins(1)),
Subsignal("RUNTEST", Pins(1)),
Subsignal("SEL", Pins(1)),
Subsignal("SHIFT", Pins(1)),
Subsignal("TCK", Pins(1)),
Subsignal("TDI", Pins(1)),
Subsignal("TMS", Pins(1)),
Subsignal("UPDATE", Pins(1)),
Subsignal("TDO", Pins(1)),
),
]
platform.add_extension(_io)
self.pins = pins = platform.request(self.name)
for pin in pins.flatten():
self.platform.toolchain.excluded_ios.append(pin.backtrace[-1][0])
block = {}
block["type"] = "JTAG"
block["name"] = self.name
block["id"] = self.id
block["pins"] = pins
self.platform.toolchain.ifacewriter.blocks.append(block)
def bind_vexriscv_smp(self, cpu):
self.comb += [
# JTAG -> CPU.
cpu.jtag_clk.eq( self.pins.TCK),
cpu.jtag_enable.eq( self.pins.SEL),
cpu.jtag_capture.eq( self.pins.CAPTURE),
cpu.jtag_shift.eq( self.pins.SHIFT),
cpu.jtag_update.eq( self.pins.UPDATE),
cpu.jtag_reset.eq( self.pins.RESET),
cpu.jtag_tdi.eq( self.pins.TDI),
# CPU -> JTAG.
self.pins.TDO.eq(cpu.jtag_tdo),
]
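A hedged instantiation sketch, assuming an Efinix platform and a VexRiscv-SMP CPU exposing the jtag_* signals wired above; the SoC attribute names are illustrative:

# Inside a SoC's constructor, once self.cpu exists:
self.submodules.jtag = jtag = EfinixJTAG(platform, id=1)   # Claims the JTAG_USER1 block.
jtag.bind_vexriscv_smp(self.cpu)                           # TCK/SEL/CAPTURE/SHIFT/UPDATE/RESET/TDI -> CPU, TDO <- CPU.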


@ -163,6 +163,9 @@ video_data_layout = [
("b", 8),
]
# DVI Color <-> channel mapping --------------------------------------------------------------------
_dvi_c2d = {"b": 0, "g": 1, "r": 2}
class VideoTimingGenerator(Module, AutoCSR):
def __init__(self, default_video_timings="800x600@60Hz"):
# Check / Get Video Timings (can be str or dict)
@ -790,18 +793,17 @@ class VideoHDMIPHY(Module):
# Encode/Serialize Datas.
for pol in drive_pols:
for color in ["r", "g", "b"]:
for color, channel in _dvi_c2d.items():
# TMDS Encoding.
encoder = ClockDomainsRenamer(clock_domain)(TMDSEncoder())
setattr(self.submodules, f"{color}_encoder", encoder)
self.comb += encoder.d.eq(getattr(sink, color))
self.comb += encoder.c.eq(Cat(sink.hsync, sink.vsync) if color == "r" else 0)
self.comb += encoder.c.eq(Cat(sink.hsync, sink.vsync) if channel == 0 else 0)
self.comb += encoder.de.eq(sink.de)
# 10:1 Serialization + Pseudo Differential Signaling.
c2d = {"r": 0, "g": 1, "b": 2}
data_i = encoder.out if color not in pn_swap else ~encoder.out
data_o = getattr(pads, f"data{c2d[color]}_{pol}")
data_o = getattr(pads, f"data{channel}_{pol}")
serializer = VideoHDMI10to1Serializer(
data_i = {"p":data_i, "n": ~data_i}[pol],
data_o = data_o,
@ -828,12 +830,12 @@ class VideoGowinHDMIPHY(Module):
o_OB = pads.clk_n,
)
for color in ["r", "g", "b"]:
for color, channel in _dvi_c2d.items():
# TMDS Encoding.
encoder = ClockDomainsRenamer(clock_domain)(TMDSEncoder())
setattr(self.submodules, f"{color}_encoder", encoder)
self.comb += encoder.d.eq(getattr(sink, color))
self.comb += encoder.c.eq(Cat(sink.hsync, sink.vsync) if color == "r" else 0)
self.comb += encoder.c.eq(Cat(sink.hsync, sink.vsync) if channel == 0 else 0)
self.comb += encoder.de.eq(sink.de)
# 10:1 Serialization + Differential Signaling.
@ -847,11 +849,10 @@ class VideoGowinHDMIPHY(Module):
o_Q = pad_o,
)
c2d = {"r": 0, "g": 1, "b": 2}
self.specials += Instance("ELVDS_OBUF",
i_I = pad_o,
o_O = getattr(pads, f"data{c2d[color]}_p"),
o_OB = getattr(pads, f"data{c2d[color]}_n"),
o_O = getattr(pads, f"data{channel}_p"),
o_OB = getattr(pads, f"data{channel}_n"),
)
@ -872,13 +873,13 @@ class VideoS6HDMIPHY(Module):
self.specials += Instance("OBUFDS", i_I=pads_clk, o_O=pads.clk_p, o_OB=pads.clk_n)
# Encode/Serialize Datas.
for color in ["r", "g", "b"]:
for color, channel in _dvi_c2d.items():
# TMDS Encoding.
encoder = ClockDomainsRenamer(clock_domain)(TMDSEncoder())
setattr(self.submodules, f"{color}_encoder", encoder)
self.comb += encoder.d.eq(getattr(sink, color))
self.comb += encoder.c.eq(Cat(sink.hsync, sink.vsync) if color == "r" else 0)
self.comb += encoder.c.eq(Cat(sink.hsync, sink.vsync) if channel == 0 else 0)
self.comb += encoder.de.eq(sink.de)
# 10:1 Serialization + Differential Signaling.
@ -889,9 +890,8 @@ class VideoS6HDMIPHY(Module):
clock_domain = clock_domain,
)
setattr(self.submodules, f"{color}_serializer", serializer)
c2d = {"r": 0, "g": 1, "b": 2}
pad_p = getattr(pads, f"data{c2d[color]}_p")
pad_n = getattr(pads, f"data{c2d[color]}_n")
pad_p = getattr(pads, f"data{channel}_p")
pad_n = getattr(pads, f"data{channel}_n")
self.specials += Instance("OBUFDS", i_I=pad_o, o_O=pad_p, o_OB=pad_n)
# HDMI (Xilinx 7-Series).
@ -953,13 +953,13 @@ class VideoS7HDMIPHY(Module):
self.specials += Instance("OBUFDS", i_I=pads_clk, o_O=pads.clk_p, o_OB=pads.clk_n)
# Encode/Serialize Datas.
for color in ["r", "g", "b"]:
for color, channel in _dvi_c2d.items():
# TMDS Encoding.
encoder = ClockDomainsRenamer(clock_domain)(TMDSEncoder())
self.submodules += encoder
self.comb += encoder.d.eq(getattr(sink, color))
self.comb += encoder.c.eq(Cat(sink.hsync, sink.vsync) if color == "r" else 0)
self.comb += encoder.c.eq(Cat(sink.hsync, sink.vsync) if channel == 0 else 0)
self.comb += encoder.de.eq(sink.de)
# 10:1 Serialization + Differential Signaling.
@ -970,9 +970,8 @@ class VideoS7HDMIPHY(Module):
clock_domain = clock_domain,
)
self.submodules += serializer
c2d = {"r": 0, "g": 1, "b": 2}
pad_p = getattr(pads, f"data{c2d[color]}_p")
pad_n = getattr(pads, f"data{c2d[color]}_n")
pad_p = getattr(pads, f"data{channel}_p")
pad_n = getattr(pads, f"data{channel}_n")
self.specials += Instance("OBUFDS", i_I=pad_o, o_O=pad_p, o_OB=pad_n)
@ -1010,12 +1009,12 @@ class VideoS7GTPHDMIPHY(Module):
self.submodules.pll = pll = GTPQuadPLL(refclk, clk_freq, 1.485e9)
# Encode/Serialize Datas.
for color in ["r", "g", "b"]:
for color, channel in _dvi_c2d.items():
# TMDS Encoding.
encoder = ClockDomainsRenamer(clock_domain)(TMDSEncoder())
self.submodules += encoder
self.comb += encoder.d.eq(getattr(sink, color))
self.comb += encoder.c.eq(Cat(sink.hsync, sink.vsync) if color == "r" else 0)
self.comb += encoder.c.eq(Cat(sink.hsync, sink.vsync) if channel == 0 else 0)
self.comb += encoder.de.eq(sink.de)
# 10:20 (SerDes has a minimal 20:1 Serialization ratio).
@ -1031,14 +1030,13 @@ class VideoS7GTPHDMIPHY(Module):
self.comb += cdc.source.ready.eq(1) # No backpressure.
# 20:1 Serialization + Differential Signaling.
c2d = {"r": 0, "g": 1, "b": 2}
class GTPPads:
def __init__(self, p, n):
self.p = p
self.n = n
tx_pads = GTPPads(p=getattr(pads, f"data{c2d[color]}_p"), n=getattr(pads, f"data{c2d[color]}_n"))
tx_pads = GTPPads(p=getattr(pads, f"data{channel}_p"), n=getattr(pads, f"data{channel}_n"))
# FIXME: Find a way to avoid RX pads.
rx_pads = GTPPads(p=getattr(pads, f"rx{c2d[color]}_p"), n=getattr(pads, f"rx{c2d[color]}_n"))
rx_pads = GTPPads(p=getattr(pads, f"rx{channel}_p"), n=getattr(pads, f"rx{channel}_n"))
gtp = GTP(pll, tx_pads, rx_pads=rx_pads, sys_clk_freq=sys_clk_freq,
tx_polarity = 1, # FIXME: Specific to Decklink Mini 4K, make it configurable.
tx_buffer_enable = True,


@ -41,6 +41,12 @@ def get_mem_regions(filename_or_regions, offset):
def get_mem_data(filename_or_regions, data_width=32, endianness="big", mem_size=None, offset=0):
assert data_width in [32, 64]
assert endianness in ["big", "little"]
# Return empty list if no filename or regions.
if filename_or_regions is None:
return []
# Create memory regions.
regions = get_mem_regions(filename_or_regions, offset)


@ -15,12 +15,13 @@ from litex.soc.interconnect import stream
from litex.build.generic_platform import *
from litex.soc.interconnect.axi.axi_common import *
from litex.soc.interconnect.axi.axi_stream import AXIStreamInterface
# AXI Definition -----------------------------------------------------------------------------------
def ax_description(address_width, id_width=0, user_width=0):
def ax_description(address_width):
# * present for interconnect with other cores but not used by LiteX.
ax = [
return [
("addr", address_width), # Address Width.
("burst", 2), # Burst type.
("len", 8), # Number of data (-1) transfers (up to 256).
@ -31,41 +32,21 @@ def ax_description(address_width, id_width=0, user_width=0):
("qos", 4), # *
("region", 4), # *
]
if id_width:
ax += [("id", id_width)] # ID Width.
if user_width:
ax += [("user", user_width)] # *
return ax
def w_description(data_width, id_width=0, user_width=0):
w = [
def w_description(data_width):
return [
("data", data_width),
("strb", data_width//8),
]
if id_width:
w += [("id", id_width)]
if user_width:
w += [("user", user_width)]
return w
def b_description(id_width=0, user_width=0):
b = [("resp", 2)]
if id_width:
b += [("id", id_width)]
if user_width:
b += [("user", user_width)]
return b
def b_description():
return [("resp", 2)]
def r_description(data_width, id_width=0, user_width=0):
r = [
def r_description(data_width):
return [
("resp", 2),
("data", data_width),
]
if id_width:
r += [("id", id_width)]
if user_width:
r += [("user", user_width)]
return r
class AXIInterface:
def __init__(self, data_width=32, address_width=32, id_width=1, clock_domain="sys", name=None, bursting=False,
@ -73,18 +54,45 @@ class AXIInterface:
w_user_width = 0,
b_user_width = 0,
ar_user_width = 0,
r_user_width = 0):
r_user_width = 0
):
# Parameters.
self.data_width = data_width
self.address_width = address_width
self.id_width = id_width
self.clock_domain = clock_domain
self.bursting = bursting # FIXME: Use or add check.
self.aw = stream.Endpoint(ax_description(address_width, id_width, aw_user_width), name=name)
self.w = stream.Endpoint(w_description(data_width, id_width, w_user_width), name=name)
self.b = stream.Endpoint(b_description(id_width, b_user_width), name=name)
self.ar = stream.Endpoint(ax_description(address_width, id_width, ar_user_width), name=name)
self.r = stream.Endpoint(r_description(data_width, id_width, r_user_width), name=name)
# Write Channels.
# ---------------
self.aw = AXIStreamInterface(name=name,
layout = ax_description(address_width),
id_width = id_width,
user_width = aw_user_width
)
self.w = AXIStreamInterface(name=name,
layout = w_description(data_width),
id_width = id_width,
user_width = w_user_width
)
self.b = AXIStreamInterface(name=name,
layout = b_description(),
id_width = id_width,
user_width = b_user_width
)
# Read Channels.
# --------------
self.ar = AXIStreamInterface(name=name,
layout = ax_description(address_width),
id_width = id_width,
user_width = ar_user_width
)
self.r = AXIStreamInterface(name=name,
layout = r_description(data_width),
id_width = id_width,
user_width = r_user_width
)
def connect_to_pads(self, pads, mode="master"):
return connect_to_pads(self, pads, mode, axi_full=True)
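A short sketch of what the refactored construction yields; the import path follows LiteX's axi package and the field lists are indicative (see the *_description helpers above):

from litex.soc.interconnect.axi import AXIInterface

axi = AXIInterface(data_width=64, address_width=32, id_width=4, aw_user_width=8)
# Each channel is now a full AXIStreamInterface: the channel fields live in the payload,
# while id/dest/user always exist as params (minimum width 1).
print([name for name, _ in axi.aw.description.payload_layout])   # addr, burst, len, ..., qos, region
print([name for name, _ in axi.aw.description.param_layout])     # ['id', 'dest', 'user']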
@ -92,9 +100,14 @@ class AXIInterface:
def get_ios(self, bus_name="wb"):
subsignals = []
for channel in ["aw", "w", "b", "ar", "r"]:
# Control Signals.
for name in ["valid", "ready"] + (["last"] if channel in ["w", "r"] else []):
subsignals.append(Subsignal(channel + name, Pins(1)))
for name, width in getattr(self, channel).description.payload_layout:
# Payload/Params Signals.
channel_layout = (getattr(self, channel).description.payload_layout +
getattr(self, channel).description.param_layout)
for name, width in channel_layout:
subsignals.append(Subsignal(channel + name, Pins(width)))
ios = [(bus_name , 0) + tuple(subsignals)]
return ios
@ -185,9 +198,11 @@ class AXIUpConverter(Module):
description_to = [("data", dw_to), ("strb", dw_to//8)],
)
self.submodules += w_converter
self.comb += axi_from.w.connect(w_converter.sink, omit={"id"})
self.comb += axi_from.w.connect(w_converter.sink, omit={"id", "dest", "user"})
self.comb += w_converter.source.connect(axi_to.w)
self.comb += axi_to.w.id.eq(axi_from.w.id)
self.comb += axi_to.w.dest.eq(axi_from.w.dest)
self.comb += axi_to.w.user.eq(axi_from.w.user)
# B Channel.
self.comb += axi_to.b.connect(axi_from.b)
@ -207,11 +222,12 @@ class AXIUpConverter(Module):
description_to = [("data", dw_from)],
)
self.submodules += r_converter
self.comb += axi_to.r.connect(r_converter.sink, omit={"id", "resp"})
self.comb += axi_to.r.connect(r_converter.sink, omit={"id", "dest", "user", "resp"})
self.comb += r_converter.source.connect(axi_from.r)
self.comb += axi_from.r.resp.eq(axi_to.r.resp)
self.comb += axi_from.r.id.eq(axi_to.r.id)
self.comb += axi_from.r.user.eq(axi_to.r.user)
self.comb += axi_from.r.dest.eq(axi_to.r.dest)
class AXIDownConverter(Module):
def __init__(self, axi_from, axi_to):


@ -25,9 +25,10 @@ class AXI2AXILite(Module):
assert axi.data_width == axi_lite.data_width
assert axi.address_width == axi_lite.address_width
ax_buffer = stream.Buffer(ax_description(axi.address_width, axi.id_width))
ax_burst = stream.Endpoint(ax_description(axi.address_width, axi.id_width))
ax_beat = stream.Endpoint(ax_description(axi.address_width, axi.id_width))
ax_burst = AXIStreamInterface(layout=ax_description(axi.address_width), id_width=axi.id_width)
ax_beat = AXIStreamInterface(layout=ax_description(axi.address_width), id_width=axi.id_width)
ax_buffer = stream.Buffer(ax_burst.description)
self.comb += ax_burst.connect(ax_buffer.sink)
ax_burst2beat = AXIBurst2Beat(ax_buffer.source, ax_beat)
self.submodules += ax_buffer, ax_burst2beat

View file

@ -17,29 +17,28 @@ from litex.soc.interconnect.axi.axi_common import *
# AXI-Stream Definition ----------------------------------------------------------------------------
class AXIStreamInterface(stream.Endpoint):
def __init__(self, data_width=32, keep_width=0, id_width=0, dest_width=0, user_width=0):
def __init__(self, data_width=0, keep_width=None, id_width=0, dest_width=0, user_width=0, layout=None, name=None):
self.data_width = data_width
self.keep_width = keep_width
self.keep_width = data_width//8 if keep_width is None else keep_width
self.id_width = id_width
self.dest_width = dest_width
self.user_width = user_width
# Define Payload Layout.
payload_layout = [("data", data_width)]
if self.keep_width:
payload_layout += [("keep", keep_width)]
if layout is not None:
payload_layout = layout
else:
payload_layout = [("data", max(1, self.data_width))]
payload_layout += [("keep", max(1, self.keep_width))]
# Define Param Layout.
param_layout = []
if self.id_width:
param_layout += [("id", id_width)]
if self.dest_width:
param_layout += [("dest", dest_width)]
if self.user_width:
param_layout += [("user", user_width)]
param_layout = []
param_layout += [("id", max(1, self.id_width))]
param_layout += [("dest", max(1, self.dest_width))]
param_layout += [("user", max(1, self.user_width))]
# Create Endpoint.
stream.Endpoint.__init__(self, stream.EndpointDescription(payload_layout, param_layout))
stream.Endpoint.__init__(self, stream.EndpointDescription(payload_layout, param_layout), name=name)
def get_ios(self, bus_name="axi"):
# Control Signals.
@ -49,18 +48,10 @@ class AXIStreamInterface(stream.Endpoint):
Subsignal("tready", Pins(1)),
]
# Payload Signals.
subsignals += [Subsignal("tdata", Pins(self.data_width))]
if self.keep_width:
subsignals += [Subsignal("tkeep", Pins(self.keep_width))]
# Param Signals.
if self.id_width:
subsignals += [Subsignal("tid", Pins(self.id_width))]
if self.dest_width:
subsignals += [Subsignal("tdest", Pins(self.dest_width))]
if self.user_width:
subsignals += [Subsignal("tuser", Pins(self.user_width))]
# Payload/Params Signals.
channel_layout = (self.description.payload_layout + self.description.param_layout)
for name, width in channel_layout:
subsignals.append(Subsignal(f"t{name}", Pins(width)))
ios = [(bus_name , 0) + tuple(subsignals)]
return ios
@ -74,15 +65,11 @@ class AXIStreamInterface(stream.Endpoint):
r.append(pads.tlast.eq(self.last))
# Payload Signals.
r.append(pads.tdata.eq(self.data))
if self.keep_width:
r.append(pads.tkeep.eq(self.keep))
r.append(pads.tkeep.eq(self.keep))
# Param Signals.
if self.id_width:
r.append(pads.tid.eq(self.id))
if self.dest_width:
r.append(pads.tdest.eq(self.dest))
if self.user_width:
r.append(pads.tuser.eq(self.user))
r.append(pads.tid.eq(self.id))
r.append(pads.tdest.eq(self.dest))
r.append(pads.tuser.eq(self.user))
if mode == "slave":
# Control Signals.
r.append(self.valid.eq(pads.tvalid))
@ -90,13 +77,9 @@ class AXIStreamInterface(stream.Endpoint):
r.append(self.last.eq(pads.tlast))
# Payload Signals.
r.append(self.data.eq(pads.tdata))
if self.keep_width:
r.append(self.keep.eq(pads.tkeep))
r.append(self.keep.eq(pads.tkeep))
# Param Signals.
if self.id_width:
r.append(self.id.eq(pads.tid))
if self.dest_width:
r.append(self.dest.eq(pads.tdest))
if self.user_width:
r.append(self.user.eq(pads.tuser))
r.append(self.id.eq(pads.tid))
r.append(self.dest.eq(pads.tdest))
r.append(self.user.eq(pads.tuser))
return r
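A minimal sketch of the revised defaults; the widths follow directly from the constructor above:

from litex.soc.interconnect.axi.axi_stream import AXIStreamInterface

axis = AXIStreamInterface(data_width=64, user_width=1)
print(axis.keep_width)                                  # 8  (defaults to data_width//8)
print(len(axis.data), len(axis.keep))                   # 64 8
print(len(axis.id), len(axis.dest), len(axis.user))     # 1 1 1  (params always present, min width 1)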


@ -111,7 +111,7 @@ void romboot(void)
static void timer0_load(unsigned int value) {
timer0_en_write(0);
timer0_reload_write(0);
#ifndef CONFIG_DISABLE_DELAYS
#ifndef CONFIG_BIOS_NO_DELAYS
timer0_load_write(value);
#else
timer0_load_write(0);


@ -7,6 +7,7 @@
# SPDX-License-Identifier: BSD-2-Clause
import os
import sys
import argparse
from litex.build.tools import replace_in_file
@ -23,6 +24,7 @@ def main():
# Copy contents to demo directory
os.system(f"cp {os.path.abspath(os.path.dirname(__file__))}/* demo")
os.system("chmod -R u+w demo") # Nix specific: Allow linker script to be modified.
# Update memory region.
replace_in_file("demo/linker.ld", "main_ram", args.mem)
@ -35,7 +37,8 @@ def main():
os.system("cp demo/demo.bin ./")
# Prepare flash boot image.
os.system("python3 -m litex.soc.software.crcfbigen demo.bin -o demo.fbi --fbi --little") # FIXME: Endianness.
python3 = sys.executable or "python3" # Nix specific: Reuse current Python executable if available.
os.system(f"{python3} -m litex.soc.software.crcfbigen demo.bin -o demo.fbi --fbi --little") # FIXME: Endianness.
if __name__ == "__main__":
main()


@ -96,9 +96,9 @@ class TestAXI(unittest.TestCase):
yield
# DUT
ax_burst = stream.Endpoint(ax_description(32, 32))
ax_beat = stream.Endpoint(ax_description(32, 32))
dut = AXIBurst2Beat(ax_burst, ax_beat)
ax_burst = AXIStreamInterface(layout=ax_description(32), id_width=32)
ax_beat = AXIStreamInterface(layout=ax_description(32), id_width=32)
dut = AXIBurst2Beat(ax_burst, ax_beat)
# Generate DUT input (bursts).
prng = random.Random(42)