import os
import time
from collections import deque
from typing import Optional
import ipywidgets
#import jpy_canvas
from ipycanvas import Canvas
import ipyevents as ipe
import numpy as np
from ipywidgets import IntSlider, VBox, HBox, Checkbox, Output, Text, RadioButtons, Tab
from numpy import array
import flatland.utils.rendertools as rt
from flatland.core.grid.grid4_utils import mirror
from flatland.envs.agent_utils import EnvAgent
from flatland.envs.line_generators import sparse_line_generator
from flatland.envs.observations import TreeObsForRailEnv
from flatland.envs.predictions import ShortestPathPredictorForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import sparse_rail_generator, empty_rail_generator
from flatland.utils.editor_interfaces import AbstractController, AbstractModel, AbstractView
class View(AbstractView):
""" The Jupyter Editor View - creates and holds the widgets comprising the Editor.
"""
def __init__(self, editor: AbstractModel, sGL="MPL", screen_width=800, screen_height=800):
self.editor = self.model = editor
self.sGL = sGL
self.xyScreen = (screen_width, screen_height)
self.controller: Optional[AbstractController] = None
def display(self):
self.wOutput.clear_output()
return self.wMain
def clear_output(self, oDummy):
self.log("clear output", oDummy)
self.wOutput.clear_output()
def init_canvas(self):
# update the rendertool with the env
self.new_env()
self.oRT.render_env(show=False, show_observations=False, show_predictions=False, show_rowcols=True)
img = self.oRT.get_image()
# NCW (new canvas widget)
#self.wImage = jpy_canvas.Canvas(img)
self.wImage = Canvas(width=img.shape[1], height=img.shape[0])
# NCW
#self.yxSize = self.wImage.data.shape[:2]
self.yxSize = img.shape[:2]
# NCW - not sure if we need a "writableData" any more
#self.writableData = np.copy(self.wImage.data) # writable copy of image - wid_img.data is somehow readonly
self.wImage.put_image_data(img)
self.writableData = np.copy(img)
# Register Canvas event handler
# NCW:
#self.wImage.register_move(self.controller.on_mouse_move)
#self.wImage.register_click(self.controller.on_click)
self.yxBase = self.oRT.gl.yxBase
self.nPixCell = self.oRT.gl.nPixCell
oEvent = ipe.Event(source=self.wImage, watched_events=['mousemove', 'click'])
oEvent.on_dom_event(self.controller.handle_event)
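# A minimal standalone sketch of the Canvas + ipyevents wiring used above, kept as a
# comment so nothing executes at class-definition time (handler name is illustrative only):
#
# canvas = Canvas(width=400, height=300)
# canvas.put_image_data(np.zeros((300, 400, 3), dtype=np.uint8))
# events = ipe.Event(source=canvas, watched_events=['mousemove', 'click'])
#
# def handle_event(event):
#     # event is a dict describing the DOM event, e.g. event['type'] is 'mousemove' or 'click'
#     print(event['type'])
#
# events.on_dom_event(handle_event)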
def draw_stroke(self):
pass
def new_env(self):
""" Tell the view to update its graphics when a new env is created.
"""
self.oRT = rt.RenderTool(self.editor.env, gl=self.sGL, show_debug=True,
screen_height=self.xyScreen[1], screen_width=self.xyScreen[0])
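# Note (assumption based on usage below): redraw() calls self.oRT.set_new_rail(), so in-place
# rail edits are picked up without rebuilding the RenderTool; new_env() is only needed when
# self.editor.env itself is replaced.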
def redraw(self):
""" Redraw the environment and agents.
This will erase the current image and draw a new one.
See also redisplay_image()
"""
with self.wOutput:
self.oRT.set_new_rail()
self.model.env.reset_agents()
for a in self.model.env.agents:
if not hasattr(a, 'old_position'):
a.old_position = a.position
if not hasattr(a, 'old_direction'):
a.old_direction = a.direction
self.oRT.render_env(show_agents=True,
show_inactive_agents=True,
show=False,
selected_agent=self.model.selected_agent,
show_observations=False,
show_rowcols=True,
)
img = self.oRT.get_image()
#self.wImage.data = img
#self.writableData = np.copy(self.wImage.data)
self.writableData = np.copy(img)
self.wImage.put_image_data(img)
# the size should only be updated on regenerate at most
#self.yxSize = self.wImage.data.shape[:2]
return img
def redisplay_image(self):
""" Redisplay the writable image in the Canvas.
Called during image editing, when minor changes are made directly to the image,
between redraws.
"""
#if self.writableData is not None:
# This updates the image in the browser to be the new edited version
# self.wImage.data = self.writableData
self.wImage.put_image_data(self.writableData)
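# Typical edit cycle (sketch): drag_path_element() below writes directly into self.writableData
# for immediate feedback while the mouse moves, redisplay_image() pushes that buffer to the
# Canvas, and a full redraw() re-renders from the env once the rail grid actually changes.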
def drag_path_element(self, x, y):
""" Add another x,y point to a drag gesture.
Just draw a black square on the in-memory copy of the image.
With ipyCanvas, we need to adjust the Event x,y coordinates to image x,y.
"""
#
yxRectEv = array(self.controller.getBoundingRectYX())
yxPointEv = array([y, x])
yxPointImg = np.clip(yxPointEv * self.yxSize / yxRectEv, 0, self.yxSize).astype(int)
#if x > 10 and x < self.yxSize[1] and y > 10 and y < self.yxSize[0]:
#self.writableData[(y - 2):(y + 2), (x - 2):(x + 2), :3] = 0
self.writableData[yxPointImg[0]-2:yxPointImg[0]+2, # y
yxPointImg[1]-2:yxPointImg[1]+2, # x
:3] = 0 # color
self.log("drag_path_element: ", x, y)
#else:
# self.log("Drag out of bounds: ", x, y)
def xy_to_rc(self, x, y):
""" Convert from x,y coordinates to row,col coordinates.
This is used to convert mouse clicks to row,col coordinates.
"""
yxRect = array(self.controller.getBoundingRectYX())
yxPoint = array([y, x]) - self.yxBase
rcRect = array([self.model.env.height, self.model.env.width])
# Scale factors for converting from pixels (y,x) to cells (r,c)
#nY = np.floor((self.yxSize[0] - self.yxBase[0]) / self.model.env.height)
#nX = np.floor((self.yxSize[1] - self.yxBase[1]) / self.model.env.width)
rc_cell = np.floor(np.clip(yxPoint / yxRect * rcRect, [0,0], rcRect - 1)).astype(int)
self.log("xy_to_rc: ", x, y, " -> ", rc_cell, type(rc_cell))
# Row from y
#rc_cell[0] = max(0, min(np.floor(yxPoint[0] / nY), self.model.env.height - 1))
# Column from x
# rc_cell[1] = max(0, min(np.floor(yxPoint[1] / nX), self.model.env.width - 1))
# Using numpy arrays for coords not currently supported downstream in the env, observations, etc
return tuple(rc_cell)
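# Worked example (assumed numbers, not from the source): with a 25 x 30 env (height x width),
# a bounding rect of (600, 600) and yxBase = (0, 0), a click at x=300, y=120 gives
#   rc_cell = floor(clip([120, 300] / [600, 600] * [25, 30], [0, 0], [24, 29])) = (5, 15)
# i.e. the click lands in row 5, column 15.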
def log(self, *args, **kwargs):
if self.wOutput:
with self.wOutput:
print(*args, **kwargs)
else:
print(*args, **kwargs)