Touchscreen GUI, and some other helpful things

  • I may be reinventing the wheel here, but I couldn't find any basic GUI elements for touchscreen devices like the HY-MiniSTM32V board I'm playing with. So, I started writing a simple, very basic GUI library.

    Right now, all I've done is create a pretty functional Button class, and only rectangular buttons. At the moment you can place a button on the screen by specifying location/size, color, and callbacks for press and release. The button is drawn in the specified color, with the border and the button text drawn in the inverse color.

    When pressed, the button highlights by inverting its colors (like a film negative) and the press callback is called; when released, it redraws in its original state and the release callback is called. Both callbacks are optional.

    Whole point is to easily draw buttons on a touchscreen that can then be used to control things.

    Code follows. As this file stands, it creates 4 demo buttons on the screen when loaded, one of which is labeled "BL off" which turns the backlight (on the HY-MiniSTM32V) off.

    echo(0);
    ts=require("Touchscreen");
    
    function unpack565RGB(rgb) {
      return {
        R : (rgb & 0b1111100000000000) >> 8,
        G : (rgb & 0b11111100000) >> 3,
        B : (rgb & 0b11111) << 3
      };
    }
    
    function unpack888RGB(rgb) {
      return {
        R : (rgb & 0xff0000) >> 16,
        G : (rgb & 0xff00) >> 8,
        B : (rgb & 0xff)
      };
    }
    
    Graphics.prototype.getRGB = function() {
      return unpack565RGB(this.getColor());
    };
    
    Graphics.prototype.setRGB = function() {
      var c;
      if (arguments.length>1) c={R:arguments[0],G:arguments[1],B:arguments[2]};
      else c=arguments[0];
      this.setColor(c.R/255.0, c.G/255.0, c.B/255.0);
    };
    
    Graphics.prototype.getBgRGB = function() {
      return unpack565RGB(this.getBgColor());
    };
    
    Graphics.prototype.setBgRGB = function(r,g,b) {
      this.setBgColor(r/255.0, g/255.0, b/255.0);
    };
    
    var ON=true;
    var OFF=false;
    LCD.bl=function() {
      if (arguments.length===0) return digitalRead(B5);
      if (!arguments[0]) digitalWrite(B5,0);
      else digitalWrite(B5,1);
    };
    
    LCD.blon=function() {LCD.bl(ON);};
    LCD.bloff=function() {LCD.bl(OFF);};
    
    var buttons=[];
    function Button(b) {
      this.tl=b.tl;
      this.br=b.br;
      if (this.tl.x>this.br.x) this.tl.x=[this.br.x,this.br.x=this.tl.x][0];
      if (this.tl.y>this.br.y) this.tl.y=[this.br.y,this.br.y=this.tl.y][0];
      this.w=this.br.x-this.tl.x;
      this.h=this.br.y-this.tl.y;
      this.RGB=b.RGB;
      this.inverseRGB={R:255-this.RGB.R,G:255-this.RGB.G,B:255-this.RGB.B};
      this.label=b.label;
      this.ID=b.ID;
      if (b.hasOwnProperty("onPress")) this.onPress=b.onPress;
      else this.onPress=undefined;
      if (b.hasOwnProperty("onRelease")) this.onRelease=b.onRelease;
      else this.onRelease=undefined;
      this.fontSize=Math.min(this.h-4,b.fontSize);
      LCD.setFontVector(this.fontSize);
      while (LCD.stringWidth(this.label)>this.w-4) LCD.setFontVector(--this.fontSize);
      this.fontX=(this.tl.x+this.br.x-LCD.stringWidth(this.label))/2;
      this.fontY=(this.tl.y+this.br.y-this.fontSize)/2;
      buttons.push(this);
      this.draw(this.RGB);
    }
                  
    Button.prototype.draw=function(c) {
      var inverse={R:255-c.R,G:255-c.G,B:255-c.B};
      LCD.setRGB(c);
      LCD.fillRect(this.tl.x,this.tl.y,this.br.x,this.br.y);
      LCD.setRGB(inverse);
      LCD.drawRect(this.tl.x,this.tl.y,this.br.x,this.br.y);
      LCD.setFontVector(this.fontSize);
      LCD.drawString(this.label,this.fontX,this.fontY);
    };
    
    Button.prototype.hit=function(x,y) {
      if ((x<=this.br.x) && (x>=this.tl.x) && (y<=this.br.y) && (y>=this.tl.y)) return true;
      else return false;
    };
    
    var curBtn;
    function doButtons(state,x,y) {
      if (!curBtn) {
        for (var i=buttons.length-1; i>=0; i--) if (buttons[i].hit(x,y)) {
          curBtn=buttons[i];
          curBtn.draw(curBtn.inverseRGB);
          if (curBtn.onPress) curBtn.onPress(x,y);
        }
      }
      else {
        if (state) return;
        curBtn.draw(curBtn.RGB);
        if (curBtn.onRelease) curBtn.onRelease(x,y);
        curBtn=undefined;
      }
    }
    
    function onTouch(x,y) {doButtons(x?true:false,LCD.getWidth()-x,y);}
    ts.connect(onTouch);
    
    function btnPress() {console.log(this.ID+" pressed");}
    function btnRelease() {console.log(this.ID+" released");}
    
    var btns=[
      {tl:{x:50,y:100},br:{x:170,y:150},RGB:{R:255,G:0,B:0},label:"Button",fontSize:20,onPress:btnPress,onRelease:btnRelease,ID:"btn0"},
      {tl:{x:10,y:40},br:{x:130,y:90},RGB:{R:0,G:0,B:0},label:"BL off",fontSize:20,onPress:function(){LCD.bloff();},ID:"btn1"},
      {tl:{x:280,y:100},br:{x:320,y:120},RGB:{R:255,G:255,B:0},label:"Button",fontSize:20,onPress:btnPress,onRelease:btnRelease,ID:"btn2"},
      {tl:{x:100,y:160},br:{x:220,y:185},RGB:{R:255,G:0,B:255},label:"Butt
    
  • Check this thread out: UI Module w/ Buttons, Checkboxes,... for Display w/ Touchscreen. It uses slightly different code for the touch screen, but the software interface should be the same...

    The challenge was to make the behavior fast. In my case the communication with the display was serial; using a parallel interface would make it faster.

  • Forgot to mention there are some helper-function extensions to the Graphics class in there too, dealing with 24- and 16-bit packed RGB triples. Feel free to add these to the Graphics class in the future if you want to, Gordon :-) :-)

    The 565 and 888 RGB packing/unpacking functions are particularly handy.
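
    For reference, packing goes the other way. A minimal sketch, assuming the same 5-6-5 bit layout as unpack565RGB above (the function name here is just illustrative; the attachment may use different names):

    function pack565RGB(c) {
      // keep the top 5/6/5 bits of each 8-bit channel and pack them into one 16-bit value
      return ((c.R & 0xF8) << 8) | ((c.G & 0xFC) << 3) | ((c.B & 0xF8) >> 3);
    }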

  • That's very nice work! Did you take it any further than the buttons and checkboxes?

    I'm taking a little different approach with object classes, so the UI elements are easily accessed and can be modified after creation. I haven't examined your code closely (and will), so there may be benefit in continuing this project...

  • For the overall view (ui), I followed the DOM pattern, and for the individual node - the UI element - the box model, with the ui being the DOM 'tree'. I like the object approach more than the array approach I took, but I chose arrays intentionally - for performance and compactness/memory footprint. Indexes work like property names, as 'documented' in the code for a ui element (object - button, check(box), ...) in ui.elts[] (the DOM):

    // 0=idx, 1=active, 2=type, 3=state, 4..7=x..y2, 8=brdrClr, 9=callback
    

    The ui tree 'understands' touch screen events and passes them on to the element under the x/y coordinates (figured out by iin() - 'is in'). It passes control to that 'in' ui element and lets it act on touch release, like 'clicked' or 'tapped'. It also manages the drag: if you touch in button 'aBtn' and release somewhere else, 'aBtn' has not been clicked/tapped - just as in the browser.

    The color values are basic color numbers - or, when the ui.clrs[] table is populated, they are indexes into this colors table.

    More O-O: think of ui as the factory and at the same time the superclass that implements the UI element classes' behavior.

    Currently, the factory provides only btn(...) and chk(box)(...) constructor methods and implements the behavior for these 'classes'. Arrays are a much more lightweight implementation than actual objects, and by holding the type/class information in them, method branching is possible (a case statement is slow, and slower the more types/classes there are), or - to be fast - the actual function/method is held as an action property (lambda), just like the callbacks are.
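
    A rough sketch of that array-plus-dispatch idea, using the index layout from the comment above (the names ui.btn and ui.drawFns are illustrative only, not the actual module code, and LCD stands in for whatever Graphics instance is used):

    // Illustrative sketch only - not the actual ui module code.
    // Each element is a plain array; index 2 holds the type, which selects the draw lambda.
    var ui = {
      elts: [],
      drawFns: {},   // type -> draw function, held as a property (lambda) for fast dispatch
      btn: function(x, y, x2, y2, brdrClr, callback) {
        var e = [ui.elts.length, true, "btn", 0, x, y, x2, y2, brdrClr, callback];
        ui.elts.push(e);
        ui.drawFns[e[2]](e);   // branch on the type stored in the array - no case statement
        return e;
      }
    };
    ui.drawFns.btn = function(e) {
      LCD.setColor(e[8]);
      LCD.drawRect(e[4], e[5], e[6], e[7]);
    };
    
    // usage: callback is stored at index 9, as per the layout comment above
    var b = ui.btn(10, 10, 110, 60, 0xFFFF, function(e){ console.log("tapped", e[0]); });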

    I did not go beyond that yet... but that's where it's worth extending. For a new view element:

    1. add a factory/constructor method
    2. add a draw method
    3. as needed, add UI-specific methods for behavior

    Since drag is implemented, sliders are almost there, and so would be gauges.

    You may have noticed that, for performance, I try to use overlaying rectangles to draw border and fill as fast as possible and with the least information. As said earlier: speed is a challenge.

    On construction, the UI element is returned and can be modified as needed; likewise, on an event it passes itself into the callback to allow access to the (ui)e(lement) from within the callback.

    If you generalize btns[] and doButtons() in your code and tuck them away (encapsulate / hide the implementation) in a ui singleton - including some of your other globals - 'we' are not much different. Btw, I wondered where the Button() constructor is used... am I missing something, or did not all the code make it into the post? Can you share a pic of the running UI?

  • Embarrassing... yes, some code got cut off -- note line 116 ends with '...label:"Butt'! HAHAHA! I assure everyone I did not have an example button with the text "Butt" on the screen :-)

    Here's the complete end of the code, the btns array initialization repeated:

    var btns=[
      {tl:{x:50,y:100},br:{x:170,y:150},RGB:{R:255,G:0,B:0},label:"Button",fontSize:20,onPress:btnPress,onRelease:btnRelease,ID:"btn0"},
      {tl:{x:10,y:40},br:{x:130,y:90},RGB:{R:0,G:0,B:0},label:"BL off",fontSize:20,onPress:function(){LCD.bloff();},ID:"btn1"},
      {tl:{x:280,y:100},br:{x:320,y:120},RGB:{R:255,G:255,B:0},label:"Button",fontSize:20,onPress:btnPress,onRelease:btnRelease,ID:"btn2"},
      {tl:{x:100,y:160},br:{x:220,y:185},RGB:{R:255,G:0,B:255},label:"Button",fontSize:20,onPress:btnPress,onRelease:btnRelease,ID:"btn3"}
    ];
    
    btns.forEach(function(c){ new Button(c); });

    Buttons are created in line 8 above. Since this was simply code to get some buttons on the screen to test the GUI code, I'm not saving the references to the 4 buttons anywhere. In a real app I'd keep those references when necessary.

    I agree completely that our two approaches are really (stripped of minor differences) fundamentally the same. However, ignoring performance considerations (:-)) leaves me more flexibility to implement a more convenient OO model. It also makes the code easier to understand (that is, code using the GUI).

    There are good reasons for both approaches: an ease-of-use, OO-biased design where performance isn't critical, and performance-oriented, efficient code where CPU cycles and RAM are precious (things like footprint, or where there is a heavy ongoing CPU load from the application, so minimizing UI processing overhead is really needed).

    Alternatively, for applications like a distributed WS2812B light strip controller, performance isn't an issue at all -- that's basically my case. I'm implementing the controller on a HY-MiniSTM32V board with touchscreen display, then each strip has its own dedicated ESP8266-12E running Espruino, everything connected over wifi on my home network.

    So, for me, simplicity in writing the controller code (after the effort of creating the GUI library!) is the driving value proposition.

    Is there a screencap function for displays? Otherwise, I'll have to shoot a picture and post it.

  • Looks good! The RGB functions are tricky - right now the Graphics lib doesn't really understand R, G and B in a meaningful way, as a lot of displays are different. Even with 16 bits you get different orderings of the colour channels - so there would be some extra complexity and configuration required.

    For the sake of saving a bit of memory, you could convert all your RGB triples into 16 bit values when you store them in the button class though? It makes doing an invert nice and easy too, with just col^0xFFFF ;)
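
    A minimal sketch of the idea, assuming the RGB565 layout used elsewhere in this thread (e.g. via a pack565RGB helper):

    var col = 0xF800;              // red in RGB565, stored instead of an {R,G,B} object
    var inverted = col ^ 0xFFFF;   // 0x07FF = cyan - the film-negative invert in one XOR
    LCD.setColor(col);             // setColor also accepts a single pre-packed colour value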

    I'm afraid there's no screencap - I think you can get the pixel data back on those displays using getPixel (on some you can't), but it'll be a slow and painful process :)

  • ...and getPixel(... is not even implemented... at least at the time I used it. I wrote my own retrieval (and so did @JumJum).

    Going for a more elaborate UI, for example a drop-down menu / overlay, the current serial connectivity is too slow and memory is too scarce... and repainting the whole screen is even slower (if even possible, because the state would have to be preserved in source form).

    Check out this conversation about Graphics.getPixel(x,y) on ILI9341 Module for ILI9341 LCD CONTROLLER and Resistive Touchscreen directly (no touch controller), especially post #12, where I'm talking about calibration of the resistive touch screen. For calibration (at any time), 5 squares overlay the current display to provide defined touch targets. Before they are displayed the underlying area is saved, and afterwards it is restored.

    After that 'slow' experience, the display and its capabilities weren't of prime interest to me anymore. The display would need its own controller implementation beyond just managing the display memory and fixed-font usage. I struggled with the display update speed as well when going for DIY Marine GPS using enhanced GPS Module, u-blox NEO-6M GPS receiver, and ILI9341 Module controlled 2.8" Color TFT LCD. One second was not enough to update longitude, latitude, number of satellites, time, etc... I had to resort to updating only changed data, and of that only a section, then another section the next second, etc.

    My initial intent with the display was to write the PacMan game... but the speed let this project fall into a deep sleep. The only game I then successfully implemented was the 15-out-of-16 number puzzle game Puzzle16+ Game on ILI9341 2.8" 262K Color TFT LCD w/ Resistive Touch Screen. I'm sure you can load the code and experience the behavior yourself... I already optimized the implementation by choosing what works fastest...

  • ...and getPixel(... is not even implemented...

    Only when you access a display you wired up yourself. On the HY boards I think it works fine, since they use a special driver for the display that's written in C. It's an awful lot faster too.

  • HY boards... they use a special driver for the display that's written in C. It's an awful lot faster too

    Makes absolute sense... and if enough memory is available to save and restore graphics areas fast enough, nice UIs are well possible. I - or rather my Espruino code - put up with bare metal... for both display and touch screen, which clearly shows the limits of non-native (and serial) implementations.

    The above may sound negative... it is not at all... I think the Espruino built-in Graphics library is an excellent piece of work - engineering AND art - because it can handle all kinds of graphics display hardware. It hosts just the right amount of (middle-layer) functionality to enable graphics on whatever hardware is used. The code is complemented / completed by display-hardware-specific modules that handle controller-specific implementations - like adapters. This is confirmed, not least, by @Gordon's post #7 in this very same conversation. Writing reusable components, which starts with the most difficult part - defining the scope of the module - is a challenge. It is always easy to find a shortcut that - at first sight - yields better results but is just a one-of-a-kind implementation... unfortunately, with limited resources - cycles and memory - this may become the final, particular solution, but it moves completely away from being - or becoming - a platform.

  • I've been satisfied so far with the speed of the HY display/MCU combination, and the driver that Gordon leveraged to run it. It uses FSMC to talk to the display and transfers data as 16-bit words over a parallel interface, rather than SPI or some other serial interface, so it's quite fast.

    Latest progress attached for anyone that wants to play with this. Since this code is in development and evolving, there are some naming changes (unpack565RGB is now unpack16RGB, for example, among other changes), some things have been completely rewritten, etc.

    Some new stuff: a polarFillPoly function that creates a filled polygon from a set of vertices given in polar coordinates (radius, angle) around a specified origin. It makes it simple to draw regular polygons, and circles.
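
    For anyone who doesn't want to dig through the attachment, a rough sketch of how such a helper can be built on top of the standard Graphics.fillPoly (the real polarFillPoly in the attachment may well differ):

    // Rough sketch only - the polarFillPoly in the attachment may differ.
    // verts is an array of [radius, angleInRadians] pairs; origin is {x,y}.
    Graphics.prototype.polarFillPoly = function(origin, verts) {
      var poly = [];
      verts.forEach(function(v) {
        poly.push(origin.x + v[0]*Math.cos(v[1]));
        poly.push(origin.y + v[0]*Math.sin(v[1]));
      });
      this.fillPoly(poly);   // fillPoly takes a flat [x1,y1,x2,y2,...] array
    };
    
    // e.g. a regular hexagon of radius 40 centred at (160,120)
    var hex = [];
    for (var i=0; i<6; i++) hex.push([40, i*Math.PI/3]);
    LCD.polarFillPoly({x:160,y:120}, hex);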

    Also a set of duplicated Graphics functions with "2" appended (e.g. fillRect2()) that can be used in place of the standard functions, but allow for the inclusion of a color parameter to set the drawing color.
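
    The "2" variants just fold the colour argument into the call; a minimal sketch of the idea (the colour-first argument order is my guess, and the versions in the attachment may differ):

    // Minimal sketch - the actual "2" functions in the attachment may differ.
    ["fillRect", "drawRect", "drawString"].forEach(function(name) {
      Graphics.prototype[name+"2"] = function(col) {
        this.setColor(col);                  // colour first, then the usual arguments
        var args = [];
        for (var i=1; i<arguments.length; i++) args.push(arguments[i]);
        return this[name].apply(this, args);
      };
    });
    
    // instead of: LCD.setColor(...); LCD.fillRect(10,10,100,50);
    LCD.fillRect2(0xF800, 10, 10, 100, 50);  // 0xF800 = red in RGB565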

    The plan is to create separate modules for each control so that only the code for the controls actually being used is loaded into memory, via 'require' statements in the code using the GUI.
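
    For reference, the standard Espruino module pattern supports that; a sketch of what a per-widget module and its use could look like (the module names here are hypothetical):

    // Hypothetical "Button" module file - each widget would get its own module like this
    function Button(opts) {
      this.area = opts.area;
      this.text = opts.text;
      // ... rest of the Button implementation ...
    }
    exports.Button = Button;
    
    // In the application, only the widgets actually used get pulled in:
    var Button = require("Button").Button;
    var Checkbox = require("Checkbox").Checkbox;   // hypothetical second module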

    Anyway, the current code -- messy as it is, with not everything working -- is attached for anyone who wants to play around with this state of progress. I'll be finishing Button and Checkbox, and implementing a RadioButton, today.

    At U$32, the HY 3.2" display is a very attractive solution for projects that need user interaction... nothing beats a color graphics touchscreen for interacting with a user :-) Hence my interest in developing a GUI library/module. My own needs/wants are enough reason for me to put in the effort, and hopefully it results in something that others can and will use too.

    In the end, having a simple, flexible, easy-to-use GUI for the HY boards may stimulate some use of them, expanding the use of Espruino, creating more support, etc. I know Gordon et al. do not get financial support directly when people use these boards, but I'd expect good people to chip in via a donation... That's what I do :-)

    Plus, developing this code is fun.


    1 Attachment

  • Also a set of duplicated Graphics functions with "2" appended (e.g. fillRect2()) that can be used in place of the standard functions, but allow for the inclusion of a color parameter to set the drawing color.

    Why not create a pen object that you set the colour on, and then call the same functions with the same arguments on that object? Its constructor could take the lcd as a param, and a colour.

  • @Wilberforce: Could you elaborate with some more detail? I'm not clear on what you're suggesting.

    My intent was to eliminate the extra line of code setting the color before calling the drawing commands, as I need to change colors regularly for animating various widgets, and the Button widget (which I'm working on right now) has 3D effects (looks raised, then pushes in when pressed).

    I'm interested in your suggestion -- I just need to understand it :-)

  • One other comment I'll add: I'm certain I'm doing much of this "wrong," in the sense that I'm bloating the code with flexibility and functionality. I'm new to the MCU world, so I'm not particularly sensitive to keeping things compact and limited to the essentials for a small memory footprint.

    I plan to do a bunch of optimization after I get this all working, throwing out stuff that really isn't necessary in this environment. Also, as mentioned above, the plan is to make a basket of individual modules that can be loaded as needed for particular UI elements, so not all the code needs to be taking up space if it isn't used.

  • JavaScript's dynamic nature very elegantly enables composing the total code from selectable components. For example, a UI (singleton) instance can be complemented / expanded with the methods and properties needed for a particular widget - for example a button, or a checkbox. If more sophisticated, the UI object acts as a factory, with the widget class name passed in as a string...

    In regard to bloat: with limited resources, the conceptually clean, o-o, robust and flexible design has to be transformed into a resource-adequate implementation design. There is nothing wrong with thinking in an o-o, bloated but very clear way. It is, though, a question whether the implementation should be a literal translation of the design. Furthermore, thinking in objects is always better than thinking in functions. How an object is implemented, though, may vary. Even though JSON is already terse compared to XML, it is still far bulkier than an array of values.

    Think of the array as the static properties of the object, with the index numbers mapping to variable names. The first value in the array can hold the type (class) of the object and is used in the behavioral object (singleton) to pick the appropriate method - the method that goes with that particular array of values... This is not only the most compact way of saying things - especially in Espruino, where the source is executed - but also the fastest in execution.
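
    A small illustrative example of that mapping (not from the actual module): index constants declared once make the array read almost like an object with named properties:

    // Illustrative only - index numbers standing in for property names
    var IDX=0, ACTIVE=1, TYPE=2, STATE=3, X=4, Y=5, X2=6, Y2=7, CLR=8, CB=9;
    var chk = [1, true, "chk", 0, 10, 40, 30, 60, 0xFFFF,
               function(e){ console.log("checked:", e[STATE]); }];
    // chk[TYPE] tells the behavioral singleton which draw/handler method applies
    console.log(chk[TYPE], chk[X], chk[Y]);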

  • Posting up a video of my enhancements to the HD44780 driver reminded me that I never got back here and posted a video of the GUI in action. So... here it is!

    Shown here is a demo of the Button, Checkbox, and Slider classes. There's a layout bug in the Checkbox class that I still need to fix. The slider can have the slide hidden so it can be used to manage more interesting "sliding" things, as shown here with the addressable light strip... pretty cool, eh?

    Button supports a flat and a 3D style.

    I asked a question elsewhere today about modules, which is directly related to this work. The entire GUI code, even minified, is quite big, and the STM32F1 chips used on the Haoyu MiniSTM32 display boards have about half the RAM of the F4's used on the Pico. So I'm going to break it up into individual modules for each widget type (Button, Slider, etc.). Then, only the elements actually being used can be loaded, using only the space needed.

    Any and all feedback welcome! This is a "fun" project for me I've been tinkering with as I get to know Espruino and the MCU world. I've ordered about U$50 worth of various sensors and things to play with, so more funny business from me over the coming months.

    I just gotta get off my lazy tush and do one of the REAL projects I've been thinking about. I'm having too much fun experimenting :-) :-)
    https://youtu.be/FkOfEuJQW-g

  • Looking pretty snappy... it's noticeable that there is native integration between Espruino and the display controller.

    What I notice is that the push down already calls the function... I'd expect that to happen only after button release... because what happens when moving out of the button while it is pressed?

    Every touch UI goes for a tap - touch and release... because touch-and-move (drag/swipe) - within a defined time - is something else... BUT: touching longer than a (configurable) defined time - a long touch - is interpreted like a right-click (and disables the drag/swipe).

    The points above have an impact on the behavior of the slider: not 'grabbing' the slider button but tapping to its left or right is interpreted as a set after release (tap). Moving out of the active element allows cancelling the touch down... ;-)

  • Actually, the Button implementation has 3 callbacks -- onPress, onHold, and onRelease. What you're seeing on the action of the buttons is simply because I implemented the action in the onPress callback for those two buttons.

    However, you point out unimplemented functionality that I just went and implemented -- a check to see if the touch was still in the button area on release in order to call the onRelease callback... I agree that onRelease shouldn't be called if the finger isn't on the button anymore when lifted from the screen.
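
    In terms of the earlier doButtons() from this thread, that change amounts to one extra hit test on the release branch; a sketch against that older version (the code in the attachment has since been rewritten):

    function doButtons(state,x,y) {
      if (!curBtn) {
        for (var i=buttons.length-1; i>=0; i--) if (buttons[i].hit(x,y)) {
          curBtn=buttons[i];
          curBtn.draw(curBtn.inverseRGB);
          if (curBtn.onPress) curBtn.onPress(x,y);
        }
      } else {
        if (state) return;
        curBtn.draw(curBtn.RGB);
        // only call onRelease if the finger is still inside the button when lifted
        if (curBtn.onRelease && curBtn.hit(x,y)) curBtn.onRelease(x,y);
        curBtn=undefined;
      }
    }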

    I'm leaving onHold alone -- i.e. once the press is detected, the "focus" remains on that control even if the finger leaves its area, and continues to affect it through the onHold callback. This makes it easier to see where the slider is while moving it: you can still move it back and forth while your finger is above or below the actual control, after it's been "activated" with a press.

    Here's how easy it is to set up one of these buttons, if the GUI is available (this is the 3D purplish one):

    new Button({
    	area: [163,5,100,50],
    	text: 'LED2 on',
    	color: new Uint8Array([1, 0, 128, 153]),
    	onPress: function() {
    		LED2.write(!LED2.read());
    		this.text = LED2.read() ? "LED2 off" : "LED2 on";
    	},
    });
    
  • The plain assignment of the button text took me a bit by surprise at first... I thought @Gordon had implemented set/get on object properties... but going back to the clip I noticed that the redraw of the button on release puts the text in place...

    Now we are back to some thoughts we exchanged earlier in the thread... As I interpret it, the push redraws the button in its pressed state - handled, obviously, just before the registered onPress function is invoked... no hair-splitting intended here! It just shows how challenging UIs are with regard to communicating with the user, especially when it comes to instant feedback. I'm glad for the native implementation; it gives the user the feel of great responsiveness!
