int dx,dy;
rx=Xrottable[stateX+1]-Xrottable[stateX];
-ry=Yrottable[stateX+1]-Yrottable[stateY];
+ry=Yrottable[stateY+1]-Yrottable[stateY];
for (x=0;x<=rx;x++)
}
+#ifdef HAVE_XDBE_EXTENSION
+/* Shared state for the BadMatch probe used by InitializeAll: the
+   previously-installed Xlib error handler (chained to for any error that
+   is not BadMatch) and a flag set when a BadMatch error arrives while
+   the probe handler is active. */
+static XErrorHandler old_handler = 0;
+static Bool got_BadMatch = False;
+/* Temporary Xlib error handler installed around the
+   XdbeAllocateBackBufferName probe.  BadMatch errors are swallowed and
+   recorded in got_BadMatch; any other error is forwarded to the saved
+   old_handler, or the process exits if none was saved.  (Xlib ignores
+   the return value of an error handler.) */
+static int
+BadMatch_ehandler (Display *dpy, XErrorEvent *error)
+{
+ if (error->error_code == BadMatch) {
+ got_BadMatch = True;
+ return 0;
+ } else if (old_handler)
+ return old_handler(dpy, error);
+ else
+ exit(1);
+}
+#endif /* HAVE_XDBE_EXTENSION */
+
+
/* NOTE(review): this span is a patch fragment — the diff hunk headers
   (@@ lines) are missing, so parts of InitializeAll's body are not
   visible here; the bare braces below L27 close scopes whose openings
   are off-screen.  Code lines are kept byte-identical, including the
   diff +/- markers. */
Bool InitializeAll(void)
{
XGCValues xgcv;
usedouble=False;
}
}
-if (usedouble) win[1]=XdbeAllocateBackBufferName(dpy,win[0],XdbeUndefined);
+if (usedouble)
+ {
+ /* We need to trap an X error when calling XdbeAllocateBackBufferName,
+ because there is no way to know beforehand whether the call will
+ succeed! This is a totally fucked design, but the man page says:
+
+ ERRORS
+ BadMatch
+ The specified window is not an InputOutput window or
+ its visual does not support DBE.
+
+ With SGI's O2 X server, some visuals support double-buffering (the
+ 12-bit pseudocolor visuals) and others yield a BadMatch error, as
+ documented.
+
+ However, it doesn't matter, because using the DBUF extension seems
+ to make it run *slower* instead of faster anyway.
+
+ -- jwz, 1-Jul-98
+ */
+ XSync(dpy, False); /* drain earlier requests so their errors miss the probe handler */
+ old_handler = XSetErrorHandler (BadMatch_ehandler);
+ got_BadMatch = False;
+ win[1] = 0;
+ win[1] = XdbeAllocateBackBufferName(dpy,win[0],XdbeUndefined);
+ XSync(dpy, False); /* force the request out so any BadMatch arrives now */
+ XSetErrorHandler (old_handler);
+ old_handler = 0;
+ XSync(dpy, False);
+ if (got_BadMatch || !win[1]) /* probe failed: fall back to single buffering */
+ {
+ /* NOTE(review): XVisualIDFromVisual returns VisualID (unsigned long),
+ but "%x" expects unsigned int — confirm the final code casts the
+ argument or uses "0x%lx". */
+ fprintf(stderr, "%s: visual 0x%x does not support double-buffering.\n",
+ progname, XVisualIDFromVisual(xgwa.visual));
+ usedouble = False;
+ win[1] = win[0];
+ got_BadMatch = False;
+ }
+ }
#endif /* HAVE_XDBE_EXTENSION */
delay=get_integer_resource("delay","Integer");