X-Git-Url: http://git.hungrycats.org/cgi-bin/gitweb.cgi?p=xscreensaver;a=blobdiff_plain;f=hacks%2Fwebcollage;h=cd3fa4319bbf911a96d56e7323ed3320f84eb98c;hp=53625101adb65c11db8a978b23dfb7ef1fbfccdf;hb=c494fd2e6b3b25582375d62e40f4f5cc984ca424;hpb=c28aecf9fc41e3a03494bacf7279745425e2fa18 diff --git a/hacks/webcollage b/hacks/webcollage index 53625101..cd3fa431 100755 --- a/hacks/webcollage +++ b/hacks/webcollage @@ -1,6 +1,6 @@ #!/usr/bin/perl -w # -# webcollage, Copyright (c) 1999-2002 by Jamie Zawinski +# webcollage, Copyright (c) 1999-2005 by Jamie Zawinski # This program decorates the screen with random images from the web. # One satisfied customer described it as "a nonstop pop culture brainbath." # @@ -15,10 +15,17 @@ # To run this as a display mode with xscreensaver, add this to `programs': # -# default-n: webcollage -root \n\ -# default-n: webcollage -root -filter 'vidwhacker -stdin -stdout' \n\ - - +# webcollage -root +# webcollage -root -filter 'vidwhacker -stdin -stdout' +# +# +# You can see this in action at http://www.jwz.org/webcollage/ -- +# it auto-reloads about once a minute. To make a page similar to +# that on your own system, do this: +# +# webcollage -size '800x600' -imagemap $HOME/www/webcollage/index +# +# # If you have the "driftnet" program installed, webcollage can display a # collage of images sniffed off your local ethernet, instead of pulled out # of search engines: in that way, your screensaver can display the images @@ -27,7 +34,7 @@ # Driftnet is available here: http://www.ex-parrot.com/~chris/driftnet/ # Use it like so: # -# default-n: webcollage -root -driftnet \n\ +# webcollage -root -driftnet # # Driftnet is the Unix implementation of the MacOS "EtherPEG" program. @@ -53,34 +60,64 @@ use bytes; # Larry can take Unicode and shove it up his ass sideways. my $progname = $0; $progname =~ s@.*/@@g; -my $version = q{ $Revision: 1.96 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/; -my $copyright = "WebCollage $version, Copyright (c) 1999-2002" . +my $version = q{ $Revision: 1.133 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/; +my $copyright = "WebCollage $version, Copyright (c) 1999-2005" . " Jamie Zawinski \n" . - " http://www.jwz.org/xscreensaver/\n"; - - - -my @search_methods = ( 40, "imagevista", \&pick_from_alta_vista_images, - 30, "altavista", \&pick_from_alta_vista_text, - 19, "yahoorand", \&pick_from_yahoo_random_link, - 9, "lycos", \&pick_from_lycos_text, - 2, "yahoonews", \&pick_from_yahoo_news_text, - - # Hotbot gives me "no matches" just about every time. - # Then I try the same URL again, and it works. I guess - # it caches searches, and webcollage always busts its - # cache and time out? Or it just sucks. - # 0, "hotbot", \&pick_from_hotbot_text, - - # Google asked (nicely) for me to stop searching them. - # 0, "googlenums", \&pick_from_google_image_numbers, - # 0, "googleimgs", \&pick_from_google_images, - + " http://www.jwz.org/webcollage/\n"; + + + +my @search_methods = ( 56, "altavista", \&pick_from_alta_vista_random_link, + 11, "livejournal", \&pick_from_livejournal_images, + 5, "yahoorand", \&pick_from_yahoo_random_link, + 10, "googlephotos", \&pick_from_google_image_photos, + 5, "googleimgs", \&pick_from_google_images, + 3, "googlenums", \&pick_from_google_image_numbers, + 2, "flickr_recent", \&pick_from_flickr_recent, + 8, "flickr_random", \&pick_from_flickr_random, + + # In Apr 2002, Google asked me to stop searching them. + # I asked them to add a "random link" url. 
They said + # "that would be easy, we'll think about it" and then + # never wrote back. Booo Google! Booooo! So, screw + # those turkeys, I've turned Google searching back on. + # I'm sure they can take it. (Jan 2005.) + + # Jan 2005: Yahoo fucked up their search form so that + # it's no longer possible to do "or" searches on news + # images, so we rarely get any hits there any more. + # + # 0, "yahoonews", \&pick_from_yahoo_news_text, + + # Dec 2004: the ircimages guy's server can't take the + # heat, so he started banning the webcollage user agent. + # I tried to convince him to add a lighter-weight page to + # support webcollage better, but he doesn't care. + # + # 0, "ircimages", \&pick_from_ircimages, + + # Dec 2002: Alta Vista has a new "random link" URL now. + # They added it specifically to better support webcollage! + # That was super cool of them. This is how we used to do + # it, before: + # + # 0, "avimages", \&pick_from_alta_vista_images, + # 0, "avtext", \&pick_from_alta_vista_text, + + # This broke in 2004. Eh, Lycos sucks anyway. + # + # 0, "lycos", \&pick_from_lycos_text, + + # This broke in 2003, I think. I suspect Hotbot is + # actually the same search engine data as Lycos. + # + # 0, "hotbot", \&pick_from_hotbot_text, ); # programs we can use to write to the root window (tried in ascending order.) # my @root_displayers = ( + "xscreensaver-getimage -root -file", "chbg -once -xscreensaver -max_size 100", "xv -root -quit -viewonly +noresetroot -quick24 -rmode 5" . " -rfg black -rbg black", @@ -102,6 +139,8 @@ my %cookies = ( "www.nytimes.com" => 'NYT-S=18cHMIlJOn2Y1bu5xvEG3Ufuk6E1oJ.' . 'FMxWaQV0igaB5Yi/Q/guDnLeoL.pe7i1oakSb' . '/VqfdUdb2Uo27Vzt1jmPn3cpYRlTw9', + + "ircimages.com" => 'disclaimer=1', ); @@ -128,19 +167,20 @@ my $opacity = 0.85; # my %poisoners = ( "die.net" => 1, # 'l33t h4ck3r d00dz. - "genforum.genealogy.com" => 1, # Cluttering altavista with human names. - "rootsweb.com" => 1, # Cluttering altavista with human names. + "genforum.genealogy.com" => 1, # Cluttering avtext with human names. + "rootsweb.com" => 1, # Cluttering avtext with human names. "akamai.net" => 1, # Lots of sites have their images on Akamai. - # But those are pretty much all banners. + "akamaitech.net" => 1, # But those are pretty much all banners. # Since Akamai is super-expensive, let's # go out on a limb and assume that all of # their customers are rich-and-boring. - "bartleby.com" => 1, # Dictionary, cluttering altavista. - "encyclopedia.com" => 1, # Dictionary, cluttering altavista. - "onlinedictionary.datasegment.com" => 1, # Dictionary, cluttering altavista. - "hotlinkpics.com" => 1, # Porn site that has poisoned imagevista + "bartleby.com" => 1, # Dictionary, cluttering avtext. + "encyclopedia.com" => 1, # Dictionary, cluttering avtext. + "onlinedictionary.datasegment.com" => 1, # Dictionary, cluttering avtext. + "hotlinkpics.com" => 1, # Porn site that has poisoned avimages # (I don't see how they did it, though!) "alwayshotels.com" => 1, # Poisoned Lycos pretty heavily. + "nextag.com" => 1, # Poisoned Alta Vista real good. ); @@ -155,12 +195,54 @@ my %warningless_sites = ( "www.geocities.com" => 1, "www.angelfire.com" => 1, "members.aol.com" => 1, + "img.photobucket.com" => 1, + "pics.livejournal.com" => 1, + "tinypic.com" => 1, + "flickr.com" => 1, "yimg.com" => 1, # This is where dailynews.yahoo.com stores "eimg.com" => 1, # its images, so pick_from_yahoo_news_text() # hits this every time. + "images.quizfarm.com" => 1, # damn those LJ quizzes... 
+ "images.quizilla.com" => 1, + "images.quizdiva.net" => 1, + "driftnet" => 1, # builtin... + "local-directory" => 1, # builtin... +); + + +# For decoding HTML-encoded character entities to URLs. +# +my %entity_table = ( + "apos" => '\'', + "quot" => '"', "amp" => '&', "lt" => '<', "gt" => '>', + "nbsp" => ' ', "iexcl" => '¡', "cent" => '¢', "pound" => '£', + "curren" => '¤', "yen" => '¥', "brvbar" => '¦', "sect" => '§', + "uml" => '¨', "copy" => '©', "ordf" => 'ª', "laquo" => '«', + "not" => '¬', "shy" => '­', "reg" => '®', "macr" => '¯', + "deg" => '°', "plusmn" => '±', "sup2" => '²', "sup3" => '³', + "acute" => '´', "micro" => 'µ', "para" => '¶', "middot" => '·', + "cedil" => '¸', "sup1" => '¹', "ordm" => 'º', "raquo" => '»', + "frac14" => '¼', "frac12" => '½', "frac34" => '¾', "iquest" => '¿', + "Agrave" => 'À', "Aacute" => 'Á', "Acirc" => 'Â', "Atilde" => 'Ã', + "Auml" => 'Ä', "Aring" => 'Å', "AElig" => 'Æ', "Ccedil" => 'Ç', + "Egrave" => 'È', "Eacute" => 'É', "Ecirc" => 'Ê', "Euml" => 'Ë', + "Igrave" => 'Ì', "Iacute" => 'Í', "Icirc" => 'Î', "Iuml" => 'Ï', + "ETH" => 'Ð', "Ntilde" => 'Ñ', "Ograve" => 'Ò', "Oacute" => 'Ó', + "Ocirc" => 'Ô', "Otilde" => 'Õ', "Ouml" => 'Ö', "times" => '×', + "Oslash" => 'Ø', "Ugrave" => 'Ù', "Uacute" => 'Ú', "Ucirc" => 'Û', + "Uuml" => 'Ü', "Yacute" => 'Ý', "THORN" => 'Þ', "szlig" => 'ß', + "agrave" => 'à', "aacute" => 'á', "acirc" => 'â', "atilde" => 'ã', + "auml" => 'ä', "aring" => 'å', "aelig" => 'æ', "ccedil" => 'ç', + "egrave" => 'è', "eacute" => 'é', "ecirc" => 'ê', "euml" => 'ë', + "igrave" => 'ì', "iacute" => 'í', "icirc" => 'î', "iuml" => 'ï', + "eth" => 'ð', "ntilde" => 'ñ', "ograve" => 'ò', "oacute" => 'ó', + "ocirc" => 'ô', "otilde" => 'õ', "ouml" => 'ö', "divide" => '÷', + "oslash" => 'ø', "ugrave" => 'ù', "uacute" => 'ú', "ucirc" => 'û', + "uuml" => 'ü', "yacute" => 'ý', "thorn" => 'þ', "yuml" => 'ÿ', + "ndash" => '-', "mdash" => "--" ); @@ -189,7 +271,7 @@ my $verbose_exec = 0; # diagnostics about executing programs my $report_performance_interval = 60 * 15; # print some stats every 15 minutes my $http_proxy = undef; -my $http_timeout = 30; +my $http_timeout = 20; my $cvt_timeout = 10; my $min_width = 50; @@ -201,6 +283,8 @@ my $min_gif_area = (120 * 120); my $no_output_p = 0; my $urls_only_p = 0; +my $cocoa_p = 0; +my $imagemap_base = undef; my @pids_to_kill = (); # forked pids we should kill when we exit, if any. @@ -208,6 +292,9 @@ my $driftnet_magic = 'driftnet'; my $driftnet_dir = undef; my $default_driftnet_cmd = "driftnet -a -m 100"; +my $local_magic = 'local-directory'; +my $local_dir = undef; + my $wordlist; my %rejected_urls; @@ -227,8 +314,8 @@ my @tripwire_words = ("aberrate", "abode", "amorphous", "antioch", # returns three values: the HTTP response line; the document headers; # and the document body. 
# -sub get_document_1 { - my ( $url, $referer, $timeout ) = @_; +sub get_document_1($$$) { + my ($url, $referer, $timeout) = @_; if (!defined($timeout)) { $timeout = $http_timeout; } if ($timeout > $http_timeout) { $timeout = $http_timeout; } @@ -248,6 +335,11 @@ sub get_document_1 { my ($url_proto, $dummy, $serverstring, $path) = split(/\//, $url, 4); $path = "" unless $path; + if (!$url_proto || !$serverstring) { + LOG (($verbose_net || $verbose_load), "unparsable URL: $url"); + return (); + } + my ($them,$port) = split(/:/, $serverstring); $port = 80 unless $port; @@ -255,6 +347,7 @@ sub get_document_1 { my $port2 = $port; if ($http_proxy) { $serverstring = $http_proxy if $http_proxy; + $serverstring =~ s@^[a-z]+://@@; ($them2,$port2) = split(/:/, $serverstring); $port2 = 80 unless $port2; } @@ -300,9 +393,13 @@ sub get_document_1 { my $cookie = $cookies{$them}; my $user_agent = "$progname/$version"; - if ($url =~ m@^http://www\.altavista\.com/@) { + + if ($url =~ m@^http://www\.altavista\.com/@ || + $url =~ m@^http://random\.yahoo\.com/@ || + $url =~ m@^http://images\.google\.com/@) { # block this, you turkeys. - $user_agent = "Mozilla/4.76 [en] (X11; U; Linux 2.2.16-22 i686; Nav)"; + $user_agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.5)" . + " Gecko/20041111 Firefox/1.0"; } my $hdrs = "GET " . ($http_proxy ? $url : "/$path") . " HTTP/1.0\r\n" . @@ -323,6 +420,11 @@ sub get_document_1 { print S $hdrs; my $http = || ""; + # Kludge: the Yahoo Random Link is now returning as its first + # line "Status: 301" instead of "HTTP/1.0 301 Found". Fix it... + # + $http =~ s@^Status:\s+(\d+)\b@HTTP/1.0 $1@i; + $_ = $http; s/[\r\n]+$//s; LOG ($verbose_http, " <== $_"); @@ -354,9 +456,16 @@ sub get_document_1 { return (); } + $SIG{ALRM} = 'DEFAULT'; # seem to be suffering a race? return ( $http, $head, $body ); }; die if ($@ && $@ ne "alarm\n"); # propagate errors + + if ($@ && $@ ne "alarm\n") { + print STDERR blurb() . "DIE " . join(" ", $@) . "\n"; + die; + } + if ($@) { # timed out $head = undef; @@ -374,14 +483,18 @@ sub get_document_1 { # returns two values: the document headers; and the document body. # if the given URL did a redirect, returns the redirected-to document. # -sub get_document { - my ( $url, $referer, $timeout ) = @_; +sub get_document($$;$) { + my ($url, $referer, $timeout) = @_; my $start = time; if (defined($referer) && $referer eq $driftnet_magic) { return get_driftnet_file ($url); } + if (defined($referer) && $referer eq $local_magic) { + return get_local_file ($url); + } + my $orig_url = $url; my $loop_count = 0; my $max_loop_count = 4; @@ -408,6 +521,7 @@ sub get_document { if ( $http =~ m@^HTTP/[0-9.]+ 30[123]@ ) { $_ = $head; + my ( $location ) = m@^location:[ \t]*(.*)$@im; if ( $location ) { $location =~ s/[\r\n]$//; @@ -466,7 +580,7 @@ sub get_document { # in again, but you have to present the old cookie to get the new cookie. # So, by doing this, the built-in cypherpunks cookie will never go "stale". # -sub set_cookie { +sub set_cookie($$) { my ($host, $cookie) = @_; my $oc = $cookies{$host}; return unless $oc; @@ -492,8 +606,8 @@ sub set_cookie { # given a URL and the body text at that URL, selects and returns a random # image from it. returns () if no suitable images found. # -sub pick_image_from_body { - my ( $url, $body ) = @_; +sub pick_image_from_body($$) { + my ($url, $body) = @_; my $base = $url; $_ = $url; @@ -526,9 +640,9 @@ sub pick_image_from_body { # randomly from the set of images on the web. 
All the logic here for # rejecting some images is really a set of heuristics for rejecting # images that are not really images: for rejecting *text* that is in - # GIF/JPEG form. I don't want text, I want pictures, and I want the - # content of the pictures to be randomly selected from among all the - # available content. + # GIF/JPEG/PNG form. I don't want text, I want pictures, and I want + # the content of the pictures to be randomly selected from among all + # the available content. # # So, filtering out "dirty" pictures by looking for "dirty" keywords # would be wrong: dirty pictures exist, like it or not, so webcollage @@ -586,7 +700,7 @@ sub pick_image_from_body { } elsif ( m/^(img|a) .*(src|href) ?= ?\"? ?(.*?)[ >\"]/io ) { - my $was_inline = ( "$1" eq "a" || "$1" eq "A" ); + my $was_inline = (! ( "$1" eq "a" || "$1" eq "A" )); my $link = $3; my ( $width ) = m/width ?=[ \"]*(\d+)/oi; my ( $height ) = m/height ?=[ \"]*(\d+)/oi; @@ -608,7 +722,7 @@ sub pick_image_from_body { } # skip non-image - if ( ! m@[.](gif|jpg|jpeg|pjpg|pjpeg)$@io ) { + if ( ! m@[.](gif|jpg|jpeg|pjpg|pjpeg|png)$@io ) { next; } @@ -641,6 +755,13 @@ sub pick_image_from_body { next; } + # skip images with a URL that indicates a Yahoo thumbnail. + if (m@\.yimg\.com/.*/t/@) { + if (!$width) { $width = "?"; } + if (!$height) { $height = "?"; } + LOG ($verbose_filter, " skip yahoo thumb $_ (${width}x$height)"); + next; + } my $url = $_; @@ -657,9 +778,9 @@ sub pick_image_from_body { $urls[++$#urls] = $url; $unique_urls{$url}++; - # jpegs are preferable to gifs. + # JPEGs are preferable to GIFs and PNGs. $_ = $url; - if ( ! m@[.]gif$@io ) { + if ( ! m@[.](gif|png)$@io ) { $urls[++$#urls] = $url; } @@ -692,6 +813,29 @@ sub pick_image_from_body { return $url; } +# Given a URL and the RSS feed from that URL, pick a random image from +# the feed. This is a lot simpler than extracting images out of a page: +# we already know we have reasonable images, so we just pick one. +# Returns: the real URL of the page (preferably not the RSS version), +# and the image. + +sub pick_image_from_rss($$) { + my ( $url, $body ) = @_; + my @suitable = ($body =~ m/([^<>]+)@i); + $base = $url unless $base; + + # pick a random element of the table + if (@suitable) { + my $i = int(rand(scalar @suitable)); + my $url = $suitable[$i]; + LOG ($verbose_load, "picked image " .($i+1) . "/" . + ($#suitable+1) . ": $url"); + return ($base, $url); + } + return; +} ############################################################################ @@ -701,7 +845,7 @@ sub pick_image_from_body { ############################################################################ -sub pick_dictionary { +sub pick_dictionary() { my @dicts = ("/usr/dict/words", "/usr/share/dict/words", "/usr/share/lib/dict/words"); @@ -716,42 +860,59 @@ sub pick_dictionary { # returns a random word from the dictionary # -sub random_word { - my $word = 0; - if (open (IN, "<$wordlist")) { - my $size = (stat(IN))[7]; - my $pos = rand $size; - if (seek (IN, $pos, 0)) { - $word = ; # toss partial line - $word = ; # keep next line - } - if (!$word) { - seek( IN, 0, 0 ); - $word = ; - } - close (IN); - } +sub random_word() { - return 0 if (!$word); + local *IN; + if (! 
open (IN, "<$wordlist")) { + return undef; + } + + my $size = (stat(IN))[7]; + my $word = undef; + my $count = 0; - $word =~ s/^[ \t\n\r]+//; - $word =~ s/[ \t\n\r]+$//; - $word =~ s/ys$/y/; - $word =~ s/ally$//; - $word =~ s/ly$//; - $word =~ s/ies$/y/; - $word =~ s/ally$/al/; - $word =~ s/izes$/ize/; - $word =~ tr/A-Z/a-z/; + while (1) { + error ("looping ($count) while reading $wordlist") + if (++$count > 100); - if ( $word =~ s/[ \t\n\r]/\+/g ) { # convert intra-word spaces to "+". - $word = "\%22$word\%22"; # And put quotes (%22) around it. + my $pos = int (rand ($size)); + if (seek (IN, $pos, 0)) { + $word = ; # toss partial line + $word = ; # keep next line } - return $word; + next unless ($word); + next if ($word =~ m/^[-\']/); + + $word = lc($word); + $word =~ s/^.*-//s; + $word =~ s/^[^a-z]+//s; + $word =~ s/[^a-z]+$//s; + $word =~ s/\'s$//s; + $word =~ s/ys$/y/s; + $word =~ s/ally$//s; + $word =~ s/ly$//s; + $word =~ s/ies$/y/s; + $word =~ s/ally$/al/s; + $word =~ s/izes$/ize/s; + $word =~ s/esses$/ess/s; + $word =~ s/(.{5})ing$/$1/s; + + next if (length ($word) > 14); + last if ($word); + } + + close (IN); + + if ( $word =~ s/\s/\+/gs ) { # convert intra-word spaces to "+". + $word = "\%22$word\%22"; # And put quotes (%22) around it. + } + + return $word; } -sub random_words { + +sub random_words($) { my ($or_p) = @_; my $sep = ($or_p ? "%20OR%20" : "%20"); return (random_word . $sep . @@ -762,19 +923,35 @@ sub random_words { } -sub url_quote { +sub url_quote($) { my ($s) = @_; $s =~ s|([^-a-zA-Z0-9.\@/_\r\n])|sprintf("%%%02X", ord($1))|ge; return $s; } -sub url_unquote { +sub url_unquote($) { my ($s) = @_; $s =~ s/[+]/ /g; $s =~ s/%([a-z0-9]{2})/chr(hex($1))/ige; return $s; } +sub html_quote($) { + my ($s) = @_; + $s =~ s/&/&/gi; + $s =~ s//>/gi; + $s =~ s/\"/"/gi; + return $s; +} + +sub html_unquote($) { + my ($s) = @_; + $s =~ s/(&([a-z]+);)/{ $entity_table{$2} || $1; }/gexi; # e.g., ' + $s =~ s/(&\#(\d+);)/{ chr($2) }/gexi; # e.g., ' + return $s; +} + # Loads the given URL (a search on some search engine) and returns: # - the total number of hits the search engine claimed it had; @@ -782,7 +959,7 @@ sub url_unquote { # Note that this list contains all kinds of internal search engine # junk URLs too -- caller must prune them. 
# -sub pick_from_search_engine { +sub pick_from_search_engine($$$) { my ( $timeout, $search_url, $words ) = @_; $_ = $words; @@ -822,9 +999,9 @@ sub pick_from_search_engine { $search_count = $1; } elsif ($body =~ m@found about ((\d{1,3})(,\d{3})*|\d+) results@) { $search_count = $1; - } elsif ($body =~ m@\b\d+ - \d+ of (\d+)\b@i) { # imagevista + } elsif ($body =~ m@\b\d+ - \d+ of (\d+)\b@i) { # avimages $search_count = $1; - } elsif ($body =~ m@About ((\d{1,3})(,\d{3})*) images@i) { # imagevista + } elsif ($body =~ m@About ((\d{1,3})(,\d{3})*) images@i) { # avimages $search_count = $1; } elsif ($body =~ m@We found ((\d{1,3})(,\d{3})*|\d+) results@i) { # *vista $search_count = $1; @@ -834,9 +1011,9 @@ sub pick_from_search_engine { $search_count = $1; # lycos } elsif ($body =~ m@WEB.*?RESULTS.*?\b((\d{1,3})(,\d{3})*)\b.*?Matches@i) { $search_count = $1; # hotbot - } elsif ($body =~ m@no photos were found containing@i) { # imagevista + } elsif ($body =~ m@no photos were found containing@i) { # avimages $search_count = "0"; - } elsif ($body =~ m@found no document matching@i) { # altavista + } elsif ($body =~ m@found no document matching@i) { # avtext $search_count = "0"; } 1 while ($search_count =~ s/^(\d+)(\d{3})/$1,$2/); @@ -890,7 +1067,7 @@ sub pick_from_search_engine { } -sub depoison { +sub depoison(@) { my (@urls) = @_; my @urls2 = (); foreach (@urls) { @@ -921,10 +1098,9 @@ sub depoison { # given a list of URLs, picks one at random; loads it; and returns a # random image from it. -# returns the url of the page loaded; the url of the image chosen; -# and a debugging description string. +# returns the url of the page loaded; the url of the image chosen. # -sub pick_image_from_pages { +sub pick_image_from_pages($$$$@) { my ($base, $total_hit_count, $unfiltered_link_count, $timeout, @pages) = @_; $total_hit_count = "?" unless defined($total_hit_count); @@ -968,15 +1144,15 @@ sub pick_image_from_pages { ############################################################################ # yahoorand -my $yahoo_random_link = "http://random.yahoo.com/bin/ryl"; +my $yahoo_random_link = "http://random.yahoo.com/fast/ryl"; # Picks a random page; picks a random image on that page; # returns two URLs: the page containing the image, and the image. # Returns () if nothing found this time. # -sub pick_from_yahoo_random_link { - my ( $timeout ) = @_; +sub pick_from_yahoo_random_link($) { + my ($timeout) = @_; print STDERR "\n\n" if ($verbose_load); LOG ($verbose_load, "URL: $yahoo_random_link"); @@ -1003,6 +1179,50 @@ sub pick_from_yahoo_random_link { } } + +############################################################################ +# +# Pick images from random pages returned by the Alta Vista Random Link +# +############################################################################ + +# altavista +my $alta_vista_random_link = "http://www.altavista.com/image/randomlink"; + + +# Picks a random page; picks a random image on that page; +# returns two URLs: the page containing the image, and the image. +# Returns () if nothing found this time. 
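#
# Every picker shares that contract, so pick_image() can call them
# interchangeably.  A small sketch of a caller (the 20-second timeout is
# only an example value):
#
my ($page, $img) = pick_from_alta_vista_random_link (20);
if (defined ($img)) {
  LOG ($verbose_load, "got $img from $page");
} else {
  # empty return: this attempt failed; the stats code records the failure
  # and a later pass tries another entry from @search_methods.
}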
+# +sub pick_from_alta_vista_random_link($) { + my ($timeout) = @_; + + print STDERR "\n\n" if ($verbose_load); + LOG ($verbose_load, "URL: $alta_vista_random_link"); + + $last_search = $alta_vista_random_link; # for warnings + + $suppress_audit = 1; + + my ( $base, $body ) = get_document ($alta_vista_random_link, + undef, $timeout); + if (!$base || !$body) { + $body = undef; + return; + } + + LOG ($verbose_load, "redirected to: $base"); + + my $img = pick_image_from_body ($base, $body); + $body = undef; + + if ($img) { + return ($base, $img); + } else { + return (); + } +} + ############################################################################ # @@ -1011,22 +1231,20 @@ sub pick_from_yahoo_random_link { ############################################################################ -my $alta_vista_images_url = "http://www.altavista.com/cgi-bin/query" . +my $alta_vista_images_url = "http://www.altavista.com/image/results" . "?ipht=1" . # photos "&igrph=1" . # graphics "&iclr=1" . # color "&ibw=1" . # b&w "&micat=1" . # no partner sites - "&imgset=1" . # no partner sites - "&stype=simage" . # do image search - "&mmW=1" . # unknown, but required + "&sc=on" . # "site collapse" "&q="; -# imagevista -sub pick_from_alta_vista_images { - my ( $timeout ) = @_; +# avimages +sub pick_from_alta_vista_images($) { + my ($timeout) = @_; - my $words = random_words(1); + my $words = random_word(); my $page = (int(rand(9)) + 1); my $search_url = $alta_vista_images_url . $words; @@ -1041,12 +1259,13 @@ sub pick_from_alta_vista_images { my @candidates = (); foreach my $u (@subpages) { - # altavista is encoding their URLs now. - next unless ($u =~ m@^/r.*\&r=([^&]+).*@); - $u = url_unquote($1); + # avimages is encoding their URLs now. + next unless ($u =~ s/^.*\*\*(http%3a.*$)/$1/gsi); + $u = url_unquote($u); next unless ($u =~ m@^http://@i); # skip non-HTTP or relative URLs next if ($u =~ m@[/.]altavista\.com\b@i); # skip altavista builtins + next if ($u =~ m@[/.]yahoo\.com\b@i); # yahoo and av in cahoots? next if ($u =~ m@[/.]doubleclick\.net\b@i); # you cretins next if ($u =~ m@[/.]clicktomarket\.com\b@i); # more cretins @@ -1079,10 +1298,13 @@ my $google_images_url = "http://images.google.com/images" . "&q="; # googleimgs -sub pick_from_google_images { - my ( $timeout ) = @_; +sub pick_from_google_images($;$$) { + my ($timeout, $words, $max_page) = @_; + + if (!defined($words)) { + $words = random_word; # only one word for Google + } - my $words = random_word; # only one word for Google my $page = (int(rand(9)) + 1); my $num = 20; # 20 images per page my $search_url = $google_images_url . 
$words; @@ -1096,34 +1318,45 @@ sub pick_from_google_images { pick_from_search_engine ($timeout, $search_url, $words); my @candidates = (); + my %referers; foreach my $u (@subpages) { next unless ($u =~ m@imgres\?imgurl@i); # All pics start with this next if ($u =~ m@[/.]google\.com\b@i); # skip google builtins if ($u =~ m@^/imgres\?imgurl=(.*?)\&imgrefurl=(.*?)\&@) { - my $urlf = $2; - LOG ($verbose_filter, " candidate: $urlf"); - push @candidates, $urlf; + my $ref = $2; + my $img = $1; + $img = "http://$img" unless ($img =~ m/^http:/i); + + LOG ($verbose_filter, " candidate: $ref"); + push @candidates, $img; + $referers{$img} = $ref; } } - return pick_image_from_pages ($search_url, $search_hit_count, $#subpages+1, - $timeout, @candidates); + @candidates = depoison (@candidates); + return () if ($#candidates < 0); + my $i = int(rand($#candidates+1)); + my $img = $candidates[$i]; + my $ref = $referers{$img}; + + LOG ($verbose_load, "picked image " . ($i+1) . ": $img (on $ref)"); + return ($ref, $img); } ############################################################################ # -# Pick images by feeding random *numbers* into Google Image Search. -# By jwz, suggested by from Ian O'Donnell. +# Pick images by feeding random numbers into Google Image Search. +# By jwz, suggested by Ian O'Donnell. # ############################################################################ # googlenums -sub pick_from_google_image_numbers { - my ( $timeout ) = @_; +sub pick_from_google_image_numbers($) { + my ($timeout) = @_; my $max = 9999; my $number = int(rand($max)); @@ -1131,43 +1364,75 @@ sub pick_from_google_image_numbers { $number = sprintf("%04d", $number) if (rand() < 0.3); - my $words = "$number"; - my $page = (int(rand(40)) + 1); - my $num = 20; # 20 images per page - my $search_url = $google_images_url . $words; - - if ($page > 1) { - $search_url .= "&start=" . $page*$num; # page number - $search_url .= "&num=" . $num; #images per page - } - - my ($search_hit_count, @subpages) = - pick_from_search_engine ($timeout, $search_url, $words); - - my @candidates = (); - my %referers; - foreach my $u (@subpages) { - next unless ($u =~ m@imgres\?imgurl@i); # All pics start with this - next if ($u =~ m@[/.]google\.com\b@i); # skip google builtins - - if ($u =~ m@^/imgres\?imgurl=(.*?)\&imgrefurl=(.*?)\&@) { - my $ref = $2; - my $img = "http://$1"; + pick_from_google_images ($timeout, "$number"); +} - LOG ($verbose_filter, " candidate: $ref"); - push @candidates, $img; - $referers{$img} = $ref; - } - } - @candidates = depoison (@candidates); - return () if ($#candidates < 0); - my $i = int(rand($#candidates+1)); - my $img = $candidates[$i]; - my $ref = $referers{$img}; + +############################################################################ +# +# Pick images by feeding random digital camera file names into +# Google Image Search. +# By jwz, inspired by the excellent Random Personal Picture Finder +# at http://www.diddly.com/random/ +# +############################################################################ - LOG ($verbose_load, "picked image " . ($i+1) . 
": $img (on $ref)"); - return ($ref, $img); +my @photomakers = ( + # + # Common digital camera file name formats, as described at + # http://www.diddly.com/random/about.html + # + sub { sprintf ("dcp%05d.jpg", int(rand(4000))); }, # Kodak + sub { sprintf ("dsc%05d.jpg", int(rand(4000))); }, # Nikon + sub { sprintf ("dscn%04d.jpg", int(rand(4000))); }, # Nikon + sub { sprintf ("mvc-%03d.jpg", int(rand(999))); }, # Sony Mavica + sub { sprintf ("mvc%05d.jpg", int(rand(9999))); }, # Sony Mavica + sub { sprintf ("P101%04d.jpg", int(rand(9999))); }, # Olympus w/ date=101 + sub { sprintf ("P%x%02d%04d.jpg", # Olympus + int(rand(0xC)), int(rand(30))+1, + rand(9999)); }, + sub { sprintf ("IMG_%03d.jpg", int(rand(999))); }, # ? + sub { sprintf ("IMAG%04d.jpg", int(rand(9999))); }, # RCA and Samsung + sub { my $n = int(rand(9999)); # Canon + sprintf ("1%02d-%04d.jpg", int($n/100), $n); }, + sub { my $n = int(rand(9999)); # Canon + sprintf ("1%02d-%04d_IMG.jpg", + int($n/100), $n); }, + sub { sprintf ("IMG_%04d.jpg", int(rand(9999))); }, # Canon + sub { sprintf ("dscf%04d.jpg", int(rand(9999))); }, # Fuji Finepix + sub { sprintf ("pdrm%04d.jpg", int(rand(9999))); }, # Toshiba PDR + sub { sprintf ("IM%06d.jpg", int(rand(9999))); }, # HP Photosmart + sub { sprintf ("EX%06d.jpg", int(rand(9999))); }, # HP Photosmart +# sub { my $n = int(rand(3)); # Kodak DC-40,50,120 +# sprintf ("DC%04d%s.jpg", int(rand(9999)), +# $n == 0 ? 'S' : $n == 1 ? 'M' : 'L'); }, + sub { sprintf ("pict%04d.jpg", int(rand(9999))); }, # Minolta Dimage + sub { sprintf ("P%07d.jpg", int(rand(9999))); }, # Kodak DC290 +# sub { sprintf ("%02d%02d%04d.jpg", # Casio QV3000, QV4000 +# int(rand(12))+1, int(rand(31))+1, +# int(rand(999))); }, +# sub { sprintf ("%02d%x%02d%04d.jpg", # Casio QV7000 +# int(rand(6)), # year +# int(rand(12))+1, int(rand(31))+1, +# int(rand(999))); }, + sub { sprintf ("IMGP%04d.jpg", int(rand(9999))); }, # Pentax Optio S + sub { sprintf ("PANA%04d.jpg", int(rand(9999))); }, # Panasonic vid still + sub { sprintf ("HPIM%04d.jpg", int(rand(9999))); }, # HP Photosmart + sub { sprintf ("PCDV%04d.jpg", int(rand(9999))); }, # ? + ); + + +# googlephotos +sub pick_from_google_image_photos($) { + my ($timeout) = @_; + + my $i = int(rand($#photomakers + 1)); + my $fn = $photomakers[$i]; + my $file = &$fn; + my $words .= $file . "%20filetype:jpg"; + + pick_from_google_images ($timeout, $words); } @@ -1179,18 +1444,19 @@ sub pick_from_google_image_numbers { ############################################################################ -my $alta_vista_url_1 = "http://www.altavista.com/cgi-bin/query?pg=q" . - "&text=yes&kl=XX&stype=stext&q="; -my $alta_vista_url_2 = "http://www.altavista.com/sites/search/web?pg=q" . - "&kl=XX&search=Search&q="; - -my $alta_vista_url = $alta_vista_url_2; +my $alta_vista_url = "http://www.altavista.com/web/results" . + "?pg=aq" . + "&aqmode=s" . + "&filetype=html" . + "&sc=on" . # "site collapse" + "&nbq=50" . + "&aqo="; -# altavista -sub pick_from_alta_vista_text { - my ( $timeout ) = @_; +# avtext +sub pick_from_alta_vista_text($) { + my ($timeout) = @_; - my $words = random_words(1); + my $words = random_words(0); my $page = (int(rand(9)) + 1); my $search_url = $alta_vista_url . $words; @@ -1210,8 +1476,12 @@ sub pick_from_alta_vista_text { # onMouseOver to make it look like they're not! Well, it makes it # easier for us to identify search results... 
# - next unless ($u =~ m@^/r.*\&r=([^&]+).*@); - $u = url_unquote($1); + next unless ($u =~ s/^.*\*\*(http%3a.*$)/$1/gsi); + $u = url_unquote($u); + + next unless ($u =~ m@^http://@i); # skip non-HTTP or relative URLs + next if ($u =~ m@[/.]altavista\.com\b@i); # skip altavista builtins + next if ($u =~ m@[/.]yahoo\.com\b@i); # yahoo and av in cahoots? LOG ($verbose_filter, " candidate: $u"); push @candidates, $u; @@ -1229,23 +1499,30 @@ sub pick_from_alta_vista_text { # ############################################################################ -my $hotbot_search_url = "http://hotbot.lycos.com/" . - "?SM=SC" . - "&DV=0" . - "&LG=any" . - "&FVI=1" . - "&DC=100" . - "&DE=0" . - "&SQ=1" . - "&TR=13" . - "&AM1=MC" . - "&MT="; - -sub pick_from_hotbot_text { - my ( $timeout ) = @_; +my $hotbot_search_url =("http://hotbot.lycos.com/default.asp" . + "?ca=w" . + "&descriptiontype=0" . + "&imagetoggle=1" . + "&matchmode=any" . + "&nummod=2" . + "&recordcount=50" . + "&sitegroup=1" . + "&stem=1" . + "&cobrand=undefined" . + "&query="); + +sub pick_from_hotbot_text($) { + my ($timeout) = @_; + + $last_search = $hotbot_search_url; # for warnings + + # lycos seems to always give us back dictionaries and word lists if + # we search for more than one word... + # + my $words = random_word(); - my $words = random_words(0); - my $search_url = $hotbot_search_url . $words; + my $start = int(rand(8)) * 10 + 1; + my $search_url = $hotbot_search_url . $words . "&first=$start&page=more"; my ($search_hit_count, @subpages) = pick_from_search_engine ($timeout, $search_url, $words); @@ -1254,8 +1531,14 @@ sub pick_from_hotbot_text { foreach my $u (@subpages) { # Hotbot plays redirection games too - next unless ($u =~ m@^/director.asp\?target=([^&]+)@); - $u = url_decode($1); + # (not any more?) +# next unless ($u =~ m@/director.asp\?.*\btarget=([^&]+)@); +# $u = url_decode($1); + + next unless ($u =~ m@^http://@i); # skip non-HTTP or relative URLs + next if ($u =~ m@[/.]hotbot\.com\b@i); # skip hotbot builtins + next if ($u =~ m@[/.]lycos\.com\b@i); # skip hotbot builtins + next if ($u =~ m@[/.]inktomi\.com\b@i); # skip hotbot builtins LOG ($verbose_filter, " candidate: $u"); push @candidates, $u; @@ -1273,17 +1556,24 @@ sub pick_from_hotbot_text { # ############################################################################ -my $lycos_search_url = "http://lycospro.lycos.com/srchpro/" . +my $lycos_search_url = "http://search.lycos.com/default.asp" . "?lpv=1" . - "&t=any" . + "&loc=searchhp" . + "&tab=web" . "&query="; -sub pick_from_lycos_text { - my ( $timeout ) = @_; +sub pick_from_lycos_text($) { + my ($timeout) = @_; + + $last_search = $lycos_search_url; # for warnings + + # lycos seems to always give us back dictionaries and word lists if + # we search for more than one word... + # + my $words = random_word(); - my $words = random_words(0); my $start = int(rand(8)) * 10 + 1; - my $search_url = $lycos_search_url . $words . "&start=$start"; + my $search_url = $lycos_search_url . $words . "&first=$start&page=more"; my ($search_hit_count, @subpages) = pick_from_search_engine ($timeout, $search_url, $words); @@ -1291,17 +1581,21 @@ sub pick_from_lycos_text { my @candidates = (); foreach my $u (@subpages) { - # Lycos plays exact the same redirection game as hotbot. - # Note that "id=0" is used for internal advertising links, - # and 1+ are used for search results. 
- next unless ($u =~ m@^http://click.hotbot.com/director.asp - .* - [?&]id=[1-9]\d* - .* - \&target=([^&]+) - .* - @x); - $u = url_decode($1); + # Lycos plays redirection games. + # (not any more?) +# next unless ($u =~ m@^http://click.lycos.com/director.asp +# .* +# \btarget=([^&]+) +# .* +# @x); +# $u = url_decode($1); + + next unless ($u =~ m@^http://@i); # skip non-HTTP or relative URLs + next if ($u =~ m@[/.]hotbot\.com\b@i); # skip lycos builtins + next if ($u =~ m@[/.]lycos\.com\b@i); # skip lycos builtins + next if ($u =~ m@[/.]terralycos\.com\b@i); # skip lycos builtins + next if ($u =~ m@[/.]inktomi\.com\b@i); # skip lycos builtins + LOG ($verbose_filter, " candidate: $u"); push @candidates, $u; @@ -1319,14 +1613,17 @@ sub pick_from_lycos_text { # ############################################################################ -my $yahoo_news_url = "http://search.news.yahoo.com/search/news_photos?" . - "&z=&n=100&o=o&2=&3=&p="; +my $yahoo_news_url = "http://news.search.yahoo.com/search/news" . + "?c=news_photos" . + "&p="; # yahoonews -sub pick_from_yahoo_news_text { - my ( $timeout ) = @_; +sub pick_from_yahoo_news_text($) { + my ($timeout) = @_; - my $words = random_words(1); + $last_search = $yahoo_news_url; # for warnings + + my $words = random_word(); my $search_url = $yahoo_news_url . $words; my ($search_hit_count, @subpages) = @@ -1334,9 +1631,14 @@ sub pick_from_yahoo_news_text { my @candidates = (); foreach my $u (@subpages) { + + # de-redirectize the URLs + $u =~ s@^http://rds\.yahoo\.com/.*-http%3A@http:@s; + # only accept URLs on Yahoo's news site next unless ($u =~ m@^http://dailynews\.yahoo\.com/@i || $u =~ m@^http://story\.news\.yahoo\.com/@i); + next unless ($u =~ m@&u=/@); LOG ($verbose_filter, " candidate: $u"); push @candidates, $u; @@ -1347,6 +1649,261 @@ sub pick_from_yahoo_news_text { } + +############################################################################ +# +# Pick images from LiveJournal's list of recently-posted images. +# +############################################################################ + +my $livejournal_img_url = "http://www.livejournal.com/stats/latest-img.bml"; + +# With most of our image sources, we get a random page and then select +# from the images on it. However, in the case of LiveJournal, the page +# of images tends to update slowly; so we'll remember the last N entries +# on it and randomly select from those, to get a wider variety each time. + +my $lj_cache_size = 1000; +my @lj_cache = (); # fifo, for ordering by age +my %lj_cache = (); # hash, for detecting dups + +# livejournal +sub pick_from_livejournal_images($) { + my ($timeout) = @_; + + $last_search = $livejournal_img_url; # for warnings + + my ( $base, $body ) = get_document ($livejournal_img_url, undef, $timeout); + return () unless $body; + + $body =~ s/\n/ /gs; + $body =~ s/(= $lj_cache_size) { + my $pairP = shift @lj_cache; + my $img = $pairP->[0]; + delete $lj_cache{$img}; + } + + LOG ($verbose_load, "picked image " .($i+1) . "/$n: $img"); + + return ($page, $img); +} + + +############################################################################ +# +# Pick images from ircimages.com (images that have been in the /topic of +# various IRC channels.) +# +############################################################################ + +my $ircimages_url = "http://ircimages.com/"; + +# ircimages +sub pick_from_ircimages($) { + my ($timeout) = @_; + + $last_search = $ircimages_url; # for warnings + + my $n = int(rand(2900)); + my $search_url = $ircimages_url . 
"page-$n"; + + my ( $base, $body ) = get_document ($search_url, undef, $timeout); + return () unless $body; + + my @candidates = (); + + $body =~ s/\n/ /gs; + $body =~ s/(]+)>@i; + next unless $u; + + if ($u =~ m/^\"([^\"]*)\"/) { $u = $1; } # quoted string + elsif ($u =~ m/^([^\s]*)\s/) { $u = $1; } # or token + + next unless ($u =~ m/^http:/i); + next if ($u =~ m@^http://(searchirc\.com\|ircimages\.com)@i); + next unless ($u =~ m@[.](gif|jpg|jpeg|pjpg|pjpeg|png)$@i); + + LOG ($verbose_http, " HREF: $u"); + push @candidates, $u; + } + + LOG ($verbose_filter, "" . $#candidates+1 . " links on $search_url"); + + return () if ($#candidates == -1); + + my $i = int(rand($#candidates+1)); + my $img = $candidates[$i]; + + LOG ($verbose_load, "picked image " .($i+1) . "/" . ($#candidates+1) . + ": $img"); + + $search_url = $img; # hmm... + return ($search_url, $img); +} + + +############################################################################ +# +# Pick images from Flickr's page of recently-posted photos. +# +############################################################################ + +my $flickr_img_url = "http://www.flickr.com/photos/"; + +# Like LiveJournal, the Flickr page of images tends to update slowly, +# so remember the last N entries on it and randomly select from those. + +# I know that Flickr has an API (http://www.flickr.com/services/api/) +# but it was easy enough to scrape the HTML, so I didn't bother exploring. + +my $flickr_cache_size = 1000; +my @flickr_cache = (); # fifo, for ordering by age +my %flickr_cache = (); # hash, for detecting dups + + +# flickr_recent +sub pick_from_flickr_recent($) { + my ($timeout) = @_; + + my $start = 16 * int(rand(100)); + + $last_search = $flickr_img_url; # for warnings + $last_search .= "?start=$start" if ($start > 0); + + my ( $base, $body ) = get_document ($last_search, undef, $timeout); + return () unless $body; + + $body =~ s/[\r\n]/ /gs; + $body =~ s/(]* \b HREF=\"([^<>\"]+)\" [^<>]* > \s* + ]* \b SRC=\"([^<>\"]+)\" @xsi; + next unless defined ($thumb); + $page = html_unquote ($page); + $thumb = html_unquote ($thumb); + + next unless ($thumb =~ m@^http://photos\d*\.flickr\.com/@); + + my $base = "http://www.flickr.com/"; + $page =~ s@^/@$base@; + $thumb =~ s@^/@$base@; + + my $img = $thumb; + $img =~ s/_[a-z](\.[a-z\d]+)$/$1/si; # take off "thumb" suffix + + $count++; + next if ($flickr_cache{$img}); # already have it + + my @pair = ($img, $page, $start); + LOG ($verbose_filter, " candidate: $img"); + push @flickr_cache, \@pair; + $flickr_cache{$img} = \@pair; + $count2++; + } + + return () if ($#flickr_cache == -1); + + my $n = $#flickr_cache+1; + my $i = int(rand($n)); + my ($img, $page) = @{$flickr_cache[$i]}; + + # delete this one from @flickr_cache and from %flickr_cache. + # + @flickr_cache = ( @flickr_cache[0 .. $i-1], + @flickr_cache[$i+1 .. $#flickr_cache] ); + delete $flickr_cache{$img}; + + # Keep the size of the cache under the limit by nuking older entries + # + while ($#flickr_cache >= $flickr_cache_size) { + my $pairP = shift @flickr_cache; + my $img = $pairP->[0]; + delete $flickr_cache{$img}; + } + + LOG ($verbose_load, "picked image " .($i+1) . "/$n: $img"); + + return ($page, $img); +} + + +############################################################################ +# +# Pick images from a random RSS feed on Flickr. +# +############################################################################ + +my $flickr_rss_base = ("http://www.flickr.com/services/feeds/photos_public.gne" . 
+ "?format=rss_200_enc&tags="); + +# Picks a random RSS feed; picks a random image from that feed; +# returns 2 URLs: the page containing the image, and the image. +# Mostly by Joe Mcmahon +# +# flickr_random +sub pick_from_flickr_random($) { + my $timeout = shift; + + my $rss = $flickr_rss_base . random_word(); + $last_search = $rss; + + print STDERR "\n\n" if ($verbose_load); + LOG ($verbose_load, "URL: $last_search"); + + $suppress_audit = 1; + + my ( $base, $body ) = get_document ($last_search, undef, $timeout); + if (!$base || !$body) { + $body = undef; + return; + } + + my $img; + ($base, $img) = pick_image_from_rss ($base, $body); + $body = undef; + return () unless defined ($img); + + LOG ($verbose_load, "redirected to: $base"); + return ($base, $img); +} ############################################################################ @@ -1360,8 +1917,8 @@ sub pick_from_yahoo_news_text { ############################################################################ # driftnet -sub pick_from_driftnet { - my ( $timeout ) = @_; +sub pick_from_driftnet($) { + my ($timeout) = @_; my $id = $driftnet_magic; my $dir = $driftnet_dir; @@ -1393,7 +1950,7 @@ sub pick_from_driftnet { } -sub get_driftnet_file { +sub get_driftnet_file($) { my ($file) = @_; error ("\$driftnet_dir unset?") unless ($driftnet_dir); @@ -1407,13 +1964,13 @@ sub get_driftnet_file { open (IN, $file) || error ("$id: $file: $!"); my $body = ''; while () { $body .= $_; } - close IN; - unlink ($file); + close IN || error ("$id: $file: $!"); + unlink ($file) || error ("$id: $file: rm: $!"); return ($id, $body); } -sub spawn_driftnet { +sub spawn_driftnet($) { my ($cmd) = @_; # make a directory to use. @@ -1455,6 +2012,46 @@ sub spawn_driftnet { unless (1 == kill (0, $pid)); } +# local-directory +sub pick_from_local_dir { + my ( $timeout ) = @_; + + my $id = $local_magic; + $last_search = $id; + + my $dir = $local_dir; + error ("\$local_dir unset?") unless ($dir); + $dir =~ s@/+$@@; + + error ("$dir unreadable") unless (-d "$dir/."); + + my $v = ($verbose_exec ? "-v" : ""); + my $pick = `xscreensaver-getimage-file $v "$dir"`; + + LOG ($verbose_load, "picked file $pick ($id)"); + return ($id, $pick); +} + + +sub get_local_file { + my ($file) = @_; + + error ("\$local_dir unset?") unless ($local_dir); + + my $id = $local_magic; + my $re = qr/$local_dir/; + error ("$id: $file not in $local_dir?") + unless ($file =~ m@^$re@o); + + local *IN; + open (IN, $file) || error ("$id: $file: $!"); + my $body = ''; + while () { $body .= $_; } + close IN || error ("$id: $file: $!"); + return ($id, $body); +} + + ############################################################################ # @@ -1468,8 +2065,8 @@ sub spawn_driftnet { # Returns () if nothing found this time. # -sub pick_image { - my ( $timeout ) = @_; +sub pick_image(;$) { + my ($timeout) = @_; $current_state = "select"; $load_method = "none"; @@ -1507,28 +2104,39 @@ sub pick_image { # ############################################################################ -sub timestr { +sub timestr() { return strftime ("%H:%M:%S: ", localtime); } -sub blurb { +sub blurb() { return "$progname: " . timestr() . "$current_state: "; } -sub error { +sub error($) { my ($err) = @_; print STDERR blurb() . 
"$err\n"; exit 1; } +sub stacktrace() { + my $i = 1; + print STDERR "$progname: stack trace:\n"; + while (1) { + my ($package, $filename, $line, $subroutine) = caller($i++); + last unless defined($package); + $filename =~ s@^.*/@@; + print STDERR " $filename#$line, $subroutine\n"; + } +} + my $lastlog = ""; -sub clearlog { +sub clearlog() { $lastlog = ""; } -sub showlog { +sub showlog() { my $head = "$progname: DEBUG: "; foreach (split (/\n/, $lastlog)) { print STDERR "$head$_\n"; @@ -1536,7 +2144,7 @@ sub showlog { $lastlog = ""; } -sub LOG { +sub LOG($$) { my ($print, $msg) = @_; my $blurb = timestr() . "$current_state: "; $lastlog .= "$blurb$msg\n"; @@ -1549,7 +2157,7 @@ my %stats_successes; my %stats_elapsed; my $last_state = undef; -sub record_attempt { +sub record_attempt($) { my ($name) = @_; if ($last_state) { @@ -1565,7 +2173,7 @@ sub record_attempt { $suppress_audit = 0; } -sub record_success { +sub record_success($$$) { my ($name, $url, $base) = @_; if (defined($stats_successes{$name})) { $stats_successes{$name}++; @@ -1583,7 +2191,7 @@ sub record_success { } -sub record_failure { +sub record_failure($) { my ($name) = @_; return if $image_succeeded; @@ -1615,7 +2223,7 @@ sub record_failure { -sub stats_of { +sub stats_of($) { my ($name) = @_; my $i = $stats_successes{$name}; my $j = $stats_attempts{$name}; @@ -1627,7 +2235,7 @@ sub stats_of { my $current_start_time = 0; -sub start_timer { +sub start_timer($) { my ($name) = @_; $current_start_time = time; @@ -1641,14 +2249,14 @@ sub start_timer { } } -sub stop_timer { +sub stop_timer($$) { my ($name, $success) = @_; $stats_elapsed{$name} += time - $current_start_time; } my $last_report_time = 0; -sub report_performance { +sub report_performance() { return unless $verbose_warnings; @@ -1682,16 +2290,17 @@ my $max_recent_sites = 20; my @recent_images = (); my @recent_sites = (); -sub save_recent_url { +sub save_recent_url($$) { my ($url, $base) = @_; return unless ($verbose_warnings); $_ = $url; my ($site) = m@^http://([^ \t\n\r/:]+)@; + return unless defined ($site); - if ($base eq $driftnet_magic) { - $site = $driftnet_magic; + if ($base eq $driftnet_magic || $base eq $local_magic) { + $site = $base; @recent_images = (); } @@ -1743,7 +2352,7 @@ sub save_recent_url { # Does %-decoding. # -sub url_decode { +sub url_decode($) { ($_) = @_; tr/+/ /; s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg; @@ -1753,19 +2362,20 @@ sub url_decode { # Given the raw body of a GIF document, returns the dimensions of the image. # -sub gif_size { +sub gif_size($) { my ($body) = @_; my $type = substr($body, 0, 6); my $s; return () unless ($type =~ /GIF8[7,9]a/); $s = substr ($body, 6, 10); my ($a,$b,$c,$d) = unpack ("C"x4, $s); + return () unless defined ($d); return (($b<<8|$a), ($d<<8|$c)); } # Given the raw body of a JPEG document, returns the dimensions of the image. # -sub jpeg_size { +sub jpeg_size($) { my ($body) = @_; my $i = 0; my $L = length($body); @@ -1814,20 +2424,35 @@ sub jpeg_size { return (); } -# Given the raw body of a GIF or JPEG document, returns the dimensions of -# the image. +# Given the raw body of a PNG document, returns the dimensions of the image. 
# -sub image_size { +sub png_size($) { + my ($body) = @_; + return () unless ($body =~ m/^\211PNG\r/); + my ($bits) = ($body =~ m/^.{12}(.{12})/s); + return () unless defined ($bits); + return () unless ($bits =~ /^IHDR/); + my ($ign, $w, $h) = unpack("a4N2", $bits); + return ($w, $h); +} + + +# Given the raw body of a GIF, JPEG, or PNG document, returns the dimensions +# of the image. +# +sub image_size($) { my ($body) = @_; my ($w, $h) = gif_size ($body); if ($w && $h) { return ($w, $h); } - return jpeg_size ($body); + ($w, $h) = jpeg_size ($body); + if ($w && $h) { return ($w, $h); } + return png_size ($body); } # returns the full path of the named program, or undef. # -sub which { +sub which($) { my ($prog) = @_; foreach (split (/:/, $ENV{PATH})) { if (-x "$_/$prog") { @@ -1839,7 +2464,7 @@ sub which { # Like rand(), but chooses numbers with a bell curve distribution. -sub bellrand { +sub bellrand(;$) { ($_) = @_; $_ = 1.0 unless defined($_); $_ /= 3.0; @@ -1847,31 +2472,34 @@ sub bellrand { } -sub signal_cleanup { - my ($sig) = @_; - print STDERR blurb() . (defined($sig) - ? "caught signal $sig." - : "exiting.") - . "\n" - if ($verbose_exec); - +sub exit_cleanup() { x_cleanup(); - + print STDERR "$progname: exiting\n" if ($verbose_warnings); if (@pids_to_kill) { print STDERR blurb() . "killing: " . join(' ', @pids_to_kill) . "\n"; kill ('TERM', @pids_to_kill); } +} +sub signal_cleanup($) { + my ($sig) = @_; + print STDERR blurb() . (defined($sig) + ? "caught signal $sig." + : "exiting.") + . "\n" + if ($verbose_exec || $verbose_warnings); exit 1; } + + ############################################################################## # # Generating a list of urls only # ############################################################################## -sub url_only_output { +sub url_only_output() { do { my ($base, $img) = pick_image; if ($img) { @@ -1884,32 +2512,45 @@ sub url_only_output { ############################################################################## # -# Running as an xscreensaver module +# Running as an xscreensaver module, or as a web page imagemap # ############################################################################## -my $image_ppm = ($ENV{TMPDIR} ? $ENV{TMPDIR} : "/tmp") . "/webcollage." . $$; -my $image_tmp1 = $image_ppm . "-1"; -my $image_tmp2 = $image_ppm . "-2"; +my $image_ppm = sprintf ("%s/webcollage-%08x", + ($ENV{TMPDIR} ? $ENV{TMPDIR} : "/tmp"), + rand(0xFFFFFFFF)); +my $image_tmp1 = sprintf ("%s/webcollage-1-%08x", + ($ENV{TMPDIR} ? $ENV{TMPDIR} : "/tmp"), + rand(0xFFFFFFFF)); +my $image_tmp2 = sprintf ("%s/webcollage-2-%08x", + ($ENV{TMPDIR} ? $ENV{TMPDIR} : "/tmp"), + rand(0xFFFFFFFF)); my $filter_cmd = undef; my $post_filter_cmd = undef; my $background = undef; +my @imagemap_areas = (); +my $imagemap_html_tmp = undef; +my $imagemap_jpg_tmp = undef; + + my $img_width; # size of the image being generated. my $img_height; my $delay = 2; -sub x_cleanup { +sub x_cleanup() { unlink $image_ppm, $image_tmp1, $image_tmp2; + unlink $imagemap_html_tmp, $imagemap_jpg_tmp + if (defined ($imagemap_html_tmp)); } # Like system, but prints status about exit codes, and kills this process # with whatever signal killed the sub-process, if any. # -sub nontrapping_system { +sub nontrapping_system(@) { $! = 0; $_ = join(" ", @_); @@ -1938,11 +2579,11 @@ sub nontrapping_system { } -# Given the URL of a GIF or JPEG image, and the body of that image, writes a -# PPM to the given output file. Returns the width/height of the image if -# successful. 
+# Given the URL of a GIF, JPEG, or PNG image, and the body of that image, +# writes a PPM to the given output file. Returns the width/height of the +# image if successful. # -sub image_to_pnm { +sub image_to_pnm($$$) { my ($url, $body, $output) = @_; my ($cmd, $cmd2, $w, $h); @@ -1952,10 +2593,13 @@ sub image_to_pnm { } elsif ((@_ = jpeg_size ($body))) { ($w, $h) = @_; $cmd = "djpeg"; + } elsif ((@_ = png_size ($body))) { + ($w, $h) = @_; + $cmd = "pngtopnm"; } else { LOG (($verbose_pbm || $verbose_load), - "not a GIF or JPG" . - (($body =~ m@<(base|html|head|body|script|table|a href)>@i) + "not a GIF, JPG, or PNG" . + (($body =~ m@<(base|html|head|body|script|table|a href)\b@i) ? " (looks like HTML)" : "") . ": $url"); $suppress_audit = 1; @@ -2029,9 +2673,43 @@ sub image_to_pnm { } } -sub pick_root_displayer { + +# Same as the "ppmmake" command: creates a solid-colored PPM. +# Does not understand the rgb.txt color names except "black" and "white". +# +sub ppmmake($$$$) { + my ($outfile, $bgcolor, $w, $h) = @_; + + my ($r, $g, $b); + if ($bgcolor =~ m/^\#?([\dA-F][\dA-F])([\dA-F][\dA-F])([\dA-F][\dA-F])$/i || + $bgcolor =~ m/^\#?([\dA-F])([\dA-F])([\dA-F])$/i) { + ($r, $g, $b) = (hex($1), hex($2), hex($3)); + } elsif ($bgcolor =~ m/^black$/i) { + ($r, $g, $b) = (0, 0, 0); + } elsif ($bgcolor =~ m/^white$/i) { + ($r, $g, $b) = (0xFF, 0xFF, 0xFF); + } else { + error ("unparsable color name: $bgcolor"); + } + + my $pixel = pack('CCC', $r, $g, $b); + my $bits = "P6\n$w $h\n255\n" . ($pixel x ($w * $h)); + + local *OUT; + open (OUT, ">$outfile") || error ("$outfile: $!"); + print OUT $bits; + close OUT; +} + + +sub pick_root_displayer() { my @names = (); + if ($cocoa_p) { + # see "xscreensaver/hacks/webcollage-cocoa.m" + return "echo COCOA LOAD "; + } + foreach my $cmd (@root_displayers) { $_ = $cmd; my ($name) = m/^([^ ]+)/; @@ -2051,7 +2729,8 @@ sub pick_root_displayer { my $ppm_to_root_window_cmd = undef; -sub x_or_pbm_output { +sub x_or_pbm_output($) { + my ($window_id) = @_; # Check for our helper program, to see whether we need to use PPM pipelines. # @@ -2064,13 +2743,20 @@ sub x_or_pbm_output { LOG (($verbose_pbm || $verbose_load), "no $_ program"); } + if ($cocoa_p && !defined ($webcollage_helper)) { + error ("webcollage-helper not found in Cocoa-mode!"); + } + + # make sure the various programs we execute exist, right up front. # - my @progs = ("ppmmake"); # always need this one + my @progs = (); if (!defined($webcollage_helper)) { # Only need these others if we don't have the helper. 
- @progs = (@progs, "giftopnm", "djpeg", "pnmpaste", "pnmscale", "pnmcut"); + @progs = (@progs, + "giftopnm", "pngtopnm", "djpeg", + "pnmpaste", "pnmscale", "pnmcut"); } foreach (@progs) { @@ -2081,13 +2767,41 @@ sub x_or_pbm_output { # $ppm_to_root_window_cmd = pick_root_displayer(); + if (defined ($window_id)) { + error ("-window-id only works if xscreensaver-getimage is installed") + unless ($ppm_to_root_window_cmd =~ m/^xscreensaver-getimage\b/); + + error ("unparsable window id: $window_id") + unless ($window_id =~ m/^\d+$|^0x[\da-f]+$/i); + $ppm_to_root_window_cmd =~ s/--?root\b/$window_id/ || + error ("unable to munge displayer: $ppm_to_root_window_cmd"); + } + if (!$img_width || !$img_height) { - $_ = "xdpyinfo"; - which ($_) || error "$_ not found on \$PATH."; - $_ = `$_`; - ($img_width, $img_height) = m/dimensions: *(\d+)x(\d+) /; - if (!defined($img_height)) { - error "xdpyinfo failed."; + + if (!defined ($window_id) && + defined ($ENV{XSCREENSAVER_WINDOW})) { + $window_id = $ENV{XSCREENSAVER_WINDOW}; + } + + if (!defined ($window_id)) { + $_ = "xdpyinfo"; + which ($_) || error "$_ not found on \$PATH."; + $_ = `$_`; + ($img_width, $img_height) = m/dimensions: *(\d+)x(\d+) /; + if (!defined($img_height)) { + error "xdpyinfo failed."; + } + } else { # we have a window id + $_ = "xwininfo"; + which ($_) || error "$_ not found on \$PATH."; + $_ .= " -id $window_id"; + $_ = `$_`; + ($img_width, $img_height) = m/^\s*Width:\s*(\d+)\n\s*Height:\s*(\d+)\n/m; + + if (!defined($img_height)) { + error "xwininfo failed."; + } } } @@ -2112,9 +2826,8 @@ sub x_or_pbm_output { # Create the sold-colored base image. # - $_ = "ppmmake '$bgcolor' $img_width $img_height"; - LOG ($verbose_pbm, "creating base image: $_"); - nontrapping_system "$_ > $image_ppm"; + LOG ($verbose_pbm, "creating base image: ${img_width}x${img_height}"); + $_ = ppmmake ($image_ppm, $bgcolor, $img_width, $img_height); # Paste the default background image in the middle of it. # @@ -2136,13 +2849,17 @@ sub x_or_pbm_output { ($iw, $ih) = @_; $cmd = "djpeg |"; + } elsif ((@_ = png_size ($body))) { + ($iw, $ih) = @_; + $cmd = "pngtopnm |"; + } elsif ($body =~ m/^P\d\n(\d+) (\d+)\n/) { $iw = $1; $ih = $2; $cmd = ""; } else { - error "$bgimage is not a GIF, JPEG, or PPM."; + error "$bgimage is not a GIF, JPEG, PNG, or PPM."; } my $x = int (($img_width - $iw) / 2); @@ -2181,7 +2898,7 @@ sub x_or_pbm_output { } } -sub paste_image { +sub paste_image($$$$) { my ($base, $img, $body, $source) = @_; $current_state = "paste"; @@ -2203,7 +2920,7 @@ sub paste_image { ($iw, $ih) = image_size ($body); if (!$iw || !$ih) { LOG (($verbose_pbm || $verbose_load), - "not a GIF or JPG" . + "not a GIF, JPG, or PNG" . (($body =~ m@<(base|html|head|body|script|table|a href)>@i) ? " (looks like HTML)" : "") . ": $img"); @@ -2256,7 +2973,7 @@ sub paste_image { return 0 unless ($iw && $ih); } - my $target_w = $img_width; + my $target_w = $img_width; # max rectangle into which the image must fit my $target_h = $img_height; my $cmd = ""; @@ -2264,25 +2981,27 @@ sub paste_image { # Usually scale the image to fit on the screen -- but sometimes scale it - # to fit on half or a quarter of the screen. Note that we don't merely - # scale it to fit, we instead cut it in half until it fits -- that should - # give a wider distribution of sizes. + # to fit on half or a quarter of the screen. (We do this by reducing the + # size of the target rectangle.) 
Note that the image is not merely scaled
+  # to fit; we instead cut the image in half repeatedly until it fits in the
+  # target rectangle -- that gives a wider distribution of sizes.
   #
-  if (rand() < 0.3) { $target_w /= 2; $target_h /= 2; $scale /= 2; }
-  if (rand() < 0.3) { $target_w /= 2; $target_h /= 2; $scale /= 2; }
+  if (rand() < 0.3) { $target_w /= 2; $target_h /= 2; }  # reduce target rect
+  if (rand() < 0.3) { $target_w /= 2; $target_h /= 2; }

   if ($iw > $target_w || $ih > $target_h) {
     while ($iw > $target_w || $ih > $target_h) {
       $iw = int($iw / 2);
       $ih = int($ih / 2);
+      $scale /= 2;
     }

     if ($iw <= 10 || $ih <= 10) {
       LOG ($verbose_pbm, "scaling to ${iw}x$ih would have been bogus.");
       return 0;
     }

-    LOG ($verbose_pbm, "scaling to ${iw}x$ih");
+    LOG ($verbose_pbm, "scaling to ${iw}x$ih ($scale)");
     $cmd .= " | pnmscale -xsize $iw -ysize $ih";
   }

@@ -2369,7 +3088,7 @@ sub paste_image {
   # If any cropping needs to happen, add pnmcut.
   #
   if ($crop_x != 0 || $crop_y != 0 ||
-       $crop_w != $iw || $crop_h != $ih) {
+      $crop_w != $iw || $crop_h != $ih) {
     $iw = $crop_w;
     $ih = $crop_h;
     $cmd .= " | pnmcut $crop_x $crop_y $iw $ih";

@@ -2482,6 +3201,10 @@ sub paste_image {
   $source .= "-" . stats_of($source);
   print STDOUT "image: ${iw}x${ih} @ $x,$y $base $source\n"
     if ($verbose_imgmap);
+  if ($imagemap_base) {
+    update_imagemap ($base, $x, $y, $iw, $ih,
+                     $image_ppm, $img_width, $img_height);
+  }

   clearlog();

@@ -2489,7 +3212,112 @@ sub paste_image {
 }


-sub init_signals {
+sub update_imagemap($$$$$$$$) {
+  my ($url, $x, $y, $w, $h, $image_ppm, $image_width, $image_height) = @_;
+
+  $current_state = "imagemap";
+
+  my $max_areas = 200;
+
+  $url = html_quote ($url);
+  my $x2 = $x + $w;
+  my $y2 = $y + $h;
+  my $area = "<AREA SHAPE=RECT COORDS=\"$x,$y,$x2,$y2\" HREF=\"$url\">";
+  unshift @imagemap_areas, $area;      # put one on the front.
+  if ($#imagemap_areas >= $max_areas) {
+    pop @imagemap_areas;               # take one off the back.
+  }
+
+  LOG ($verbose_pbm, "area: $x,$y,$x2,$y2 (${w}x$h)");
+
+  my $map_name = $imagemap_base;
+  $map_name =~ s@^.*/@@;
+  $map_name = 'collage' if ($map_name eq '');
+
+  my $imagemap_html = $imagemap_base . ".html";
+  my $imagemap_jpg  = $imagemap_base . ".jpg";
+
+  if (!defined ($imagemap_html_tmp)) {
+    $imagemap_html_tmp = $imagemap_html . sprintf (".%08x", rand(0xffffffff));
+    $imagemap_jpg_tmp  = $imagemap_jpg  . sprintf (".%08x", rand(0xffffffff));
+  }
+
+  # Read the imagemap html file (if any) to get a template.
+  #
+  my $template_html = '';
+  {
+    local *IN;
+    if (open (IN, "<$imagemap_html")) {
+      while (<IN>) { $template_html .= $_; }
+      close IN;
+      LOG ($verbose_pbm, "read template $imagemap_html");
+    }
+
+    if ($template_html =~ m/^\s*$/s) {
+      $template_html = ("<MAP NAME=\"$map_name\"></MAP>\n" .
+                        "<IMG SRC=\"$map_name.jpg\" USEMAP=\"$map_name\">\n");
+      LOG ($verbose_pbm, "created dummy template");
+    }
+  }
+
+  # Write the jpg to a tmp file
+  #
+  {
+    my $cmd;
+    if (defined ($webcollage_helper)) {
+      $cmd = "cp -p $image_ppm $imagemap_jpg_tmp";
+    } else {
+      $cmd = "cjpeg < $image_ppm > $imagemap_jpg_tmp";
+    }
+    my $rc = nontrapping_system ($cmd);
+    if ($rc != 0) {
+      error ("imagemap jpeg failed: \"$cmd\"\n");
+    }
+  }
+
+  # Write the html to a tmp file
+  #
+  {
+    my $body = $template_html;
+    my $areas = join ("\n\t", @imagemap_areas);
+    my $map = ("<MAP NAME=\"$map_name\">\n\t$areas\n</MAP>");
+    my $img = ("<IMG SRC=\"$map_name.jpg\" USEMAP=\"$map_name\" " .
+               "WIDTH=$image_width HEIGHT=$image_height>");
+    $body =~ s@(<MAP\s+NAME=\"[^\"]*\">).*?(</MAP>)@$map@is;
+    $body =~ s@<IMG\b[^<>]*\bUSEMAP\b[^<>]*>@$img@is;
+
+    # if there are magic webcollage spans in the html, update those too.
+    #
+    {
+      my @st = stat ($imagemap_jpg_tmp);
+      my $date = strftime("%d-%b-%Y %l:%M:%S %p %Z", localtime($st[9]));
+      my $size = int(($st[7] / 1024) + 0.5) . "K";
+      $body =~ s@(<SPAN\s+CLASS=\"webcollage_date\">).*?(</SPAN>)@$1$date$2@si;
+      $body =~ s@(<SPAN\s+CLASS=\"webcollage_size\">).*?(</SPAN>)@$1$size$2@si;
+    }
+
+    local *OUT;
+    open (OUT, ">$imagemap_html_tmp") || error ("$imagemap_html_tmp: $!");
+    print OUT $body || error ("$imagemap_html_tmp: $!");
+    close OUT || error ("$imagemap_html_tmp: $!");
+    LOG ($verbose_pbm, "wrote $imagemap_html_tmp");
+  }
+
+  # Rename the two tmp files to the real files
+  #
+  rename ($imagemap_html_tmp, $imagemap_html) ||
+    error "renaming $imagemap_html_tmp to $imagemap_html";
+  LOG ($verbose_pbm, "wrote $imagemap_html");
+
+  rename ($imagemap_jpg_tmp, $imagemap_jpg) ||
+    error "renaming $imagemap_jpg_tmp to $imagemap_jpg";
+  LOG ($verbose_pbm, "wrote $imagemap_jpg");
+}
+
+
+sub init_signals() {

   $SIG{HUP} = \&signal_cleanup;
   $SIG{INT} = \&signal_cleanup;
@@ -2502,10 +3330,10 @@ sub init_signals {
   $SIG{PIPE} = 'IGNORE';
 }

-END { signal_cleanup(); }
+END { exit_cleanup(); }


-sub main {
+sub main() {
   $| = 1;
   srand(time ^ $$);
@@ -2517,6 +3345,7 @@ sub main {
   $load_method = "none";

   my $root_p = 0;
+  my $window_id = undef;

   # historical suckage: the environment variable name is lower case.
   $http_proxy = $ENV{http_proxy} || $ENV{HTTP_PROXY};
@@ -2532,11 +3361,19 @@ sub main {
       $ENV{DISPLAY} = shift @ARGV;
     } elsif ($_ eq "-root") {
       $root_p = 1;
+    } elsif ($_ eq "-window-id" || $_ eq "--window-id") {
+      $window_id = shift @ARGV;
+      $root_p = 1;
     } elsif ($_ eq "-no-output") {
       $no_output_p = 1;
     } elsif ($_ eq "-urls-only") {
       $urls_only_p = 1;
       $no_output_p = 1;
+    } elsif ($_ eq "-cocoa") {
+      $cocoa_p = 1;
+    } elsif ($_ eq "-imagemap") {
+      $imagemap_base = shift @ARGV;
+      $no_output_p = 1;
     } elsif ($_ eq "-verbose") {
       $verbose++;
     } elsif (m/^-v+$/) {
@@ -2563,6 +3400,10 @@ sub main {
       $http_proxy = shift @ARGV;
     } elsif ($_ eq "-dictionary" || $_ eq "-dict") {
       $dict = shift @ARGV;
+    } elsif ($_ eq "-opacity") {
+      $opacity = shift @ARGV;
+      error ("opacity must be between 0.0 and 1.0")
+        if ($opacity <= 0 || $opacity > 1);
     } elsif ($_ eq "-driftnet" || $_ eq "--driftnet") {
       @search_methods = ( 100, "driftnet", \&pick_from_driftnet );
       if (! ($ARGV[0] =~ m/^-/)) {
@@ -2570,6 +3411,13 @@ sub main {
       } else {
         $driftnet_cmd = $default_driftnet_cmd;
       }
+    } elsif ($_ eq "-directory" || $_ eq "--directory") {
+      @search_methods = ( 100, "local", \&pick_from_local_dir );
+      if (! ($ARGV[0] =~ m/^-/)) {
+        $local_dir = shift @ARGV;
+      } else {
+        error ("local directory path must be set")
+      }
     } elsif ($_ eq "-debug" || $_ eq "--debug") {
       my $which = shift @ARGV;
       my @rest = @search_methods;
@@ -2591,10 +3439,12 @@ sub main {
     } else {
       print STDERR "$copyright\nusage: $progname " .
        "[-root] [-display dpy] [-verbose] [-debug which]\n" .
-       "\t\t [-timeout secs] [-delay secs] [-filter cmd] [-filter2 cmd]\n" .
-       "\t\t [-no-output] [-urls-only] [-background color] [-size WxH]\n" .
+       "\t\t [-timeout secs] [-delay secs] [-size WxH]\n" .
+       "\t\t [-no-output] [-urls-only] [-imagemap filename]\n" .
+       "\t\t [-filter cmd] [-filter2 cmd] [-background color]\n" .
        "\t\t [-dictionary dictionary-file] [-http-proxy host[:port]]\n" .
        "\t\t [-driftnet [driftnet-program-and-args]]\n" .
+       "\t\t [-directory local-image-directory]\n" .
"\n"; exit 1; } @@ -2608,12 +3458,12 @@ sub main { $http_proxy = $1; } - if (!$root_p && !$no_output_p) { + if (!$root_p && !$no_output_p && !$cocoa_p) { print STDERR $copyright; error "the -root argument is mandatory (for now.)"; } - if (!$no_output_p && !$ENV{DISPLAY}) { + if (!$no_output_p && !$cocoa_p && !$ENV{DISPLAY}) { error "\$DISPLAY is not set."; } @@ -2675,16 +3525,25 @@ sub main { pick_dictionary(); } + if ($imagemap_base && !($img_width && $img_height)) { + error ("-size WxH is required with -imagemap"); + } + + if (defined ($local_dir)) { + $_ = "xscreensaver-getimage-file"; + which ($_) || error "$_ not found on \$PATH."; + } + init_signals(); spawn_driftnet ($driftnet_cmd) if ($driftnet_cmd); if ($urls_only_p) { - url_only_output; + url_only_output (); } else { - x_or_pbm_output; + x_or_pbm_output ($window_id); } } -main; +main(); exit (0);